drivers/md/md.c
1 /*
2    md.c : Multiple Devices driver for Linux
3      Copyright (C) 1998, 1999, 2000 Ingo Molnar
4
5      completely rewritten, based on the MD driver code from Marc Zyngier
6
7    Changes:
8
9    - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
10    - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
11    - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
12    - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
13    - kmod support by: Cyrus Durgin
14    - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
15    - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
16
17    - lots of fixes and improvements to the RAID1/RAID5 and generic
18      RAID code (such as request based resynchronization):
19
20      Neil Brown <neilb@cse.unsw.edu.au>.
21
22    - persistent bitmap code
23      Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
24
25    This program is free software; you can redistribute it and/or modify
26    it under the terms of the GNU General Public License as published by
27    the Free Software Foundation; either version 2, or (at your option)
28    any later version.
29
30    You should have received a copy of the GNU General Public License
31    (for example /usr/src/linux/COPYING); if not, write to the Free
32    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
33
34    Errors, Warnings, etc.
35    Please use:
36      pr_crit() for error conditions that risk data loss
37      pr_err() for error conditions that are unexpected, like an IO error
38          or internal inconsistency
39      pr_warn() for error conditions that could have been predicted, like
40          adding a device to an array when it has incompatible metadata
41      pr_info() for interesting, very rare events, like an array starting
42          or stopping, or resync starting or stopping
43      pr_debug() for everything else.
44
45 */
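/*
 * Editor's note, illustrative only: examples of the severity convention
 * above, taken from messages emitted later in this file:
 *
 *   pr_err("md: super_written gets error=%d\n", bio->bi_error);
 *           an unexpected IO error while writing a superblock
 *   pr_warn("md: %s has different UUID to %s\n", b, b2);
 *           a predictable condition: a member device that does not belong
 */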
46
47 #include <linux/kthread.h>
48 #include <linux/blkdev.h>
49 #include <linux/badblocks.h>
50 #include <linux/sysctl.h>
51 #include <linux/seq_file.h>
52 #include <linux/fs.h>
53 #include <linux/poll.h>
54 #include <linux/ctype.h>
55 #include <linux/string.h>
56 #include <linux/hdreg.h>
57 #include <linux/proc_fs.h>
58 #include <linux/random.h>
59 #include <linux/module.h>
60 #include <linux/reboot.h>
61 #include <linux/file.h>
62 #include <linux/compat.h>
63 #include <linux/delay.h>
64 #include <linux/raid/md_p.h>
65 #include <linux/raid/md_u.h>
66 #include <linux/slab.h>
67 #include "md.h"
68 #include "bitmap.h"
69 #include "md-cluster.h"
70
71 #ifndef MODULE
72 static void autostart_arrays(int part);
73 #endif
74
75 /* pers_list is a list of registered personalities protected
76  * by pers_lock.
77  * pers_lock also protects accesses to
78  * mddev->thread when the mutex cannot be held.
79  */
80 static LIST_HEAD(pers_list);
81 static DEFINE_SPINLOCK(pers_lock);
82
83 struct md_cluster_operations *md_cluster_ops;
84 EXPORT_SYMBOL(md_cluster_ops);
85 struct module *md_cluster_mod;
86 EXPORT_SYMBOL(md_cluster_mod);
87
88 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
89 static struct workqueue_struct *md_wq;
90 static struct workqueue_struct *md_misc_wq;
91
92 static int remove_and_add_spares(struct mddev *mddev,
93                                  struct md_rdev *this);
94 static void mddev_detach(struct mddev *mddev);
95
96 /*
97  * Default number of read corrections we'll attempt on an rdev
98  * before ejecting it from the array. We divide the read error
99  * count by 2 for every hour elapsed between read errors.
100  */
101 #define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
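/*
 * Editor's note, illustrative only: with the default limit of 20, a device
 * that has accumulated 16 corrected read errors and then runs cleanly for
 * two hours is treated as having 16/2/2 = 4 when the next error arrives,
 * so a further burst of errors is needed before it is ejected.
 */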
102 /*
103  * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
104  * is 1000 KB/sec, so the extra system load does not show up that much.
105  * Increase it if you want to have more _guaranteed_ speed. Note that
106  * the RAID driver will use the maximum available bandwidth if the IO
107  * subsystem is idle. There is also an 'absolute maximum' reconstruction
108  * speed limit - in case reconstruction slows down your system despite
109  * idle IO detection.
110  *
111  * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
112  * or /sys/block/mdX/md/sync_speed_{min,max}
113  */
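/*
 * Editor's note, illustrative only (md0 is a hypothetical array name):
 *
 *      echo  50000 > /proc/sys/dev/raid/speed_limit_min
 *      echo 500000 > /sys/block/md0/md/sync_speed_max
 *
 * Both values are in KB/sec, as described above.
 */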
114
115 static int sysctl_speed_limit_min = 1000;
116 static int sysctl_speed_limit_max = 200000;
117 static inline int speed_min(struct mddev *mddev)
118 {
119         return mddev->sync_speed_min ?
120                 mddev->sync_speed_min : sysctl_speed_limit_min;
121 }
122
123 static inline int speed_max(struct mddev *mddev)
124 {
125         return mddev->sync_speed_max ?
126                 mddev->sync_speed_max : sysctl_speed_limit_max;
127 }
128
129 static struct ctl_table_header *raid_table_header;
130
131 static struct ctl_table raid_table[] = {
132         {
133                 .procname       = "speed_limit_min",
134                 .data           = &sysctl_speed_limit_min,
135                 .maxlen         = sizeof(int),
136                 .mode           = S_IRUGO|S_IWUSR,
137                 .proc_handler   = proc_dointvec,
138         },
139         {
140                 .procname       = "speed_limit_max",
141                 .data           = &sysctl_speed_limit_max,
142                 .maxlen         = sizeof(int),
143                 .mode           = S_IRUGO|S_IWUSR,
144                 .proc_handler   = proc_dointvec,
145         },
146         { }
147 };
148
149 static struct ctl_table raid_dir_table[] = {
150         {
151                 .procname       = "raid",
152                 .maxlen         = 0,
153                 .mode           = S_IRUGO|S_IXUGO,
154                 .child          = raid_table,
155         },
156         { }
157 };
158
159 static struct ctl_table raid_root_table[] = {
160         {
161                 .procname       = "dev",
162                 .maxlen         = 0,
163                 .mode           = 0555,
164                 .child          = raid_dir_table,
165         },
166         {  }
167 };
168
169 static const struct block_device_operations md_fops;
170
171 static int start_readonly;
172
173 /* bio_alloc_mddev / bio_clone_mddev
174  * like bio_alloc / bio_clone, but with a local bio set
175  */
176
177 struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
178                             struct mddev *mddev)
179 {
180         struct bio *b;
181
182         if (!mddev || !mddev->bio_set)
183                 return bio_alloc(gfp_mask, nr_iovecs);
184
185         b = bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set);
186         if (!b)
187                 return NULL;
188         return b;
189 }
190 EXPORT_SYMBOL_GPL(bio_alloc_mddev);
191
192 struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
193                             struct mddev *mddev)
194 {
195         if (!mddev || !mddev->bio_set)
196                 return bio_clone(bio, gfp_mask);
197
198         return bio_clone_bioset(bio, gfp_mask, mddev->bio_set);
199 }
200 EXPORT_SYMBOL_GPL(bio_clone_mddev);
201
202 /*
203  * We have a system wide 'event count' that is incremented
204  * on any 'interesting' event, and readers of /proc/mdstat
205  * can use 'poll' or 'select' to find out when the event
206  * count increases.
207  *
208  * Events are:
209  *  start array, stop array, error, add device, remove device,
210  *  start build, activate spare
211  */
212 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
213 static atomic_t md_event_count;
214 void md_new_event(struct mddev *mddev)
215 {
216         atomic_inc(&md_event_count);
217         wake_up(&md_event_waiters);
218 }
219 EXPORT_SYMBOL_GPL(md_new_event);
220
221 /*
222  * Enables iteration over all existing md arrays;
223  * all_mddevs_lock protects this list.
224  */
225 static LIST_HEAD(all_mddevs);
226 static DEFINE_SPINLOCK(all_mddevs_lock);
227
228 /*
229  * iterates through all used mddevs in the system.
230  * We take care to grab the all_mddevs_lock whenever navigating
231  * the list, and to always hold a refcount when unlocked.
232  * Any code which breaks out of this loop while owning
233  * a reference to the current mddev must mddev_put it.
234  */
235 #define for_each_mddev(_mddev,_tmp)                                     \
236                                                                         \
237         for (({ spin_lock(&all_mddevs_lock);                            \
238                 _tmp = all_mddevs.next;                                 \
239                 _mddev = NULL;});                                       \
240              ({ if (_tmp != &all_mddevs)                                \
241                         mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
242                 spin_unlock(&all_mddevs_lock);                          \
243                 if (_mddev) mddev_put(_mddev);                          \
244                 _mddev = list_entry(_tmp, struct mddev, all_mddevs);    \
245                 _tmp != &all_mddevs;});                                 \
246              ({ spin_lock(&all_mddevs_lock);                            \
247                 _tmp = _tmp->next;})                                    \
248                 )
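/*
 * Editor's note: a minimal usage sketch of for_each_mddev(), compiled out.
 * The macro takes and drops the per-mddev reference itself; code that
 * breaks out of the loop early still owns a reference and must mddev_put()
 * it, as noted above.
 */
#if 0
static void example_count_mddevs(void)
{
        struct mddev *mddev;
        struct list_head *tmp;
        int count = 0;

        for_each_mddev(mddev, tmp)
                count++;
        pr_debug("md: %d arrays currently registered\n", count);
}
#endif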
249
250 /* Rather than calling directly into the personality make_request function,
251  * IO requests come here first so that we can check if the device is
252  * being suspended pending a reconfiguration.
253  * We hold a refcount over the call to ->make_request.  By the time that
254  * call has finished, the bio has been linked into some internal structure
255  * and so is visible to ->quiesce(), so we don't need the refcount any more.
256  */
257 static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
258 {
259         const int rw = bio_data_dir(bio);
260         struct mddev *mddev = q->queuedata;
261         unsigned int sectors;
262         int cpu;
263
264         blk_queue_split(q, &bio, q->bio_split);
265
266         if (mddev == NULL || mddev->pers == NULL) {
267                 bio_io_error(bio);
268                 return BLK_QC_T_NONE;
269         }
270         if (mddev->ro == 1 && unlikely(rw == WRITE)) {
271                 if (bio_sectors(bio) != 0)
272                         bio->bi_error = -EROFS;
273                 bio_endio(bio);
274                 return BLK_QC_T_NONE;
275         }
276         smp_rmb(); /* Ensure implications of  'active' are visible */
277         rcu_read_lock();
278         if (mddev->suspended) {
279                 DEFINE_WAIT(__wait);
280                 for (;;) {
281                         prepare_to_wait(&mddev->sb_wait, &__wait,
282                                         TASK_UNINTERRUPTIBLE);
283                         if (!mddev->suspended)
284                                 break;
285                         rcu_read_unlock();
286                         schedule();
287                         rcu_read_lock();
288                 }
289                 finish_wait(&mddev->sb_wait, &__wait);
290         }
291         atomic_inc(&mddev->active_io);
292         rcu_read_unlock();
293
294         /*
295          * save the sectors now since our bio can
296          * go away inside make_request
297          */
298         sectors = bio_sectors(bio);
299         /* bio could be mergeable after being passed to the underlying layer */
300         bio->bi_opf &= ~REQ_NOMERGE;
301         mddev->pers->make_request(mddev, bio);
302
303         cpu = part_stat_lock();
304         part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
305         part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
306         part_stat_unlock();
307
308         if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
309                 wake_up(&mddev->sb_wait);
310
311         return BLK_QC_T_NONE;
312 }
313
314 /* mddev_suspend makes sure no new requests are submitted
315  * to the device, and that any requests that have been submitted
316  * are completely handled.
317  * Once mddev_detach() is called and completes, the module will be
318  * completely unused.
319  */
320 void mddev_suspend(struct mddev *mddev)
321 {
322         WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
323         if (mddev->suspended++)
324                 return;
325         synchronize_rcu();
326         wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
327         mddev->pers->quiesce(mddev, 1);
328
329         del_timer_sync(&mddev->safemode_timer);
330 }
331 EXPORT_SYMBOL_GPL(mddev_suspend);
332
333 void mddev_resume(struct mddev *mddev)
334 {
335         if (--mddev->suspended)
336                 return;
337         wake_up(&mddev->sb_wait);
338         mddev->pers->quiesce(mddev, 0);
339
340         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
341         md_wakeup_thread(mddev->thread);
342         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
343 }
344 EXPORT_SYMBOL_GPL(mddev_resume);
345
346 int mddev_congested(struct mddev *mddev, int bits)
347 {
348         struct md_personality *pers = mddev->pers;
349         int ret = 0;
350
351         rcu_read_lock();
352         if (mddev->suspended)
353                 ret = 1;
354         else if (pers && pers->congested)
355                 ret = pers->congested(mddev, bits);
356         rcu_read_unlock();
357         return ret;
358 }
359 EXPORT_SYMBOL_GPL(mddev_congested);
360 static int md_congested(void *data, int bits)
361 {
362         struct mddev *mddev = data;
363         return mddev_congested(mddev, bits);
364 }
365
366 /*
367  * Generic flush handling for md
368  */
369
370 static void md_end_flush(struct bio *bio)
371 {
372         struct md_rdev *rdev = bio->bi_private;
373         struct mddev *mddev = rdev->mddev;
374
375         rdev_dec_pending(rdev, mddev);
376
377         if (atomic_dec_and_test(&mddev->flush_pending)) {
378                 /* The pre-request flush has finished */
379                 queue_work(md_wq, &mddev->flush_work);
380         }
381         bio_put(bio);
382 }
383
384 static void md_submit_flush_data(struct work_struct *ws);
385
386 static void submit_flushes(struct work_struct *ws)
387 {
388         struct mddev *mddev = container_of(ws, struct mddev, flush_work);
389         struct md_rdev *rdev;
390
391         INIT_WORK(&mddev->flush_work, md_submit_flush_data);
392         atomic_set(&mddev->flush_pending, 1);
393         rcu_read_lock();
394         rdev_for_each_rcu(rdev, mddev)
395                 if (rdev->raid_disk >= 0 &&
396                     !test_bit(Faulty, &rdev->flags)) {
397                         /* Take two references, one is dropped
398                          * when request finishes, one after
399                          * we re-acquire the rcu_read_lock
400                          */
401                         struct bio *bi;
402                         atomic_inc(&rdev->nr_pending);
403                         atomic_inc(&rdev->nr_pending);
404                         rcu_read_unlock();
405                         bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
406                         bi->bi_end_io = md_end_flush;
407                         bi->bi_private = rdev;
408                         bi->bi_bdev = rdev->bdev;
409                         bio_set_op_attrs(bi, REQ_OP_WRITE, WRITE_FLUSH);
410                         atomic_inc(&mddev->flush_pending);
411                         submit_bio(bi);
412                         rcu_read_lock();
413                         rdev_dec_pending(rdev, mddev);
414                 }
415         rcu_read_unlock();
416         if (atomic_dec_and_test(&mddev->flush_pending))
417                 queue_work(md_wq, &mddev->flush_work);
418 }
419
420 static void md_submit_flush_data(struct work_struct *ws)
421 {
422         struct mddev *mddev = container_of(ws, struct mddev, flush_work);
423         struct bio *bio = mddev->flush_bio;
424
425         if (bio->bi_iter.bi_size == 0)
426                 /* an empty barrier - all done */
427                 bio_endio(bio);
428         else {
429                 bio->bi_opf &= ~REQ_PREFLUSH;
430                 mddev->pers->make_request(mddev, bio);
431         }
432
433         mddev->flush_bio = NULL;
434         wake_up(&mddev->sb_wait);
435 }
436
437 void md_flush_request(struct mddev *mddev, struct bio *bio)
438 {
439         spin_lock_irq(&mddev->lock);
440         wait_event_lock_irq(mddev->sb_wait,
441                             !mddev->flush_bio,
442                             mddev->lock);
443         mddev->flush_bio = bio;
444         spin_unlock_irq(&mddev->lock);
445
446         INIT_WORK(&mddev->flush_work, submit_flushes);
447         queue_work(md_wq, &mddev->flush_work);
448 }
449 EXPORT_SYMBOL(md_flush_request);
450
451 void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
452 {
453         struct mddev *mddev = cb->data;
454         md_wakeup_thread(mddev->thread);
455         kfree(cb);
456 }
457 EXPORT_SYMBOL(md_unplug);
458
459 static inline struct mddev *mddev_get(struct mddev *mddev)
460 {
461         atomic_inc(&mddev->active);
462         return mddev;
463 }
464
465 static void mddev_delayed_delete(struct work_struct *ws);
466
467 static void mddev_put(struct mddev *mddev)
468 {
469         struct bio_set *bs = NULL;
470
471         if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
472                 return;
473         if (!mddev->raid_disks && list_empty(&mddev->disks) &&
474             mddev->ctime == 0 && !mddev->hold_active) {
475                 /* Array is not configured at all, and not held active,
476                  * so destroy it */
477                 list_del_init(&mddev->all_mddevs);
478                 bs = mddev->bio_set;
479                 mddev->bio_set = NULL;
480                 if (mddev->gendisk) {
481                         /* We did a probe so need to clean up.  Call
482                          * queue_work inside the spinlock so that
483                          * flush_workqueue() after mddev_find will
484                          * succeed in waiting for the work to be done.
485                          */
486                         INIT_WORK(&mddev->del_work, mddev_delayed_delete);
487                         queue_work(md_misc_wq, &mddev->del_work);
488                 } else
489                         kfree(mddev);
490         }
491         spin_unlock(&all_mddevs_lock);
492         if (bs)
493                 bioset_free(bs);
494 }
495
496 static void md_safemode_timeout(unsigned long data);
497
498 void mddev_init(struct mddev *mddev)
499 {
500         mutex_init(&mddev->open_mutex);
501         mutex_init(&mddev->reconfig_mutex);
502         mutex_init(&mddev->bitmap_info.mutex);
503         INIT_LIST_HEAD(&mddev->disks);
504         INIT_LIST_HEAD(&mddev->all_mddevs);
505         setup_timer(&mddev->safemode_timer, md_safemode_timeout,
506                     (unsigned long) mddev);
507         atomic_set(&mddev->active, 1);
508         atomic_set(&mddev->openers, 0);
509         atomic_set(&mddev->active_io, 0);
510         spin_lock_init(&mddev->lock);
511         atomic_set(&mddev->flush_pending, 0);
512         init_waitqueue_head(&mddev->sb_wait);
513         init_waitqueue_head(&mddev->recovery_wait);
514         mddev->reshape_position = MaxSector;
515         mddev->reshape_backwards = 0;
516         mddev->last_sync_action = "none";
517         mddev->resync_min = 0;
518         mddev->resync_max = MaxSector;
519         mddev->level = LEVEL_NONE;
520 }
521 EXPORT_SYMBOL_GPL(mddev_init);
522
523 static struct mddev *mddev_find(dev_t unit)
524 {
525         struct mddev *mddev, *new = NULL;
526
527         if (unit && MAJOR(unit) != MD_MAJOR)
528                 unit &= ~((1<<MdpMinorShift)-1);
529
530  retry:
531         spin_lock(&all_mddevs_lock);
532
533         if (unit) {
534                 list_for_each_entry(mddev, &all_mddevs, all_mddevs)
535                         if (mddev->unit == unit) {
536                                 mddev_get(mddev);
537                                 spin_unlock(&all_mddevs_lock);
538                                 kfree(new);
539                                 return mddev;
540                         }
541
542                 if (new) {
543                         list_add(&new->all_mddevs, &all_mddevs);
544                         spin_unlock(&all_mddevs_lock);
545                         new->hold_active = UNTIL_IOCTL;
546                         return new;
547                 }
548         } else if (new) {
549                 /* find an unused unit number */
550                 static int next_minor = 512;
551                 int start = next_minor;
552                 int is_free = 0;
553                 int dev = 0;
554                 while (!is_free) {
555                         dev = MKDEV(MD_MAJOR, next_minor);
556                         next_minor++;
557                         if (next_minor > MINORMASK)
558                                 next_minor = 0;
559                         if (next_minor == start) {
560                                 /* Oh dear, all in use. */
561                                 spin_unlock(&all_mddevs_lock);
562                                 kfree(new);
563                                 return NULL;
564                         }
565
566                         is_free = 1;
567                         list_for_each_entry(mddev, &all_mddevs, all_mddevs)
568                                 if (mddev->unit == dev) {
569                                         is_free = 0;
570                                         break;
571                                 }
572                 }
573                 new->unit = dev;
574                 new->md_minor = MINOR(dev);
575                 new->hold_active = UNTIL_STOP;
576                 list_add(&new->all_mddevs, &all_mddevs);
577                 spin_unlock(&all_mddevs_lock);
578                 return new;
579         }
580         spin_unlock(&all_mddevs_lock);
581
582         new = kzalloc(sizeof(*new), GFP_KERNEL);
583         if (!new)
584                 return NULL;
585
586         new->unit = unit;
587         if (MAJOR(unit) == MD_MAJOR)
588                 new->md_minor = MINOR(unit);
589         else
590                 new->md_minor = MINOR(unit) >> MdpMinorShift;
591
592         mddev_init(new);
593
594         goto retry;
595 }
596
597 static struct attribute_group md_redundancy_group;
598
599 void mddev_unlock(struct mddev *mddev)
600 {
601         if (mddev->to_remove) {
602                 /* These cannot be removed under reconfig_mutex as
603                  * an access to the files will try to take reconfig_mutex
604                  * while holding the file unremovable, which leads to
605                  * a deadlock.
606                  * So set sysfs_active while the removal is happening,
607                  * and anything else which might set ->to_remove or might
608                  * otherwise change the sysfs namespace will fail with
609                  * -EBUSY if sysfs_active is still set.
610                  * We set sysfs_active under reconfig_mutex and elsewhere
611                  * test it under the same mutex to ensure its correct value
612                  * is seen.
613                  */
614                 struct attribute_group *to_remove = mddev->to_remove;
615                 mddev->to_remove = NULL;
616                 mddev->sysfs_active = 1;
617                 mutex_unlock(&mddev->reconfig_mutex);
618
619                 if (mddev->kobj.sd) {
620                         if (to_remove != &md_redundancy_group)
621                                 sysfs_remove_group(&mddev->kobj, to_remove);
622                         if (mddev->pers == NULL ||
623                             mddev->pers->sync_request == NULL) {
624                                 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
625                                 if (mddev->sysfs_action)
626                                         sysfs_put(mddev->sysfs_action);
627                                 mddev->sysfs_action = NULL;
628                         }
629                 }
630                 mddev->sysfs_active = 0;
631         } else
632                 mutex_unlock(&mddev->reconfig_mutex);
633
634         /* As we've dropped the mutex we need a spinlock to
635          * make sure the thread doesn't disappear
636          */
637         spin_lock(&pers_lock);
638         md_wakeup_thread(mddev->thread);
639         spin_unlock(&pers_lock);
640 }
641 EXPORT_SYMBOL_GPL(mddev_unlock);
642
643 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
644 {
645         struct md_rdev *rdev;
646
647         rdev_for_each_rcu(rdev, mddev)
648                 if (rdev->desc_nr == nr)
649                         return rdev;
650
651         return NULL;
652 }
653 EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);
654
655 static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
656 {
657         struct md_rdev *rdev;
658
659         rdev_for_each(rdev, mddev)
660                 if (rdev->bdev->bd_dev == dev)
661                         return rdev;
662
663         return NULL;
664 }
665
666 static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
667 {
668         struct md_rdev *rdev;
669
670         rdev_for_each_rcu(rdev, mddev)
671                 if (rdev->bdev->bd_dev == dev)
672                         return rdev;
673
674         return NULL;
675 }
676
677 static struct md_personality *find_pers(int level, char *clevel)
678 {
679         struct md_personality *pers;
680         list_for_each_entry(pers, &pers_list, list) {
681                 if (level != LEVEL_NONE && pers->level == level)
682                         return pers;
683                 if (strcmp(pers->name, clevel)==0)
684                         return pers;
685         }
686         return NULL;
687 }
688
689 /* return the offset of the super block in 512byte sectors */
690 static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
691 {
692         sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
693         return MD_NEW_SIZE_SECTORS(num_sectors);
694 }
695
696 static int alloc_disk_sb(struct md_rdev *rdev)
697 {
698         rdev->sb_page = alloc_page(GFP_KERNEL);
699         if (!rdev->sb_page)
700                 return -ENOMEM;
701         return 0;
702 }
703
704 void md_rdev_clear(struct md_rdev *rdev)
705 {
706         if (rdev->sb_page) {
707                 put_page(rdev->sb_page);
708                 rdev->sb_loaded = 0;
709                 rdev->sb_page = NULL;
710                 rdev->sb_start = 0;
711                 rdev->sectors = 0;
712         }
713         if (rdev->bb_page) {
714                 put_page(rdev->bb_page);
715                 rdev->bb_page = NULL;
716         }
717         badblocks_exit(&rdev->badblocks);
718 }
719 EXPORT_SYMBOL_GPL(md_rdev_clear);
720
721 static void super_written(struct bio *bio)
722 {
723         struct md_rdev *rdev = bio->bi_private;
724         struct mddev *mddev = rdev->mddev;
725
726         if (bio->bi_error) {
727                 pr_err("md: super_written gets error=%d\n", bio->bi_error);
728                 md_error(mddev, rdev);
729         }
730
731         if (atomic_dec_and_test(&mddev->pending_writes))
732                 wake_up(&mddev->sb_wait);
733         rdev_dec_pending(rdev, mddev);
734         bio_put(bio);
735 }
736
737 void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
738                    sector_t sector, int size, struct page *page)
739 {
740         /* write first size bytes of page to sector of rdev
741          * Increment mddev->pending_writes before returning
742          * and decrement it on completion, waking up sb_wait
743          * if zero is reached.
744          * If an error occurred, call md_error
745          */
746         struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
747
748         atomic_inc(&rdev->nr_pending);
749
750         bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
751         bio->bi_iter.bi_sector = sector;
752         bio_add_page(bio, page, size, 0);
753         bio->bi_private = rdev;
754         bio->bi_end_io = super_written;
755         bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH_FUA);
756
757         atomic_inc(&mddev->pending_writes);
758         submit_bio(bio);
759 }
760
761 void md_super_wait(struct mddev *mddev)
762 {
763         /* wait for all superblock writes that were scheduled to complete */
764         wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
765 }
766
767 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
768                  struct page *page, int op, int op_flags, bool metadata_op)
769 {
770         struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
771         int ret;
772
773         bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
774                 rdev->meta_bdev : rdev->bdev;
775         bio_set_op_attrs(bio, op, op_flags);
776         if (metadata_op)
777                 bio->bi_iter.bi_sector = sector + rdev->sb_start;
778         else if (rdev->mddev->reshape_position != MaxSector &&
779                  (rdev->mddev->reshape_backwards ==
780                   (sector >= rdev->mddev->reshape_position)))
781                 bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
782         else
783                 bio->bi_iter.bi_sector = sector + rdev->data_offset;
784         bio_add_page(bio, page, size, 0);
785
786         submit_bio_wait(bio);
787
788         ret = !bio->bi_error;
789         bio_put(bio);
790         return ret;
791 }
792 EXPORT_SYMBOL_GPL(sync_page_io);
793
794 static int read_disk_sb(struct md_rdev *rdev, int size)
795 {
796         char b[BDEVNAME_SIZE];
797
798         if (rdev->sb_loaded)
799                 return 0;
800
801         if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true))
802                 goto fail;
803         rdev->sb_loaded = 1;
804         return 0;
805
806 fail:
807         pr_err("md: disabled device %s, could not read superblock.\n",
808                bdevname(rdev->bdev,b));
809         return -EINVAL;
810 }
811
812 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
813 {
814         return  sb1->set_uuid0 == sb2->set_uuid0 &&
815                 sb1->set_uuid1 == sb2->set_uuid1 &&
816                 sb1->set_uuid2 == sb2->set_uuid2 &&
817                 sb1->set_uuid3 == sb2->set_uuid3;
818 }
819
820 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
821 {
822         int ret;
823         mdp_super_t *tmp1, *tmp2;
824
825         tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
826         tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
827
828         if (!tmp1 || !tmp2) {
829                 ret = 0;
830                 goto abort;
831         }
832
833         *tmp1 = *sb1;
834         *tmp2 = *sb2;
835
836         /*
837          * nr_disks is not constant
838          */
839         tmp1->nr_disks = 0;
840         tmp2->nr_disks = 0;
841
842         ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
843 abort:
844         kfree(tmp1);
845         kfree(tmp2);
846         return ret;
847 }
848
849 static u32 md_csum_fold(u32 csum)
850 {
851         csum = (csum & 0xffff) + (csum >> 16);
852         return (csum & 0xffff) + (csum >> 16);
853 }
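/*
 * Editor's note, worked example: md_csum_fold(0x0001ffff) first computes
 * 0xffff + 0x0001 = 0x10000, then folds the carry back in on the second
 * line: 0x0000 + 0x0001 = 0x0001.  The second pass is what makes the
 * result fit in 16 bits even when the first addition overflows them.
 */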
854
855 static unsigned int calc_sb_csum(mdp_super_t *sb)
856 {
857         u64 newcsum = 0;
858         u32 *sb32 = (u32*)sb;
859         int i;
860         unsigned int disk_csum, csum;
861
862         disk_csum = sb->sb_csum;
863         sb->sb_csum = 0;
864
865         for (i = 0; i < MD_SB_BYTES/4 ; i++)
866                 newcsum += sb32[i];
867         csum = (newcsum & 0xffffffff) + (newcsum>>32);
868
869 #ifdef CONFIG_ALPHA
870         /* This used to use csum_partial, which was wrong for several
871          * reasons including that different results are returned on
872          * different architectures.  It isn't critical that we get exactly
873          * the same return value as before (we always csum_fold before
874          * testing, and that removes any differences).  However as we
875          * know that csum_partial always returned a 16bit value on
876          * alphas, do a fold to maximise conformity to previous behaviour.
877          */
878         sb->sb_csum = md_csum_fold(disk_csum);
879 #else
880         sb->sb_csum = disk_csum;
881 #endif
882         return csum;
883 }
884
885 /*
886  * Handle superblock details.
887  * We want to be able to handle multiple superblock formats
888  * so we have a common interface to them all, and an array of
889  * different handlers.
890  * We rely on user-space to write the initial superblock, and support
891  * reading and updating of superblocks.
892  * Interface methods are:
893  *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
894  *      loads and validates a superblock on dev.
895  *      if refdev != NULL, compare superblocks on both devices
896  *    Return:
897  *      0 - dev has a superblock that is compatible with refdev
898  *      1 - dev has a superblock that is compatible and newer than refdev
899  *          so dev should be used as the refdev in future
900  *     -EINVAL superblock incompatible or invalid
901  *     -othererror e.g. -EIO
902  *
903  *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
904  *      Verify that dev is acceptable into mddev.
905  *       The first time, mddev->raid_disks will be 0, and data from
906  *       dev should be merged in.  Subsequent calls check that dev
907  *       is new enough.  Return 0 or -EINVAL
908  *
909  *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
910  *     Update the superblock for rdev with data in mddev
911  *     This does not write to disc.
912  *
913  */
914
915 struct super_type  {
916         char                *name;
917         struct module       *owner;
918         int                 (*load_super)(struct md_rdev *rdev,
919                                           struct md_rdev *refdev,
920                                           int minor_version);
921         int                 (*validate_super)(struct mddev *mddev,
922                                               struct md_rdev *rdev);
923         void                (*sync_super)(struct mddev *mddev,
924                                           struct md_rdev *rdev);
925         unsigned long long  (*rdev_size_change)(struct md_rdev *rdev,
926                                                 sector_t num_sectors);
927         int                 (*allow_new_offset)(struct md_rdev *rdev,
928                                                 unsigned long long new_offset);
929 };
930
931 /*
932  * Check that the given mddev has no bitmap.
933  *
934  * This function is called from the run method of all personalities that do not
935  * support bitmaps. It prints an error message and returns non-zero if mddev
936  * has a bitmap. Otherwise, it returns 0.
937  *
938  */
939 int md_check_no_bitmap(struct mddev *mddev)
940 {
941         if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
942                 return 0;
943         pr_warn("%s: bitmaps are not supported for %s\n",
944                 mdname(mddev), mddev->pers->name);
945         return 1;
946 }
947 EXPORT_SYMBOL(md_check_no_bitmap);
948
949 /*
950  * load_super for 0.90.0
951  */
952 static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
953 {
954         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
955         mdp_super_t *sb;
956         int ret;
957
958         /*
959          * Calculate the position of the superblock (in 512-byte sectors);
960          * it's at the end of the disk.
961          *
962          * It also happens to be a multiple of 4Kb.
963          */
964         rdev->sb_start = calc_dev_sboffset(rdev);
965
966         ret = read_disk_sb(rdev, MD_SB_BYTES);
967         if (ret)
968                 return ret;
969
970         ret = -EINVAL;
971
972         bdevname(rdev->bdev, b);
973         sb = page_address(rdev->sb_page);
974
975         if (sb->md_magic != MD_SB_MAGIC) {
976                 pr_warn("md: invalid raid superblock magic on %s\n", b);
977                 goto abort;
978         }
979
980         if (sb->major_version != 0 ||
981             sb->minor_version < 90 ||
982             sb->minor_version > 91) {
983                 pr_warn("Bad version number %d.%d on %s\n",
984                         sb->major_version, sb->minor_version, b);
985                 goto abort;
986         }
987
988         if (sb->raid_disks <= 0)
989                 goto abort;
990
991         if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
992                 pr_warn("md: invalid superblock checksum on %s\n", b);
993                 goto abort;
994         }
995
996         rdev->preferred_minor = sb->md_minor;
997         rdev->data_offset = 0;
998         rdev->new_data_offset = 0;
999         rdev->sb_size = MD_SB_BYTES;
1000         rdev->badblocks.shift = -1;
1001
1002         if (sb->level == LEVEL_MULTIPATH)
1003                 rdev->desc_nr = -1;
1004         else
1005                 rdev->desc_nr = sb->this_disk.number;
1006
1007         if (!refdev) {
1008                 ret = 1;
1009         } else {
1010                 __u64 ev1, ev2;
1011                 mdp_super_t *refsb = page_address(refdev->sb_page);
1012                 if (!uuid_equal(refsb, sb)) {
1013                         pr_warn("md: %s has different UUID to %s\n",
1014                                 b, bdevname(refdev->bdev,b2));
1015                         goto abort;
1016                 }
1017                 if (!sb_equal(refsb, sb)) {
1018                         pr_warn("md: %s has same UUID but different superblock to %s\n",
1019                                 b, bdevname(refdev->bdev, b2));
1020                         goto abort;
1021                 }
1022                 ev1 = md_event(sb);
1023                 ev2 = md_event(refsb);
1024                 if (ev1 > ev2)
1025                         ret = 1;
1026                 else
1027                         ret = 0;
1028         }
1029         rdev->sectors = rdev->sb_start;
1030         /* Limit to 4TB as metadata cannot record more than that.
1031          * (not needed for Linear and RAID0 as metadata doesn't
1032          * record this size)
1033          */
1034         if (IS_ENABLED(CONFIG_LBDAF) && (u64)rdev->sectors >= (2ULL << 32) &&
1035             sb->level >= 1)
1036                 rdev->sectors = (sector_t)(2ULL << 32) - 2;
1037
1038         if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
1039                 /* "this cannot possibly happen" ... */
1040                 ret = -EINVAL;
1041
1042  abort:
1043         return ret;
1044 }
1045
1046 /*
1047  * validate_super for 0.90.0
1048  */
1049 static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
1050 {
1051         mdp_disk_t *desc;
1052         mdp_super_t *sb = page_address(rdev->sb_page);
1053         __u64 ev1 = md_event(sb);
1054
1055         rdev->raid_disk = -1;
1056         clear_bit(Faulty, &rdev->flags);
1057         clear_bit(In_sync, &rdev->flags);
1058         clear_bit(Bitmap_sync, &rdev->flags);
1059         clear_bit(WriteMostly, &rdev->flags);
1060
1061         if (mddev->raid_disks == 0) {
1062                 mddev->major_version = 0;
1063                 mddev->minor_version = sb->minor_version;
1064                 mddev->patch_version = sb->patch_version;
1065                 mddev->external = 0;
1066                 mddev->chunk_sectors = sb->chunk_size >> 9;
1067                 mddev->ctime = sb->ctime;
1068                 mddev->utime = sb->utime;
1069                 mddev->level = sb->level;
1070                 mddev->clevel[0] = 0;
1071                 mddev->layout = sb->layout;
1072                 mddev->raid_disks = sb->raid_disks;
1073                 mddev->dev_sectors = ((sector_t)sb->size) * 2;
1074                 mddev->events = ev1;
1075                 mddev->bitmap_info.offset = 0;
1076                 mddev->bitmap_info.space = 0;
1077                 /* bitmap can use 60 K after the 4K superblocks */
1078                 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
1079                 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
1080                 mddev->reshape_backwards = 0;
1081
1082                 if (mddev->minor_version >= 91) {
1083                         mddev->reshape_position = sb->reshape_position;
1084                         mddev->delta_disks = sb->delta_disks;
1085                         mddev->new_level = sb->new_level;
1086                         mddev->new_layout = sb->new_layout;
1087                         mddev->new_chunk_sectors = sb->new_chunk >> 9;
1088                         if (mddev->delta_disks < 0)
1089                                 mddev->reshape_backwards = 1;
1090                 } else {
1091                         mddev->reshape_position = MaxSector;
1092                         mddev->delta_disks = 0;
1093                         mddev->new_level = mddev->level;
1094                         mddev->new_layout = mddev->layout;
1095                         mddev->new_chunk_sectors = mddev->chunk_sectors;
1096                 }
1097
1098                 if (sb->state & (1<<MD_SB_CLEAN))
1099                         mddev->recovery_cp = MaxSector;
1100                 else {
1101                         if (sb->events_hi == sb->cp_events_hi &&
1102                                 sb->events_lo == sb->cp_events_lo) {
1103                                 mddev->recovery_cp = sb->recovery_cp;
1104                         } else
1105                                 mddev->recovery_cp = 0;
1106                 }
1107
1108                 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
1109                 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
1110                 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
1111                 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
1112
1113                 mddev->max_disks = MD_SB_DISKS;
1114
1115                 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
1116                     mddev->bitmap_info.file == NULL) {
1117                         mddev->bitmap_info.offset =
1118                                 mddev->bitmap_info.default_offset;
1119                         mddev->bitmap_info.space =
1120                                 mddev->bitmap_info.default_space;
1121                 }
1122
1123         } else if (mddev->pers == NULL) {
1124                 /* Insist on good event counter while assembling, except
1125                  * for spares (which don't need an event count) */
1126                 ++ev1;
1127                 if (sb->disks[rdev->desc_nr].state & (
1128                             (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
1129                         if (ev1 < mddev->events)
1130                                 return -EINVAL;
1131         } else if (mddev->bitmap) {
1132                 /* if adding to array with a bitmap, then we can accept an
1133                  * older device ... but not too old.
1134                  */
1135                 if (ev1 < mddev->bitmap->events_cleared)
1136                         return 0;
1137                 if (ev1 < mddev->events)
1138                         set_bit(Bitmap_sync, &rdev->flags);
1139         } else {
1140                 if (ev1 < mddev->events)
1141                         /* just a hot-add of a new device, leave raid_disk at -1 */
1142                         return 0;
1143         }
1144
1145         if (mddev->level != LEVEL_MULTIPATH) {
1146                 desc = sb->disks + rdev->desc_nr;
1147
1148                 if (desc->state & (1<<MD_DISK_FAULTY))
1149                         set_bit(Faulty, &rdev->flags);
1150                 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
1151                             desc->raid_disk < mddev->raid_disks */) {
1152                         set_bit(In_sync, &rdev->flags);
1153                         rdev->raid_disk = desc->raid_disk;
1154                         rdev->saved_raid_disk = desc->raid_disk;
1155                 } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
1156                         /* active but not in sync implies recovery up to
1157                          * reshape position.  We don't know exactly where
1158                          * that is, so set to zero for now */
1159                         if (mddev->minor_version >= 91) {
1160                                 rdev->recovery_offset = 0;
1161                                 rdev->raid_disk = desc->raid_disk;
1162                         }
1163                 }
1164                 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
1165                         set_bit(WriteMostly, &rdev->flags);
1166         } else /* MULTIPATH are always insync */
1167                 set_bit(In_sync, &rdev->flags);
1168         return 0;
1169 }
1170
1171 /*
1172  * sync_super for 0.90.0
1173  */
1174 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
1175 {
1176         mdp_super_t *sb;
1177         struct md_rdev *rdev2;
1178         int next_spare = mddev->raid_disks;
1179
1180         /* make rdev->sb match mddev data..
1181          *
1182          * 1/ zero out disks
1183          * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
1184          * 3/ any empty disks < next_spare become removed
1185          *
1186          * disks[0] gets initialised to REMOVED because
1187          * we cannot be sure from other fields if it has
1188          * been initialised or not.
1189          */
1190         int i;
1191         int active=0, working=0,failed=0,spare=0,nr_disks=0;
1192
1193         rdev->sb_size = MD_SB_BYTES;
1194
1195         sb = page_address(rdev->sb_page);
1196
1197         memset(sb, 0, sizeof(*sb));
1198
1199         sb->md_magic = MD_SB_MAGIC;
1200         sb->major_version = mddev->major_version;
1201         sb->patch_version = mddev->patch_version;
1202         sb->gvalid_words  = 0; /* ignored */
1203         memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
1204         memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
1205         memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
1206         memcpy(&sb->set_uuid3, mddev->uuid+12,4);
1207
1208         sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
1209         sb->level = mddev->level;
1210         sb->size = mddev->dev_sectors / 2;
1211         sb->raid_disks = mddev->raid_disks;
1212         sb->md_minor = mddev->md_minor;
1213         sb->not_persistent = 0;
1214         sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
1215         sb->state = 0;
1216         sb->events_hi = (mddev->events>>32);
1217         sb->events_lo = (u32)mddev->events;
1218
1219         if (mddev->reshape_position == MaxSector)
1220                 sb->minor_version = 90;
1221         else {
1222                 sb->minor_version = 91;
1223                 sb->reshape_position = mddev->reshape_position;
1224                 sb->new_level = mddev->new_level;
1225                 sb->delta_disks = mddev->delta_disks;
1226                 sb->new_layout = mddev->new_layout;
1227                 sb->new_chunk = mddev->new_chunk_sectors << 9;
1228         }
1229         mddev->minor_version = sb->minor_version;
1230         if (mddev->in_sync)
1231         {
1232                 sb->recovery_cp = mddev->recovery_cp;
1233                 sb->cp_events_hi = (mddev->events>>32);
1234                 sb->cp_events_lo = (u32)mddev->events;
1235                 if (mddev->recovery_cp == MaxSector)
1236                         sb->state = (1<< MD_SB_CLEAN);
1237         } else
1238                 sb->recovery_cp = 0;
1239
1240         sb->layout = mddev->layout;
1241         sb->chunk_size = mddev->chunk_sectors << 9;
1242
1243         if (mddev->bitmap && mddev->bitmap_info.file == NULL)
1244                 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
1245
1246         sb->disks[0].state = (1<<MD_DISK_REMOVED);
1247         rdev_for_each(rdev2, mddev) {
1248                 mdp_disk_t *d;
1249                 int desc_nr;
1250                 int is_active = test_bit(In_sync, &rdev2->flags);
1251
1252                 if (rdev2->raid_disk >= 0 &&
1253                     sb->minor_version >= 91)
1254                         /* we have nowhere to store the recovery_offset,
1255                          * but if it is not below the reshape_position,
1256                          * we can piggy-back on that.
1257                          */
1258                         is_active = 1;
1259                 if (rdev2->raid_disk < 0 ||
1260                     test_bit(Faulty, &rdev2->flags))
1261                         is_active = 0;
1262                 if (is_active)
1263                         desc_nr = rdev2->raid_disk;
1264                 else
1265                         desc_nr = next_spare++;
1266                 rdev2->desc_nr = desc_nr;
1267                 d = &sb->disks[rdev2->desc_nr];
1268                 nr_disks++;
1269                 d->number = rdev2->desc_nr;
1270                 d->major = MAJOR(rdev2->bdev->bd_dev);
1271                 d->minor = MINOR(rdev2->bdev->bd_dev);
1272                 if (is_active)
1273                         d->raid_disk = rdev2->raid_disk;
1274                 else
1275                         d->raid_disk = rdev2->desc_nr; /* compatibility */
1276                 if (test_bit(Faulty, &rdev2->flags))
1277                         d->state = (1<<MD_DISK_FAULTY);
1278                 else if (is_active) {
1279                         d->state = (1<<MD_DISK_ACTIVE);
1280                         if (test_bit(In_sync, &rdev2->flags))
1281                                 d->state |= (1<<MD_DISK_SYNC);
1282                         active++;
1283                         working++;
1284                 } else {
1285                         d->state = 0;
1286                         spare++;
1287                         working++;
1288                 }
1289                 if (test_bit(WriteMostly, &rdev2->flags))
1290                         d->state |= (1<<MD_DISK_WRITEMOSTLY);
1291         }
1292         /* now set the "removed" and "faulty" bits on any missing devices */
1293         for (i=0 ; i < mddev->raid_disks ; i++) {
1294                 mdp_disk_t *d = &sb->disks[i];
1295                 if (d->state == 0 && d->number == 0) {
1296                         d->number = i;
1297                         d->raid_disk = i;
1298                         d->state = (1<<MD_DISK_REMOVED);
1299                         d->state |= (1<<MD_DISK_FAULTY);
1300                         failed++;
1301                 }
1302         }
1303         sb->nr_disks = nr_disks;
1304         sb->active_disks = active;
1305         sb->working_disks = working;
1306         sb->failed_disks = failed;
1307         sb->spare_disks = spare;
1308
1309         sb->this_disk = sb->disks[rdev->desc_nr];
1310         sb->sb_csum = calc_sb_csum(sb);
1311 }
1312
1313 /*
1314  * rdev_size_change for 0.90.0
1315  */
1316 static unsigned long long
1317 super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1318 {
1319         if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1320                 return 0; /* component must fit device */
1321         if (rdev->mddev->bitmap_info.offset)
1322                 return 0; /* can't move bitmap */
1323         rdev->sb_start = calc_dev_sboffset(rdev);
1324         if (!num_sectors || num_sectors > rdev->sb_start)
1325                 num_sectors = rdev->sb_start;
1326         /* Limit to 4TB as metadata cannot record more than that.
1327          * 4TB == 2^32 KB, or 2*2^32 sectors.
1328          */
1329         if (IS_ENABLED(CONFIG_LBDAF) && (u64)num_sectors >= (2ULL << 32) &&
1330             rdev->mddev->level >= 1)
1331                 num_sectors = (sector_t)(2ULL << 32) - 2;
1332         md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1333                        rdev->sb_page);
1334         md_super_wait(rdev->mddev);
1335         return num_sectors;
1336 }
1337
1338 static int
1339 super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
1340 {
1341         /* non-zero offset changes not possible with v0.90 */
1342         return new_offset == 0;
1343 }
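/*
 * Editor's sketch, illustrative only: how the 0.90 handlers above would be
 * wired into the super_type interface described earlier.  The driver's real
 * handler table is built the same way; this copy is compiled out.
 */
#if 0
static struct super_type example_super_types[] = {
        {
                .name             = "0.90.0",
                .owner            = THIS_MODULE,
                .load_super       = super_90_load,
                .validate_super   = super_90_validate,
                .sync_super       = super_90_sync,
                .rdev_size_change = super_90_rdev_size_change,
                .allow_new_offset = super_90_allow_new_offset,
        },
};
#endif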
1344
1345 /*
1346  * version 1 superblock
1347  */
1348
1349 static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
1350 {
1351         __le32 disk_csum;
1352         u32 csum;
1353         unsigned long long newcsum;
1354         int size = 256 + le32_to_cpu(sb->max_dev)*2;
1355         __le32 *isuper = (__le32*)sb;
1356
1357         disk_csum = sb->sb_csum;
1358         sb->sb_csum = 0;
1359         newcsum = 0;
1360         for (; size >= 4; size -= 4)
1361                 newcsum += le32_to_cpu(*isuper++);
1362
1363         if (size == 2)
1364                 newcsum += le16_to_cpu(*(__le16*) isuper);
1365
1366         csum = (newcsum & 0xffffffff) + (newcsum >> 32);
1367         sb->sb_csum = disk_csum;
1368         return cpu_to_le32(csum);
1369 }
1370
1371 static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
1372 {
1373         struct mdp_superblock_1 *sb;
1374         int ret;
1375         sector_t sb_start;
1376         sector_t sectors;
1377         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1378         int bmask;
1379
1380         /*
1381          * Calculate the position of the superblock in 512-byte sectors.
1382          * It is always aligned to a 4K boundary and
1383          * depending on minor_version, it can be:
1384          * 0: At least 8K, but less than 12K, from end of device
1385          * 1: At start of device
1386          * 2: 4K from start of device.
1387          */
1388         switch(minor_version) {
1389         case 0:
1390                 sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
1391                 sb_start -= 8*2;
1392                 sb_start &= ~(sector_t)(4*2-1);
1393                 break;
1394         case 1:
1395                 sb_start = 0;
1396                 break;
1397         case 2:
1398                 sb_start = 8;
1399                 break;
1400         default:
1401                 return -EINVAL;
1402         }
1403         rdev->sb_start = sb_start;
1404
1405         /* superblock is rarely larger than 1K, but it can be larger,
1406          * and it is safe to read 4k, so we do that
1407          */
1408         ret = read_disk_sb(rdev, 4096);
1409         if (ret) return ret;
1410
1411         sb = page_address(rdev->sb_page);
1412
1413         if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1414             sb->major_version != cpu_to_le32(1) ||
1415             le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1416             le64_to_cpu(sb->super_offset) != rdev->sb_start ||
1417             (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1418                 return -EINVAL;
1419
1420         if (calc_sb_1_csum(sb) != sb->sb_csum) {
1421                 pr_warn("md: invalid superblock checksum on %s\n",
1422                         bdevname(rdev->bdev,b));
1423                 return -EINVAL;
1424         }
1425         if (le64_to_cpu(sb->data_size) < 10) {
1426                 pr_warn("md: data_size too small on %s\n",
1427                         bdevname(rdev->bdev,b));
1428                 return -EINVAL;
1429         }
1430         if (sb->pad0 ||
1431             sb->pad3[0] ||
1432             memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
1433                 /* Some padding is non-zero, might be a new feature */
1434                 return -EINVAL;
1435
1436         rdev->preferred_minor = 0xffff;
1437         rdev->data_offset = le64_to_cpu(sb->data_offset);
1438         rdev->new_data_offset = rdev->data_offset;
1439         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
1440             (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
1441                 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
1442         atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1443
1444         rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1445         bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1446         if (rdev->sb_size & bmask)
1447                 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1448
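             /* for 1.1/1.2 layouts the data follows the superblock, so the
              * data offsets must clear the superblock itself */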
1449         if (minor_version
1450             && rdev->data_offset < sb_start + (rdev->sb_size/512))
1451                 return -EINVAL;
1452         if (minor_version
1453             && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
1454                 return -EINVAL;
1455
1456         if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1457                 rdev->desc_nr = -1;
1458         else
1459                 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1460
1461         if (!rdev->bb_page) {
1462                 rdev->bb_page = alloc_page(GFP_KERNEL);
1463                 if (!rdev->bb_page)
1464                         return -ENOMEM;
1465         }
1466         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
1467             rdev->badblocks.count == 0) {
1468                 /* need to load the bad block list.
1469                  * Currently we limit it to one page.
1470                  */
1471                 s32 offset;
1472                 sector_t bb_sector;
1473                 u64 *bbp;
1474                 int i;
1475                 int sectors = le16_to_cpu(sb->bblog_size);
1476                 if (sectors > (PAGE_SIZE / 512))
1477                         return -EINVAL;
1478                 offset = le32_to_cpu(sb->bblog_offset);
1479                 if (offset == 0)
1480                         return -EINVAL;
1481                 bb_sector = (long long)offset;
1482                 if (!sync_page_io(rdev, bb_sector, sectors << 9,
1483                                   rdev->bb_page, REQ_OP_READ, 0, true))
1484                         return -EIO;
1485                 bbp = (u64 *)page_address(rdev->bb_page);
1486                 rdev->badblocks.shift = sb->bblog_shift;
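                     /* each 64-bit entry stores the start sector in the high
                      * 54 bits and the length in the low 10 bits, both scaled
                      * by bblog_shift; an all-ones entry ends the list */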
1487                 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
1488                         u64 bb = le64_to_cpu(*bbp);
1489                         int count = bb & (0x3ff);
1490                         u64 sector = bb >> 10;
1491                         sector <<= sb->bblog_shift;
1492                         count <<= sb->bblog_shift;
1493                         if (bb + 1 == 0)
1494                                 break;
1495                         if (badblocks_set(&rdev->badblocks, sector, count, 1))
1496                                 return -EINVAL;
1497                 }
1498         } else if (sb->bblog_offset != 0)
1499                 rdev->badblocks.shift = 0;
1500
1501         if (!refdev) {
1502                 ret = 1;
1503         } else {
1504                 __u64 ev1, ev2;
1505                 struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
1506
1507                 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1508                     sb->level != refsb->level ||
1509                     sb->layout != refsb->layout ||
1510                     sb->chunksize != refsb->chunksize) {
1511                         pr_warn("md: %s has strangely different superblock to %s\n",
1512                                 bdevname(rdev->bdev,b),
1513                                 bdevname(refdev->bdev,b2));
1514                         return -EINVAL;
1515                 }
1516                 ev1 = le64_to_cpu(sb->events);
1517                 ev2 = le64_to_cpu(refsb->events);
1518
1519                 if (ev1 > ev2)
1520                         ret = 1;
1521                 else
1522                         ret = 0;
1523         }
1524         if (minor_version) {
1525                 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
1526                 sectors -= rdev->data_offset;
1527         } else
1528                 sectors = rdev->sb_start;
1529         if (sectors < le64_to_cpu(sb->data_size))
1530                 return -EINVAL;
1531         rdev->sectors = le64_to_cpu(sb->data_size);
1532         return ret;
1533 }
1534
1535 static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
1536 {
1537         struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
1538         __u64 ev1 = le64_to_cpu(sb->events);
1539
1540         rdev->raid_disk = -1;
1541         clear_bit(Faulty, &rdev->flags);
1542         clear_bit(In_sync, &rdev->flags);
1543         clear_bit(Bitmap_sync, &rdev->flags);
1544         clear_bit(WriteMostly, &rdev->flags);
1545
1546         if (mddev->raid_disks == 0) {
1547                 mddev->major_version = 1;
1548                 mddev->patch_version = 0;
1549                 mddev->external = 0;
1550                 mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
1551                 mddev->ctime = le64_to_cpu(sb->ctime);
1552                 mddev->utime = le64_to_cpu(sb->utime);
1553                 mddev->level = le32_to_cpu(sb->level);
1554                 mddev->clevel[0] = 0;
1555                 mddev->layout = le32_to_cpu(sb->layout);
1556                 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1557                 mddev->dev_sectors = le64_to_cpu(sb->size);
1558                 mddev->events = ev1;
1559                 mddev->bitmap_info.offset = 0;
1560                 mddev->bitmap_info.space = 0;
1561                 /* Default location for the bitmap is 1K after the superblock,
1562                  * using up to 3K - 4K in total.
1563                  */
1564                 mddev->bitmap_info.default_offset = 1024 >> 9;
1565                 mddev->bitmap_info.default_space = (4096-1024) >> 9;
1566                 mddev->reshape_backwards = 0;
1567
1568                 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1569                 memcpy(mddev->uuid, sb->set_uuid, 16);
1570
1571                 mddev->max_disks =  (4096-256)/2;
1572
1573                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1574                     mddev->bitmap_info.file == NULL) {
1575                         mddev->bitmap_info.offset =
1576                                 (__s32)le32_to_cpu(sb->bitmap_offset);
1577                         /* Metadata doesn't record how much space is available.
1578                          * For 1.0, assume the bitmap can use up to the superblock
1579                          * if it sits before it, else up to 4K beyond the superblock.
1580                          * For others, assume no change is possible.
1581                          */
1582                         if (mddev->minor_version > 0)
1583                                 mddev->bitmap_info.space = 0;
1584                         else if (mddev->bitmap_info.offset > 0)
1585                                 mddev->bitmap_info.space =
1586                                         8 - mddev->bitmap_info.offset;
1587                         else
1588                                 mddev->bitmap_info.space =
1589                                         -mddev->bitmap_info.offset;
1590                 }
1591
1592                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1593                         mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1594                         mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1595                         mddev->new_level = le32_to_cpu(sb->new_level);
1596                         mddev->new_layout = le32_to_cpu(sb->new_layout);
1597                         mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
1598                         if (mddev->delta_disks < 0 ||
1599                             (mddev->delta_disks == 0 &&
1600                              (le32_to_cpu(sb->feature_map)
1601                               & MD_FEATURE_RESHAPE_BACKWARDS)))
1602                                 mddev->reshape_backwards = 1;
1603                 } else {
1604                         mddev->reshape_position = MaxSector;
1605                         mddev->delta_disks = 0;
1606                         mddev->new_level = mddev->level;
1607                         mddev->new_layout = mddev->layout;
1608                         mddev->new_chunk_sectors = mddev->chunk_sectors;
1609                 }
1610
1611                 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
1612                         set_bit(MD_HAS_JOURNAL, &mddev->flags);
1613         } else if (mddev->pers == NULL) {
1614                 /* Insist on a good event counter while assembling, except for
1615                  * spares (which don't need an event count) */
1616                 ++ev1;
1617                 if (rdev->desc_nr >= 0 &&
1618                     rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1619                     (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1620                      le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
1621                         if (ev1 < mddev->events)
1622                                 return -EINVAL;
1623         } else if (mddev->bitmap) {
1624                 /* If adding to array with a bitmap, then we can accept an
1625                  * older device, but not too old.
1626                  */
1627                 if (ev1 < mddev->bitmap->events_cleared)
1628                         return 0;
1629                 if (ev1 < mddev->events)
1630                         set_bit(Bitmap_sync, &rdev->flags);
1631         } else {
1632                 if (ev1 < mddev->events)
1633                         /* just a hot-add of a new device, leave raid_disk at -1 */
1634                         return 0;
1635         }
1636         if (mddev->level != LEVEL_MULTIPATH) {
1637                 int role;
1638                 if (rdev->desc_nr < 0 ||
1639                     rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
1640                         role = MD_DISK_ROLE_SPARE;
1641                         rdev->desc_nr = -1;
1642                 } else
1643                         role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1644                 switch(role) {
1645                 case MD_DISK_ROLE_SPARE: /* spare */
1646                         break;
1647                 case MD_DISK_ROLE_FAULTY: /* faulty */
1648                         set_bit(Faulty, &rdev->flags);
1649                         break;
1650                 case MD_DISK_ROLE_JOURNAL: /* journal device */
1651                         if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
1652                                 /* journal device without journal feature */
1653                                 pr_warn("md: journal device provided without journal feature, ignoring the device\n");
1654                                 return -EINVAL;
1655                         }
1656                         set_bit(Journal, &rdev->flags);
1657                         rdev->journal_tail = le64_to_cpu(sb->journal_tail);
1658                         rdev->raid_disk = 0;
1659                         break;
1660                 default:
1661                         rdev->saved_raid_disk = role;
1662                         if ((le32_to_cpu(sb->feature_map) &
1663                              MD_FEATURE_RECOVERY_OFFSET)) {
1664                                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1665                                 if (!(le32_to_cpu(sb->feature_map) &
1666                                       MD_FEATURE_RECOVERY_BITMAP))
1667                                         rdev->saved_raid_disk = -1;
1668                         } else
1669                                 set_bit(In_sync, &rdev->flags);
1670                         rdev->raid_disk = role;
1671                         break;
1672                 }
1673                 if (sb->devflags & WriteMostly1)
1674                         set_bit(WriteMostly, &rdev->flags);
1675                 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
1676                         set_bit(Replacement, &rdev->flags);
1677         } else /* MULTIPATH are always insync */
1678                 set_bit(In_sync, &rdev->flags);
1679
1680         return 0;
1681 }
1682
1683 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
1684 {
1685         struct mdp_superblock_1 *sb;
1686         struct md_rdev *rdev2;
1687         int max_dev, i;
1688         /* make rdev->sb match mddev and rdev data. */
1689
1690         sb = page_address(rdev->sb_page);
1691
1692         sb->feature_map = 0;
1693         sb->pad0 = 0;
1694         sb->recovery_offset = cpu_to_le64(0);
1695         memset(sb->pad3, 0, sizeof(sb->pad3));
1696
1697         sb->utime = cpu_to_le64((__u64)mddev->utime);
1698         sb->events = cpu_to_le64(mddev->events);
1699         if (mddev->in_sync)
1700                 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
1701         else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
1702                 sb->resync_offset = cpu_to_le64(MaxSector);
1703         else
1704                 sb->resync_offset = cpu_to_le64(0);
1705
1706         sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
1707
1708         sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1709         sb->size = cpu_to_le64(mddev->dev_sectors);
1710         sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
1711         sb->level = cpu_to_le32(mddev->level);
1712         sb->layout = cpu_to_le32(mddev->layout);
1713
1714         if (test_bit(WriteMostly, &rdev->flags))
1715                 sb->devflags |= WriteMostly1;
1716         else
1717                 sb->devflags &= ~WriteMostly1;
1718         sb->data_offset = cpu_to_le64(rdev->data_offset);
1719         sb->data_size = cpu_to_le64(rdev->sectors);
1720
1721         if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
1722                 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
1723                 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
1724         }
1725
1726         if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) &&
1727             !test_bit(In_sync, &rdev->flags)) {
1728                 sb->feature_map |=
1729                         cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
1730                 sb->recovery_offset =
1731                         cpu_to_le64(rdev->recovery_offset);
1732                 if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
1733                         sb->feature_map |=
1734                                 cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
1735         }
1736         /* Note: recovery_offset and journal_tail share space  */
1737         if (test_bit(Journal, &rdev->flags))
1738                 sb->journal_tail = cpu_to_le64(rdev->journal_tail);
1739         if (test_bit(Replacement, &rdev->flags))
1740                 sb->feature_map |=
1741                         cpu_to_le32(MD_FEATURE_REPLACEMENT);
1742
1743         if (mddev->reshape_position != MaxSector) {
1744                 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
1745                 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
1746                 sb->new_layout = cpu_to_le32(mddev->new_layout);
1747                 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
1748                 sb->new_level = cpu_to_le32(mddev->new_level);
1749                 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
1750                 if (mddev->delta_disks == 0 &&
1751                     mddev->reshape_backwards)
1752                         sb->feature_map
1753                                 |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
1754                 if (rdev->new_data_offset != rdev->data_offset) {
1755                         sb->feature_map
1756                                 |= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
1757                         sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
1758                                                              - rdev->data_offset));
1759                 }
1760         }
1761
1762         if (mddev_is_clustered(mddev))
1763                 sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED);
1764
1765         if (rdev->badblocks.count == 0)
1766                 /* Nothing to do for bad blocks */ ;
1767         else if (sb->bblog_offset == 0)
1768                 /* Cannot record bad blocks on this device */
1769                 md_error(mddev, rdev);
1770         else {
1771                 struct badblocks *bb = &rdev->badblocks;
1772                 u64 *bbp = (u64 *)page_address(rdev->bb_page);
1773                 u64 *p = bb->page;
1774                 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
1775                 if (bb->changed) {
1776                         unsigned seq;
1777
1778 retry:
1779                         seq = read_seqbegin(&bb->lock);
1780
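                             /* entries not overwritten below stay all-ones,
                              * which super_1_load treats as end-of-list */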
1781                         memset(bbp, 0xff, PAGE_SIZE);
1782
1783                         for (i = 0 ; i < bb->count ; i++) {
1784                                 u64 internal_bb = p[i];
1785                                 u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
1786                                                 | BB_LEN(internal_bb));
1787                                 bbp[i] = cpu_to_le64(store_bb);
1788                         }
1789                         bb->changed = 0;
1790                         if (read_seqretry(&bb->lock, seq))
1791                                 goto retry;
1792
1793                         bb->sector = (rdev->sb_start +
1794                                       (int)le32_to_cpu(sb->bblog_offset));
1795                         bb->size = le16_to_cpu(sb->bblog_size);
1796                 }
1797         }
1798
1799         max_dev = 0;
1800         rdev_for_each(rdev2, mddev)
1801                 if (rdev2->desc_nr+1 > max_dev)
1802                         max_dev = rdev2->desc_nr+1;
1803
1804         if (max_dev > le32_to_cpu(sb->max_dev)) {
1805                 int bmask;
1806                 sb->max_dev = cpu_to_le32(max_dev);
1807                 rdev->sb_size = max_dev * 2 + 256;
1808                 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1809                 if (rdev->sb_size & bmask)
1810                         rdev->sb_size = (rdev->sb_size | bmask) + 1;
1811         } else
1812                 max_dev = le32_to_cpu(sb->max_dev);
1813
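             /* default every slot to 'faulty'; slots that have an rdev are
              * filled in below */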
1814         for (i=0; i<max_dev;i++)
1815                 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
1816
1817         if (test_bit(MD_HAS_JOURNAL, &mddev->flags))
1818                 sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL);
1819
1820         rdev_for_each(rdev2, mddev) {
1821                 i = rdev2->desc_nr;
1822                 if (test_bit(Faulty, &rdev2->flags))
1823                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
1824                 else if (test_bit(In_sync, &rdev2->flags))
1825                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1826                 else if (test_bit(Journal, &rdev2->flags))
1827                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL);
1828                 else if (rdev2->raid_disk >= 0)
1829                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1830                 else
1831                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
1832         }
1833
1834         sb->sb_csum = calc_sb_1_csum(sb);
1835 }
1836
1837 static unsigned long long
1838 super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1839 {
1840         struct mdp_superblock_1 *sb;
1841         sector_t max_sectors;
1842         if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1843                 return 0; /* component must fit device */
1844         if (rdev->data_offset != rdev->new_data_offset)
1845                 return 0; /* too confusing */
1846         if (rdev->sb_start < rdev->data_offset) {
1847                 /* minor versions 1 and 2; superblock before data */
1848                 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
1849                 max_sectors -= rdev->data_offset;
1850                 if (!num_sectors || num_sectors > max_sectors)
1851                         num_sectors = max_sectors;
1852         } else if (rdev->mddev->bitmap_info.offset) {
1853                 /* minor version 0 with bitmap we can't move */
1854                 return 0;
1855         } else {
1856                 /* minor version 0; superblock after data */
1857                 sector_t sb_start;
1858                 sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
1859                 sb_start &= ~(sector_t)(4*2 - 1);
1860                 max_sectors = rdev->sectors + sb_start - rdev->sb_start;
1861                 if (!num_sectors || num_sectors > max_sectors)
1862                         num_sectors = max_sectors;
1863                 rdev->sb_start = sb_start;
1864         }
1865         sb = page_address(rdev->sb_page);
1866         sb->data_size = cpu_to_le64(num_sectors);
1867         sb->super_offset = rdev->sb_start;
1868         sb->sb_csum = calc_sb_1_csum(sb);
1869         md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1870                        rdev->sb_page);
1871         md_super_wait(rdev->mddev);
1872         return num_sectors;
1873
1874 }
1875
1876 static int
1877 super_1_allow_new_offset(struct md_rdev *rdev,
1878                          unsigned long long new_offset)
1879 {
1880         /* All necessary checks on new >= old have been done */
1881         struct bitmap *bitmap;
1882         if (new_offset >= rdev->data_offset)
1883                 return 1;
1884
1885         /* with 1.0 metadata, there is no metadata before the data to
1886          * tread on, so we can always move the data offset back */
1887         if (rdev->mddev->minor_version == 0)
1888                 return 1;
1889
1890         /* otherwise we must be sure not to step on
1891          * any metadata, so stay:
1892          * 36K beyond start of superblock
1893          * beyond end of badblocks
1894          * beyond write-intent bitmap
1895          */
1896         if (rdev->sb_start + (32+4)*2 > new_offset)
1897                 return 0;
1898         bitmap = rdev->mddev->bitmap;
1899         if (bitmap && !rdev->mddev->bitmap_info.file &&
1900             rdev->sb_start + rdev->mddev->bitmap_info.offset +
1901             bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
1902                 return 0;
1903         if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
1904                 return 0;
1905
1906         return 1;
1907 }
1908
1909 static struct super_type super_types[] = {
1910         [0] = {
1911                 .name   = "0.90.0",
1912                 .owner  = THIS_MODULE,
1913                 .load_super         = super_90_load,
1914                 .validate_super     = super_90_validate,
1915                 .sync_super         = super_90_sync,
1916                 .rdev_size_change   = super_90_rdev_size_change,
1917                 .allow_new_offset   = super_90_allow_new_offset,
1918         },
1919         [1] = {
1920                 .name   = "md-1",
1921                 .owner  = THIS_MODULE,
1922                 .load_super         = super_1_load,
1923                 .validate_super     = super_1_validate,
1924                 .sync_super         = super_1_sync,
1925                 .rdev_size_change   = super_1_rdev_size_change,
1926                 .allow_new_offset   = super_1_allow_new_offset,
1927         },
1928 };
1929
1930 static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
1931 {
1932         if (mddev->sync_super) {
1933                 mddev->sync_super(mddev, rdev);
1934                 return;
1935         }
1936
1937         BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
1938
1939         super_types[mddev->major_version].sync_super(mddev, rdev);
1940 }
1941
1942 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
1943 {
1944         struct md_rdev *rdev, *rdev2;
1945
1946         rcu_read_lock();
1947         rdev_for_each_rcu(rdev, mddev1) {
1948                 if (test_bit(Faulty, &rdev->flags) ||
1949                     test_bit(Journal, &rdev->flags) ||
1950                     rdev->raid_disk == -1)
1951                         continue;
1952                 rdev_for_each_rcu(rdev2, mddev2) {
1953                         if (test_bit(Faulty, &rdev2->flags) ||
1954                             test_bit(Journal, &rdev2->flags) ||
1955                             rdev2->raid_disk == -1)
1956                                 continue;
1957                         if (rdev->bdev->bd_contains ==
1958                             rdev2->bdev->bd_contains) {
1959                                 rcu_read_unlock();
1960                                 return 1;
1961                         }
1962                 }
1963         }
1964         rcu_read_unlock();
1965         return 0;
1966 }
1967
1968 static LIST_HEAD(pending_raid_disks);
1969
1970 /*
1971  * Try to register data integrity profile for an mddev
1972  *
1973  * This is called when an array is started and after a disk has been kicked
1974  * from the array. It only succeeds if all working and active component devices
1975  * are integrity capable with matching profiles.
1976  */
1977 int md_integrity_register(struct mddev *mddev)
1978 {
1979         struct md_rdev *rdev, *reference = NULL;
1980
1981         if (list_empty(&mddev->disks))
1982                 return 0; /* nothing to do */
1983         if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
1984                 return 0; /* shouldn't register, or already is */
1985         rdev_for_each(rdev, mddev) {
1986                 /* skip spares and non-functional disks */
1987                 if (test_bit(Faulty, &rdev->flags))
1988                         continue;
1989                 if (rdev->raid_disk < 0)
1990                         continue;
1991                 if (!reference) {
1992                         /* Use the first rdev as the reference */
1993                         reference = rdev;
1994                         continue;
1995                 }
1996                 /* does this rdev's profile match the reference profile? */
1997                 if (blk_integrity_compare(reference->bdev->bd_disk,
1998                                 rdev->bdev->bd_disk) < 0)
1999                         return -EINVAL;
2000         }
2001         if (!reference || !bdev_get_integrity(reference->bdev))
2002                 return 0;
2003         /*
2004          * All component devices are integrity capable and have matching
2005          * profiles, register the common profile for the md device.
2006          */
2007         blk_integrity_register(mddev->gendisk,
2008                                bdev_get_integrity(reference->bdev));
2009
2010         pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
2011         if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
2012                 pr_err("md: failed to create integrity pool for %s\n",
2013                        mdname(mddev));
2014                 return -EINVAL;
2015         }
2016         return 0;
2017 }
2018 EXPORT_SYMBOL(md_integrity_register);
2019
2020 /*
2021  * Attempt to add an rdev, but only if it is consistent with the current
2022  * integrity profile
2023  */
2024 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
2025 {
2026         struct blk_integrity *bi_rdev;
2027         struct blk_integrity *bi_mddev;
2028         char name[BDEVNAME_SIZE];
2029
2030         if (!mddev->gendisk)
2031                 return 0;
2032
2033         bi_rdev = bdev_get_integrity(rdev->bdev);
2034         bi_mddev = blk_get_integrity(mddev->gendisk);
2035
2036         if (!bi_mddev) /* nothing to do */
2037                 return 0;
2038
2039         if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
2040                 pr_err("%s: incompatible integrity profile for %s\n",
2041                        mdname(mddev), bdevname(rdev->bdev, name));
2042                 return -ENXIO;
2043         }
2044
2045         return 0;
2046 }
2047 EXPORT_SYMBOL(md_integrity_add_rdev);
2048
2049 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
2050 {
2051         char b[BDEVNAME_SIZE];
2052         struct kobject *ko;
2053         int err;
2054
2055         /* prevent duplicates */
2056         if (find_rdev(mddev, rdev->bdev->bd_dev))
2057                 return -EEXIST;
2058
2059         /* make sure rdev->sectors exceeds mddev->dev_sectors */
2060         if (!test_bit(Journal, &rdev->flags) &&
2061             rdev->sectors &&
2062             (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) {
2063                 if (mddev->pers) {
2064                         /* Cannot change size, so fail
2065                          * If mddev->level <= 0, then we don't care
2066                          * about aligning sizes (e.g. linear)
2067                          */
2068                         if (mddev->level > 0)
2069                                 return -ENOSPC;
2070                 } else
2071                         mddev->dev_sectors = rdev->sectors;
2072         }
2073
2074         /* Verify rdev->desc_nr is unique.
2075          * If it is -1, assign a free number, else
2076          * check number is not in use
2077          */
2078         rcu_read_lock();
2079         if (rdev->desc_nr < 0) {
2080                 int choice = 0;
2081                 if (mddev->pers)
2082                         choice = mddev->raid_disks;
2083                 while (md_find_rdev_nr_rcu(mddev, choice))
2084                         choice++;
2085                 rdev->desc_nr = choice;
2086         } else {
2087                 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
2088                         rcu_read_unlock();
2089                         return -EBUSY;
2090                 }
2091         }
2092         rcu_read_unlock();
2093         if (!test_bit(Journal, &rdev->flags) &&
2094             mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
2095                 pr_warn("md: %s: array is limited to %d devices\n",
2096                         mdname(mddev), mddev->max_disks);
2097                 return -EBUSY;
2098         }
2099         bdevname(rdev->bdev,b);
2100         strreplace(b, '/', '!');
2101
2102         rdev->mddev = mddev;
2103         pr_debug("md: bind<%s>\n", b);
2104
2105         if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
2106                 goto fail;
2107
2108         ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
2109         if (sysfs_create_link(&rdev->kobj, ko, "block"))
2110                 /* failure here is OK */;
2111         rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
2112
2113         list_add_rcu(&rdev->same_set, &mddev->disks);
2114         bd_link_disk_holder(rdev->bdev, mddev->gendisk);
2115
2116         /* May as well allow recovery to be retried once */
2117         mddev->recovery_disabled++;
2118
2119         return 0;
2120
2121  fail:
2122         pr_warn("md: failed to register dev-%s for %s\n",
2123                 b, mdname(mddev));
2124         return err;
2125 }
2126
2127 static void md_delayed_delete(struct work_struct *ws)
2128 {
2129         struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
2130         kobject_del(&rdev->kobj);
2131         kobject_put(&rdev->kobj);
2132 }
2133
2134 static void unbind_rdev_from_array(struct md_rdev *rdev)
2135 {
2136         char b[BDEVNAME_SIZE];
2137
2138         bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
2139         list_del_rcu(&rdev->same_set);
2140         pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
2141         rdev->mddev = NULL;
2142         sysfs_remove_link(&rdev->kobj, "block");
2143         sysfs_put(rdev->sysfs_state);
2144         rdev->sysfs_state = NULL;
2145         rdev->badblocks.count = 0;
2146         /* We need to delay this, otherwise we can deadlock when
2147          * 'remove' is written to "dev/state".  We also need
2148          * to delay it due to rcu usage.
2149          */
2150         synchronize_rcu();
2151         INIT_WORK(&rdev->del_work, md_delayed_delete);
2152         kobject_get(&rdev->kobj);
2153         queue_work(md_misc_wq, &rdev->del_work);
2154 }
2155
2156 /*
2157  * prevent the device from being mounted, repartitioned or
2158  * otherwise reused by a RAID array (or any other kernel
2159  * subsystem), by bd_claiming the device.
2160  */
2161 static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
2162 {
2163         int err = 0;
2164         struct block_device *bdev;
2165         char b[BDEVNAME_SIZE];
2166
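             /* a 'shared' claim uses the address of lock_rdev as the
              * exclusive-open holder, so other md openers passing the same
              * cookie can share the device */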
2167         bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
2168                                  shared ? (struct md_rdev *)lock_rdev : rdev);
2169         if (IS_ERR(bdev)) {
2170                 pr_warn("md: could not open %s.\n", __bdevname(dev, b));
2171                 return PTR_ERR(bdev);
2172         }
2173         rdev->bdev = bdev;
2174         return err;
2175 }
2176
2177 static void unlock_rdev(struct md_rdev *rdev)
2178 {
2179         struct block_device *bdev = rdev->bdev;
2180         rdev->bdev = NULL;
2181         blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
2182 }
2183
2184 void md_autodetect_dev(dev_t dev);
2185
2186 static void export_rdev(struct md_rdev *rdev)
2187 {
2188         char b[BDEVNAME_SIZE];
2189
2190         pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b));
2191         md_rdev_clear(rdev);
2192 #ifndef MODULE
2193         if (test_bit(AutoDetected, &rdev->flags))
2194                 md_autodetect_dev(rdev->bdev->bd_dev);
2195 #endif
2196         unlock_rdev(rdev);
2197         kobject_put(&rdev->kobj);
2198 }
2199
2200 void md_kick_rdev_from_array(struct md_rdev *rdev)
2201 {
2202         unbind_rdev_from_array(rdev);
2203         export_rdev(rdev);
2204 }
2205 EXPORT_SYMBOL_GPL(md_kick_rdev_from_array);
2206
2207 static void export_array(struct mddev *mddev)
2208 {
2209         struct md_rdev *rdev;
2210
2211         while (!list_empty(&mddev->disks)) {
2212                 rdev = list_first_entry(&mddev->disks, struct md_rdev,
2213                                         same_set);
2214                 md_kick_rdev_from_array(rdev);
2215         }
2216         mddev->raid_disks = 0;
2217         mddev->major_version = 0;
2218 }
2219
2220 static void sync_sbs(struct mddev *mddev, int nospares)
2221 {
2222         /* Update each superblock (in-memory image), but
2223          * if we are allowed to, skip spares which already
2224          * have the right event counter, or have one earlier
2225          * (which would mean they aren't being marked as dirty
2226          * with the rest of the array)
2227          */
2228         struct md_rdev *rdev;
2229         rdev_for_each(rdev, mddev) {
2230                 if (rdev->sb_events == mddev->events ||
2231                     (nospares &&
2232                      rdev->raid_disk < 0 &&
2233                      rdev->sb_events+1 == mddev->events)) {
2234                         /* Don't update this superblock */
2235                         rdev->sb_loaded = 2;
2236                 } else {
2237                         sync_super(mddev, rdev);
2238                         rdev->sb_loaded = 1;
2239                 }
2240         }
2241 }
2242
2243 static bool does_sb_need_changing(struct mddev *mddev)
2244 {
2245         struct md_rdev *rdev;
2246         struct mdp_superblock_1 *sb;
2247         int role;
2248
2249         /* Find a good rdev */
2250         rdev_for_each(rdev, mddev)
2251                 if ((rdev->raid_disk >= 0) && !test_bit(Faulty, &rdev->flags))
2252                         break;
2253
2254         /* No good device found. */
2255         if (!rdev)
2256                 return false;
2257
2258         sb = page_address(rdev->sb_page);
2259         /* Check if a device has become faulty or a spare has become active */
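             /* dev_roles encodes 0xffff for spare, 0xfffe for faulty and
              * 0xfffd for journal; smaller values are active slot numbers */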
2260         rdev_for_each(rdev, mddev) {
2261                 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
2262                 /* Device activated? */
2263                 if (role == 0xffff && rdev->raid_disk >=0 &&
2264                     !test_bit(Faulty, &rdev->flags))
2265                         return true;
2266                 /* Device turned faulty? */
2267                 if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd))
2268                         return true;
2269         }
2270
2271         /* Check if any mddev parameters have changed */
2272         if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
2273             (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
2274             (mddev->layout != le64_to_cpu(sb->layout)) ||
2275             (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
2276             (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
2277                 return true;
2278
2279         return false;
2280 }
2281
2282 void md_update_sb(struct mddev *mddev, int force_change)
2283 {
2284         struct md_rdev *rdev;
2285         int sync_req;
2286         int nospares = 0;
2287         int any_badblocks_changed = 0;
2288         int ret = -1;
2289
2290         if (mddev->ro) {
2291                 if (force_change)
2292                         set_bit(MD_CHANGE_DEVS, &mddev->flags);
2293                 return;
2294         }
2295
2296 repeat:
2297         if (mddev_is_clustered(mddev)) {
2298                 if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
2299                         force_change = 1;
2300                 if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
2301                         nospares = 1;
2302                 ret = md_cluster_ops->metadata_update_start(mddev);
2303                 /* Has someone else updated the sb? */
2304                 if (!does_sb_need_changing(mddev)) {
2305                         if (ret == 0)
2306                                 md_cluster_ops->metadata_update_cancel(mddev);
2307                         bit_clear_unless(&mddev->flags, BIT(MD_CHANGE_PENDING),
2308                                                          BIT(MD_CHANGE_DEVS) |
2309                                                          BIT(MD_CHANGE_CLEAN));
2310                         return;
2311                 }
2312         }
2313
2314         /* First make sure individual recovery_offsets are correct */
2315         rdev_for_each(rdev, mddev) {
2316                 if (rdev->raid_disk >= 0 &&
2317                     mddev->delta_disks >= 0 &&
2318                     !test_bit(Journal, &rdev->flags) &&
2319                     !test_bit(In_sync, &rdev->flags) &&
2320                     mddev->curr_resync_completed > rdev->recovery_offset)
2321                                 rdev->recovery_offset = mddev->curr_resync_completed;
2322
2323         }
2324         if (!mddev->persistent) {
2325                 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
2326                 clear_bit(MD_CHANGE_DEVS, &mddev->flags);
2327                 if (!mddev->external) {
2328                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
2329                         rdev_for_each(rdev, mddev) {
2330                                 if (rdev->badblocks.changed) {
2331                                         rdev->badblocks.changed = 0;
2332                                         ack_all_badblocks(&rdev->badblocks);
2333                                         md_error(mddev, rdev);
2334                                 }
2335                                 clear_bit(Blocked, &rdev->flags);
2336                                 clear_bit(BlockedBadBlocks, &rdev->flags);
2337                                 wake_up(&rdev->blocked_wait);
2338                         }
2339                 }
2340                 wake_up(&mddev->sb_wait);
2341                 return;
2342         }
2343
2344         spin_lock(&mddev->lock);
2345
2346         mddev->utime = ktime_get_real_seconds();
2347
2348         if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
2349                 force_change = 1;
2350         if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
2351                 /* just a clean <-> dirty transition, so possibly leave spares alone,
2352                  * though if the event count isn't the right even/odd value, we will
2353                  * have to update the spares after all
2354                  */
2355                 nospares = 1;
2356         if (force_change)
2357                 nospares = 0;
2358         if (mddev->degraded)
2359                 /* If the array is degraded, then skipping spares is both
2360                  * dangerous and fairly pointless.
2361                  * Dangerous because a device that was removed from the array
2362                  * might have an event_count that still looks up-to-date,
2363                  * so it can be re-added without a resync.
2364                  * Pointless because if there are any spares to skip,
2365                  * then a recovery will happen and soon that array won't
2366                  * be degraded any more and the spare can go back to sleep then.
2367                  */
2368                 nospares = 0;
2369
2370         sync_req = mddev->in_sync;
2371
2372         /* If this is just a dirty<->clean transition, and the array is clean
2373          * and 'events' is odd, we can roll back to the previous clean state */
2374         if (nospares
2375             && (mddev->in_sync && mddev->recovery_cp == MaxSector)
2376             && mddev->can_decrease_events
2377             && mddev->events != 1) {
2378                 mddev->events--;
2379                 mddev->can_decrease_events = 0;
2380         } else {
2381                 /* otherwise we have to go forward and ... */
2382                 mddev->events ++;
2383                 mddev->can_decrease_events = nospares;
2384         }
2385
2386         /*
2387          * This 64-bit counter should never wrap.
2388          * Either we are in around ~1 trillion A.C., assuming
2389          * 1 reboot per second, or we have a bug...
2390          */
2391         WARN_ON(mddev->events == 0);
2392
2393         rdev_for_each(rdev, mddev) {
2394                 if (rdev->badblocks.changed)
2395                         any_badblocks_changed++;
2396                 if (test_bit(Faulty, &rdev->flags))
2397                         set_bit(FaultRecorded, &rdev->flags);
2398         }
2399
2400         sync_sbs(mddev, nospares);
2401         spin_unlock(&mddev->lock);
2402
2403         pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
2404                  mdname(mddev), mddev->in_sync);
2405
2406         bitmap_update_sb(mddev->bitmap);
2407         rdev_for_each(rdev, mddev) {
2408                 char b[BDEVNAME_SIZE];
2409
2410                 if (rdev->sb_loaded != 1)
2411                         continue; /* no noise on spare devices */
2412
2413                 if (!test_bit(Faulty, &rdev->flags)) {
2414                         md_super_write(mddev,rdev,
2415                                        rdev->sb_start, rdev->sb_size,
2416                                        rdev->sb_page);
2417                         pr_debug("md: (write) %s's sb offset: %llu\n",
2418                                  bdevname(rdev->bdev, b),
2419                                  (unsigned long long)rdev->sb_start);
2420                         rdev->sb_events = mddev->events;
2421                         if (rdev->badblocks.size) {
2422                                 md_super_write(mddev, rdev,
2423                                                rdev->badblocks.sector,
2424                                                rdev->badblocks.size << 9,
2425                                                rdev->bb_page);
2426                                 rdev->badblocks.size = 0;
2427                         }
2428
2429                 } else
2430                         pr_debug("md: %s (skipping faulty)\n",
2431                                  bdevname(rdev->bdev, b));
2432
2433                 if (mddev->level == LEVEL_MULTIPATH)
2434                         /* only need to write one superblock... */
2435                         break;
2436         }
2437         md_super_wait(mddev);
2438         /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
2439
2440         if (mddev_is_clustered(mddev) && ret == 0)
2441                 md_cluster_ops->metadata_update_finish(mddev);
2442
2443         if (mddev->in_sync != sync_req ||
2444             !bit_clear_unless(&mddev->flags, BIT(MD_CHANGE_PENDING),
2445                                BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_CLEAN)))
2446                 /* have to write it out again */
2447                 goto repeat;
2448         wake_up(&mddev->sb_wait);
2449         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
2450                 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
2451
2452         rdev_for_each(rdev, mddev) {
2453                 if (test_and_clear_bit(FaultRecorded, &rdev->flags))
2454                         clear_bit(Blocked, &rdev->flags);
2455
2456                 if (any_badblocks_changed)
2457                         ack_all_badblocks(&rdev->badblocks);
2458                 clear_bit(BlockedBadBlocks, &rdev->flags);
2459                 wake_up(&rdev->blocked_wait);
2460         }
2461 }
2462 EXPORT_SYMBOL(md_update_sb);
2463
2464 static int add_bound_rdev(struct md_rdev *rdev)
2465 {
2466         struct mddev *mddev = rdev->mddev;
2467         int err = 0;
2468         bool add_journal = test_bit(Journal, &rdev->flags);
2469
2470         if (!mddev->pers->hot_remove_disk || add_journal) {
2471                 /* If there is hot_add_disk but no hot_remove_disk
2472                  * then added disks are for geometry changes,
2473                  * and should be added immediately.
2474                  */
2475                 super_types[mddev->major_version].
2476                         validate_super(mddev, rdev);
2477                 if (add_journal)
2478                         mddev_suspend(mddev);
2479                 err = mddev->pers->hot_add_disk(mddev, rdev);
2480                 if (add_journal)
2481                         mddev_resume(mddev);
2482                 if (err) {
2483                         md_kick_rdev_from_array(rdev);
2484                         return err;
2485                 }
2486         }
2487         sysfs_notify_dirent_safe(rdev->sysfs_state);
2488
2489         set_bit(MD_CHANGE_DEVS, &mddev->flags);
2490         if (mddev->degraded)
2491                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
2492         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2493         md_new_event(mddev);
2494         md_wakeup_thread(mddev->thread);
2495         return 0;
2496 }
2497
2498 /* words written to sysfs files may, or may not, be \n terminated.
2499  * We want to accept either case. For this we use cmd_match.
2500  */
2501 static int cmd_match(const char *cmd, const char *str)
2502 {
2503         /* See if cmd, written into a sysfs file, matches
2504          * str.  They must either be the same, or cmd can
2505          * have a trailing newline
2506          */
2507         while (*cmd && *str && *cmd == *str) {
2508                 cmd++;
2509                 str++;
2510         }
2511         if (*cmd == '\n')
2512                 cmd++;
2513         if (*str || *cmd)
2514                 return 0;
2515         return 1;
2516 }
2517
2518 struct rdev_sysfs_entry {
2519         struct attribute attr;
2520         ssize_t (*show)(struct md_rdev *, char *);
2521         ssize_t (*store)(struct md_rdev *, const char *, size_t);
2522 };
2523
2524 static ssize_t
2525 state_show(struct md_rdev *rdev, char *page)
2526 {
2527         char *sep = ",";
2528         size_t len = 0;
2529         unsigned long flags = ACCESS_ONCE(rdev->flags);
2530
2531         if (test_bit(Faulty, &flags) ||
2532             (!test_bit(ExternalBbl, &flags) &&
2533             rdev->badblocks.unacked_exist))
2534                 len += sprintf(page+len, "faulty%s", sep);
2535         if (test_bit(In_sync, &flags))
2536                 len += sprintf(page+len, "in_sync%s", sep);
2537         if (test_bit(Journal, &flags))
2538                 len += sprintf(page+len, "journal%s", sep);
2539         if (test_bit(WriteMostly, &flags))
2540                 len += sprintf(page+len, "write_mostly%s", sep);
2541         if (test_bit(Blocked, &flags) ||
2542             (rdev->badblocks.unacked_exist
2543              && !test_bit(Faulty, &flags)))
2544                 len += sprintf(page+len, "blocked%s", sep);
2545         if (!test_bit(Faulty, &flags) &&
2546             !test_bit(Journal, &flags) &&
2547             !test_bit(In_sync, &flags))
2548                 len += sprintf(page+len, "spare%s", sep);
2549         if (test_bit(WriteErrorSeen, &flags))
2550                 len += sprintf(page+len, "write_error%s", sep);
2551         if (test_bit(WantReplacement, &flags))
2552                 len += sprintf(page+len, "want_replacement%s", sep);
2553         if (test_bit(Replacement, &flags))
2554                 len += sprintf(page+len, "replacement%s", sep);
2555         if (test_bit(ExternalBbl, &flags))
2556                 len += sprintf(page+len, "external_bbl%s", sep);
2557
2558         if (len)
2559                 len -= strlen(sep);
2560
2561         return len+sprintf(page+len, "\n");
2562 }
2563
2564 static ssize_t
2565 state_store(struct md_rdev *rdev, const char *buf, size_t len)
2566 {
2567         /* can write
2568          *  faulty  - simulates an error
2569          *  remove  - disconnects the device
2570          *  writemostly - sets write_mostly
2571          *  -writemostly - clears write_mostly
2572          *  blocked - sets the Blocked flag
2573          *  -blocked - clears the Blocked and possibly simulates an error
2574          *  insync - sets Insync providing device isn't active
2575          *  -insync - clear Insync for a device with a slot assigned,
2576          *            so that it gets rebuilt based on bitmap
2577          *  write_error - sets WriteErrorSeen
2578          *  -write_error - clears WriteErrorSeen
              *  want_replacement - sets WantReplacement so the device is
              *            replaced when a suitable spare is available
              *  -want_replacement - clears WantReplacement
              *  replacement/-replacement - sets/clears Replacement
              *            (only allowed before the array is started)
              *  re-add - re-adds a device that was previously faulty
              *  external_bbl/-external_bbl - sets/clears ExternalBbl on arrays
              *            with externally managed metadata
2579          */
2580         int err = -EINVAL;
2581         if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
2582                 md_error(rdev->mddev, rdev);
2583                 if (test_bit(Faulty, &rdev->flags))
2584                         err = 0;
2585                 else
2586                         err = -EBUSY;
2587         } else if (cmd_match(buf, "remove")) {
2588                 if (rdev->mddev->pers) {
2589                         clear_bit(Blocked, &rdev->flags);
2590                         remove_and_add_spares(rdev->mddev, rdev);
2591                 }
2592                 if (rdev->raid_disk >= 0)
2593                         err = -EBUSY;
2594                 else {
2595                         struct mddev *mddev = rdev->mddev;
2596                         err = 0;
2597                         if (mddev_is_clustered(mddev))
2598                                 err = md_cluster_ops->remove_disk(mddev, rdev);
2599
2600                         if (err == 0) {
2601                                 md_kick_rdev_from_array(rdev);
2602                                 if (mddev->pers)
2603                                         md_update_sb(mddev, 1);
2604                                 md_new_event(mddev);
2605                         }
2606                 }
2607         } else if (cmd_match(buf, "writemostly")) {
2608                 set_bit(WriteMostly, &rdev->flags);
2609                 err = 0;
2610         } else if (cmd_match(buf, "-writemostly")) {
2611                 clear_bit(WriteMostly, &rdev->flags);
2612                 err = 0;
2613         } else if (cmd_match(buf, "blocked")) {
2614                 set_bit(Blocked, &rdev->flags);
2615                 err = 0;
2616         } else if (cmd_match(buf, "-blocked")) {
2617                 if (!test_bit(Faulty, &rdev->flags) &&
2618                     !test_bit(ExternalBbl, &rdev->flags) &&
2619                     rdev->badblocks.unacked_exist) {
2620                         /* metadata handler doesn't understand badblocks,
2621                          * so we need to fail the device
2622                          */
2623                         md_error(rdev->mddev, rdev);
2624                 }
2625                 clear_bit(Blocked, &rdev->flags);
2626                 clear_bit(BlockedBadBlocks, &rdev->flags);
2627                 wake_up(&rdev->blocked_wait);
2628                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2629                 md_wakeup_thread(rdev->mddev->thread);
2630
2631                 err = 0;
2632         } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
2633                 set_bit(In_sync, &rdev->flags);
2634                 err = 0;
2635         } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
2636                    !test_bit(Journal, &rdev->flags)) {
2637                 if (rdev->mddev->pers == NULL) {
2638                         clear_bit(In_sync, &rdev->flags);
2639                         rdev->saved_raid_disk = rdev->raid_disk;
2640                         rdev->raid_disk = -1;
2641                         err = 0;
2642                 }
2643         } else if (cmd_match(buf, "write_error")) {
2644                 set_bit(WriteErrorSeen, &rdev->flags);
2645                 err = 0;
2646         } else if (cmd_match(buf, "-write_error")) {
2647                 clear_bit(WriteErrorSeen, &rdev->flags);
2648                 err = 0;
2649         } else if (cmd_match(buf, "want_replacement")) {
2650                 /* Any non-spare device that is not a replacement can
2651                  * become want_replacement at any time, but we then need to
2652                  * check if recovery is needed.
2653                  */
2654                 if (rdev->raid_disk >= 0 &&
2655                     !test_bit(Journal, &rdev->flags) &&
2656                     !test_bit(Replacement, &rdev->flags))
2657                         set_bit(WantReplacement, &rdev->flags);
2658                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2659                 md_wakeup_thread(rdev->mddev->thread);
2660                 err = 0;
2661         } else if (cmd_match(buf, "-want_replacement")) {
2662                 /* Clearing 'want_replacement' is always allowed.
2663                  * Once replacements starts it is too late though.
2664                  */
2665                 err = 0;
2666                 clear_bit(WantReplacement, &rdev->flags);
2667         } else if (cmd_match(buf, "replacement")) {
2668                 /* Can only set a device as a replacement when array has not
2669                  * yet been started.  Once running, replacement is automatic
2670                  * from spares, or by assigning 'slot'.
2671                  */
2672                 if (rdev->mddev->pers)
2673                         err = -EBUSY;
2674                 else {
2675                         set_bit(Replacement, &rdev->flags);
2676                         err = 0;
2677                 }
2678         } else if (cmd_match(buf, "-replacement")) {
2679                 /* Similarly, can only clear Replacement before start */
2680                 if (rdev->mddev->pers)
2681                         err = -EBUSY;
2682                 else {
2683                         clear_bit(Replacement, &rdev->flags);
2684                         err = 0;
2685                 }
2686         } else if (cmd_match(buf, "re-add")) {
2687                 if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1)) {
2688                         /* clear_bit is performed _after_ all the devices
2689                          * have their local Faulty bit cleared. If any writes
2690                          * happen in the meantime in the local node, they
2691                          * will land in the local bitmap, which will be synced
2692                          * by this node eventually
2693                          */
2694                         if (!mddev_is_clustered(rdev->mddev) ||
2695                             (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
2696                                 clear_bit(Faulty, &rdev->flags);
2697                                 err = add_bound_rdev(rdev);
2698                         }
2699                 } else
2700                         err = -EBUSY;
2701         } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) {
2702                 set_bit(ExternalBbl, &rdev->flags);
2703                 rdev->badblocks.shift = 0;
2704                 err = 0;
2705         } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) {
2706                 clear_bit(ExternalBbl, &rdev->flags);
2707                 err = 0;
2708         }
2709         if (!err)
2710                 sysfs_notify_dirent_safe(rdev->sysfs_state);
2711         return err ? err : len;
2712 }
2713 static struct rdev_sysfs_entry rdev_state =
2714 __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
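/*
 * Illustrative use of the 'state' attribute defined above (array and
 * device names here are hypothetical, not taken from this file):
 *
 *   # ask md to replace a working device with a spare
 *   echo want_replacement > /sys/block/md0/md/dev-sdb1/state
 *
 *   # re-add a device that was marked Faulty and has already been
 *   # removed from its slot (raid_disk == -1)
 *   echo re-add > /sys/block/md0/md/dev-sdb1/state
 *
 * Each recognised word corresponds to one cmd_match() branch in
 * state_store().
 */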
2715
2716 static ssize_t
2717 errors_show(struct md_rdev *rdev, char *page)
2718 {
2719         return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
2720 }
2721
2722 static ssize_t
2723 errors_store(struct md_rdev *rdev, const char *buf, size_t len)
2724 {
2725         unsigned int n;
2726         int rv;
2727
2728         rv = kstrtouint(buf, 10, &n);
2729         if (rv < 0)
2730                 return rv;
2731         atomic_set(&rdev->corrected_errors, n);
2732         return len;
2733 }
2734 static struct rdev_sysfs_entry rdev_errors =
2735 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
2736
2737 static ssize_t
2738 slot_show(struct md_rdev *rdev, char *page)
2739 {
2740         if (test_bit(Journal, &rdev->flags))
2741                 return sprintf(page, "journal\n");
2742         else if (rdev->raid_disk < 0)
2743                 return sprintf(page, "none\n");
2744         else
2745                 return sprintf(page, "%d\n", rdev->raid_disk);
2746 }
2747
2748 static ssize_t
2749 slot_store(struct md_rdev *rdev, const char *buf, size_t len)
2750 {
2751         int slot;
2752         int err;
2753
2754         if (test_bit(Journal, &rdev->flags))
2755                 return -EBUSY;
2756         if (strncmp(buf, "none", 4)==0)
2757                 slot = -1;
2758         else {
2759                 err = kstrtouint(buf, 10, (unsigned int *)&slot);
2760                 if (err < 0)
2761                         return err;
2762         }
2763         if (rdev->mddev->pers && slot == -1) {
2764                 /* Setting 'slot' on an active array requires also
2765                  * updating the 'rd%d' link, and communicating
2766                  * with the personality with ->hot_*_disk.
2767                  * For now we only support removing
2768                  * failed/spare devices.  This normally happens automatically,
2769                  * but not when the metadata is externally managed.
2770                  */
2771                 if (rdev->raid_disk == -1)
2772                         return -EEXIST;
2773                 /* personality does all needed checks */
2774                 if (rdev->mddev->pers->hot_remove_disk == NULL)
2775                         return -EINVAL;
2776                 clear_bit(Blocked, &rdev->flags);
2777                 remove_and_add_spares(rdev->mddev, rdev);
2778                 if (rdev->raid_disk >= 0)
2779                         return -EBUSY;
2780                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2781                 md_wakeup_thread(rdev->mddev->thread);
2782         } else if (rdev->mddev->pers) {
2783                 /* Activating a spare .. or possibly reactivating
2784                  * if we ever get bitmaps working here.
2785                  */
2786                 int err;
2787
2788                 if (rdev->raid_disk != -1)
2789                         return -EBUSY;
2790
2791                 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
2792                         return -EBUSY;
2793
2794                 if (rdev->mddev->pers->hot_add_disk == NULL)
2795                         return -EINVAL;
2796
2797                 if (slot >= rdev->mddev->raid_disks &&
2798                     slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
2799                         return -ENOSPC;
2800
2801                 rdev->raid_disk = slot;
2802                 if (test_bit(In_sync, &rdev->flags))
2803                         rdev->saved_raid_disk = slot;
2804                 else
2805                         rdev->saved_raid_disk = -1;
2806                 clear_bit(In_sync, &rdev->flags);
2807                 clear_bit(Bitmap_sync, &rdev->flags);
2808                 err = rdev->mddev->pers->
2809                         hot_add_disk(rdev->mddev, rdev);
2810                 if (err) {
2811                         rdev->raid_disk = -1;
2812                         return err;
2813                 } else
2814                         sysfs_notify_dirent_safe(rdev->sysfs_state);
2815                 if (sysfs_link_rdev(rdev->mddev, rdev))
2816                         /* failure here is OK */;
2817                 /* don't wakeup anyone, leave that to userspace. */
2818         } else {
2819                 if (slot >= rdev->mddev->raid_disks &&
2820                     slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
2821                         return -ENOSPC;
2822                 rdev->raid_disk = slot;
2823                 /* assume it is working */
2824                 clear_bit(Faulty, &rdev->flags);
2825                 clear_bit(WriteMostly, &rdev->flags);
2826                 set_bit(In_sync, &rdev->flags);
2827                 sysfs_notify_dirent_safe(rdev->sysfs_state);
2828         }
2829         return len;
2830 }
2831
2832 static struct rdev_sysfs_entry rdev_slot =
2833 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
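/*
 * Sketch of how the 'slot' attribute behaves, based on slot_store()
 * above (paths and slot numbers are illustrative):
 *
 *   echo none > /sys/block/md0/md/dev-sdc1/slot   # active array: remove a
 *                                                 # failed/spare device
 *   echo 2 > /sys/block/md0/md/dev-sdc1/slot      # active array: hot-add the
 *                                                 # device into raid slot 2
 *
 * On an inactive array the number is simply recorded and the device is
 * assumed to be in-sync.
 */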
2834
2835 static ssize_t
2836 offset_show(struct md_rdev *rdev, char *page)
2837 {
2838         return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
2839 }
2840
2841 static ssize_t
2842 offset_store(struct md_rdev *rdev, const char *buf, size_t len)
2843 {
2844         unsigned long long offset;
2845         if (kstrtoull(buf, 10, &offset) < 0)
2846                 return -EINVAL;
2847         if (rdev->mddev->pers && rdev->raid_disk >= 0)
2848                 return -EBUSY;
2849         if (rdev->sectors && rdev->mddev->external)
2850                 /* Must set offset before size, so overlap checks
2851                  * can be sane */
2852                 return -EBUSY;
2853         rdev->data_offset = offset;
2854         rdev->new_data_offset = offset;
2855         return len;
2856 }
2857
2858 static struct rdev_sysfs_entry rdev_offset =
2859 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
2860
2861 static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
2862 {
2863         return sprintf(page, "%llu\n",
2864                        (unsigned long long)rdev->new_data_offset);
2865 }
2866
2867 static ssize_t new_offset_store(struct md_rdev *rdev,
2868                                 const char *buf, size_t len)
2869 {
2870         unsigned long long new_offset;
2871         struct mddev *mddev = rdev->mddev;
2872
2873         if (kstrtoull(buf, 10, &new_offset) < 0)
2874                 return -EINVAL;
2875
2876         if (mddev->sync_thread ||
2877             test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
2878                 return -EBUSY;
2879         if (new_offset == rdev->data_offset)
2880                 /* reset is always permitted */
2881                 ;
2882         else if (new_offset > rdev->data_offset) {
2883                 /* must not push array size beyond rdev_sectors */
2884                 if (new_offset - rdev->data_offset
2885                     + mddev->dev_sectors > rdev->sectors)
2886                                 return -E2BIG;
2887         }
2888         /* Metadata worries about other space details. */
2889
2890         /* decreasing the offset is inconsistent with a backwards
2891          * reshape.
2892          */
2893         if (new_offset < rdev->data_offset &&
2894             mddev->reshape_backwards)
2895                 return -EINVAL;
2896         /* Increasing offset is inconsistent with forwards
2897          * reshape.  reshape_direction should be set to
2898          * 'backwards' first.
2899          */
2900         if (new_offset > rdev->data_offset &&
2901             !mddev->reshape_backwards)
2902                 return -EINVAL;
2903
2904         if (mddev->pers && mddev->persistent &&
2905             !super_types[mddev->major_version]
2906             .allow_new_offset(rdev, new_offset))
2907                 return -E2BIG;
2908         rdev->new_data_offset = new_offset;
2909         if (new_offset > rdev->data_offset)
2910                 mddev->reshape_backwards = 1;
2911         else if (new_offset < rdev->data_offset)
2912                 mddev->reshape_backwards = 0;
2913
2914         return len;
2915 }
2916 static struct rdev_sysfs_entry rdev_new_offset =
2917 __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
2918
2919 static ssize_t
2920 rdev_size_show(struct md_rdev *rdev, char *page)
2921 {
2922         return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
2923 }
2924
2925 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
2926 {
2927         /* check if two start/length pairs overlap */
2928         if (s1+l1 <= s2)
2929                 return 0;
2930         if (s2+l2 <= s1)
2931                 return 0;
2932         return 1;
2933 }
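/*
 * overlaps() treats each range as a half-open interval [s, s+l).  For
 * example (sector values chosen purely for illustration):
 *   overlaps(0, 100, 100, 50) == 0   - the ranges only touch, no overlap
 *   overlaps(0, 100,  99, 50) == 1   - sector 99 lies in both ranges
 */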
2934
2935 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
2936 {
2937         unsigned long long blocks;
2938         sector_t new;
2939
2940         if (kstrtoull(buf, 10, &blocks) < 0)
2941                 return -EINVAL;
2942
2943         if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
2944                 return -EINVAL; /* sector conversion overflow */
2945
2946         new = blocks * 2;
2947         if (new != blocks * 2)
2948                 return -EINVAL; /* unsigned long long to sector_t overflow */
2949
2950         *sectors = new;
2951         return 0;
2952 }
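/*
 * strict_blocks_to_sectors() converts a value given in 1K blocks (the unit
 * used by the size attributes) into 512-byte sectors, so an input of
 * "1048576" (1 GiB expressed in 1K blocks) yields 2097152 sectors.  The
 * top-bit test rejects inputs that would overflow when doubled.
 */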
2953
2954 static ssize_t
2955 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
2956 {
2957         struct mddev *my_mddev = rdev->mddev;
2958         sector_t oldsectors = rdev->sectors;
2959         sector_t sectors;
2960
2961         if (test_bit(Journal, &rdev->flags))
2962                 return -EBUSY;
2963         if (strict_blocks_to_sectors(buf, &sectors) < 0)
2964                 return -EINVAL;
2965         if (rdev->data_offset != rdev->new_data_offset)
2966                 return -EINVAL; /* too confusing */
2967         if (my_mddev->pers && rdev->raid_disk >= 0) {
2968                 if (my_mddev->persistent) {
2969                         sectors = super_types[my_mddev->major_version].
2970                                 rdev_size_change(rdev, sectors);
2971                         if (!sectors)
2972                                 return -EBUSY;
2973                 } else if (!sectors)
2974                         sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
2975                                 rdev->data_offset;
2976                 if (!my_mddev->pers->resize)
2977                         /* Cannot change size for RAID0 or Linear etc */
2978                         return -EINVAL;
2979         }
2980         if (sectors < my_mddev->dev_sectors)
2981                 return -EINVAL; /* component must fit device */
2982
2983         rdev->sectors = sectors;
2984         if (sectors > oldsectors && my_mddev->external) {
2985                 /* Need to check that all other rdevs with the same
2986                  * ->bdev do not overlap.  'rcu' is sufficient to walk
2987                  * the rdev lists safely.
2988                  * This check does not provide a hard guarantee, it
2989                  * just helps avoid dangerous mistakes.
2990                  */
2991                 struct mddev *mddev;
2992                 int overlap = 0;
2993                 struct list_head *tmp;
2994
2995                 rcu_read_lock();
2996                 for_each_mddev(mddev, tmp) {
2997                         struct md_rdev *rdev2;
2998
2999                         rdev_for_each(rdev2, mddev)
3000                                 if (rdev->bdev == rdev2->bdev &&
3001                                     rdev != rdev2 &&
3002                                     overlaps(rdev->data_offset, rdev->sectors,
3003                                              rdev2->data_offset,
3004                                              rdev2->sectors)) {
3005                                         overlap = 1;
3006                                         break;
3007                                 }
3008                         if (overlap) {
3009                                 mddev_put(mddev);
3010                                 break;
3011                         }
3012                 }
3013                 rcu_read_unlock();
3014                 if (overlap) {
3015                         /* Someone else could have slipped in a size
3016                          * change here, but doing so is just silly.
3017                          * We put oldsectors back because we *know* it is
3018                          * safe, and trust userspace not to race with
3019                          * itself
3020                          */
3021                         rdev->sectors = oldsectors;
3022                         return -EBUSY;
3023                 }
3024         }
3025         return len;
3026 }
3027
3028 static struct rdev_sysfs_entry rdev_size =
3029 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
3030
3031 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
3032 {
3033         unsigned long long recovery_start = rdev->recovery_offset;
3034
3035         if (test_bit(In_sync, &rdev->flags) ||
3036             recovery_start == MaxSector)
3037                 return sprintf(page, "none\n");
3038
3039         return sprintf(page, "%llu\n", recovery_start);
3040 }
3041
3042 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
3043 {
3044         unsigned long long recovery_start;
3045
3046         if (cmd_match(buf, "none"))
3047                 recovery_start = MaxSector;
3048         else if (kstrtoull(buf, 10, &recovery_start))
3049                 return -EINVAL;
3050
3051         if (rdev->mddev->pers &&
3052             rdev->raid_disk >= 0)
3053                 return -EBUSY;
3054
3055         rdev->recovery_offset = recovery_start;
3056         if (recovery_start == MaxSector)
3057                 set_bit(In_sync, &rdev->flags);
3058         else
3059                 clear_bit(In_sync, &rdev->flags);
3060         return len;
3061 }
3062
3063 static struct rdev_sysfs_entry rdev_recovery_start =
3064 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
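/*
 * Example use of 'recovery_start' (illustrative path; only permitted while
 * the device is not active in a running array):
 *
 *   echo 12345678 > /sys/block/md0/md/dev-sdd1/recovery_start  # resume recovery here
 *   echo none > /sys/block/md0/md/dev-sdd1/recovery_start      # device is fully in-sync
 */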
3065
3066 /* sysfs access to bad-blocks list.
3067  * We present two files.
3068  * 'bad-blocks' lists sector numbers and lengths of ranges that
3069  *    are recorded as bad.  The list is truncated to fit within
3070  *    the one-page limit of sysfs.
3071  *    Writing "sector length" to this file adds an acknowledged
3072  *    bad block range.
3073  * 'unacknowledged-bad-blocks' lists bad blocks that have not yet
3074  *    been acknowledged.  Writing to this file adds bad blocks
3075  *    without acknowledging them.  This is largely for testing.
3076  */
3077 static ssize_t bb_show(struct md_rdev *rdev, char *page)
3078 {
3079         return badblocks_show(&rdev->badblocks, page, 0);
3080 }
3081 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
3082 {
3083         int rv = badblocks_store(&rdev->badblocks, page, len, 0);
3084         /* Maybe that ack was all we needed */
3085         if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
3086                 wake_up(&rdev->blocked_wait);
3087         return rv;
3088 }
3089 static struct rdev_sysfs_entry rdev_bad_blocks =
3090 __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
3091
3092 static ssize_t ubb_show(struct md_rdev *rdev, char *page)
3093 {
3094         return badblocks_show(&rdev->badblocks, page, 1);
3095 }
3096 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
3097 {
3098         return badblocks_store(&rdev->badblocks, page, len, 1);
3099 }
3100 static struct rdev_sysfs_entry rdev_unack_bad_blocks =
3101 __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
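/*
 * Both bad-block files take "sector length" pairs on write, e.g.
 * (values and path illustrative):
 *
 *   echo "2097152 8" > /sys/block/md0/md/dev-sdb1/bad_blocks
 *
 * records 8 sectors starting at sector 2097152 as bad and acknowledged;
 * the same write to unacknowledged_bad_blocks records them without
 * acknowledgement.
 */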
3102
3103 static struct attribute *rdev_default_attrs[] = {
3104         &rdev_state.attr,
3105         &rdev_errors.attr,
3106         &rdev_slot.attr,
3107         &rdev_offset.attr,
3108         &rdev_new_offset.attr,
3109         &rdev_size.attr,
3110         &rdev_recovery_start.attr,
3111         &rdev_bad_blocks.attr,
3112         &rdev_unack_bad_blocks.attr,
3113         NULL,
3114 };
3115 static ssize_t
3116 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3117 {
3118         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3119         struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3120
3121         if (!entry->show)
3122                 return -EIO;
3123         if (!rdev->mddev)
3124                 return -EBUSY;
3125         return entry->show(rdev, page);
3126 }
3127
3128 static ssize_t
3129 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
3130               const char *page, size_t length)
3131 {
3132         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3133         struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3134         ssize_t rv;
3135         struct mddev *mddev = rdev->mddev;
3136
3137         if (!entry->store)
3138                 return -EIO;
3139         if (!capable(CAP_SYS_ADMIN))
3140                 return -EACCES;
3141         rv = mddev ? mddev_lock(mddev): -EBUSY;
3142         if (!rv) {
3143                 if (rdev->mddev == NULL)
3144                         rv = -EBUSY;
3145                 else
3146                         rv = entry->store(rdev, page, length);
3147                 mddev_unlock(mddev);
3148         }
3149         return rv;
3150 }
3151
3152 static void rdev_free(struct kobject *ko)
3153 {
3154         struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
3155         kfree(rdev);
3156 }
3157 static const struct sysfs_ops rdev_sysfs_ops = {
3158         .show           = rdev_attr_show,
3159         .store          = rdev_attr_store,
3160 };
3161 static struct kobj_type rdev_ktype = {
3162         .release        = rdev_free,
3163         .sysfs_ops      = &rdev_sysfs_ops,
3164         .default_attrs  = rdev_default_attrs,
3165 };
3166
3167 int md_rdev_init(struct md_rdev *rdev)
3168 {
3169         rdev->desc_nr = -1;
3170         rdev->saved_raid_disk = -1;
3171         rdev->raid_disk = -1;
3172         rdev->flags = 0;
3173         rdev->data_offset = 0;
3174         rdev->new_data_offset = 0;
3175         rdev->sb_events = 0;
3176         rdev->last_read_error = 0;
3177         rdev->sb_loaded = 0;
3178         rdev->bb_page = NULL;
3179         atomic_set(&rdev->nr_pending, 0);
3180         atomic_set(&rdev->read_errors, 0);
3181         atomic_set(&rdev->corrected_errors, 0);
3182
3183         INIT_LIST_HEAD(&rdev->same_set);
3184         init_waitqueue_head(&rdev->blocked_wait);
3185
3186         /* Add space to store bad block list.
3187          * This reserves the space even on arrays where it cannot
3188          * be used - I wonder if that matters
3189          */
3190         return badblocks_init(&rdev->badblocks, 0);
3191 }
3192 EXPORT_SYMBOL_GPL(md_rdev_init);
3193 /*
3194  * Import a device. If 'super_format' >= 0, then sanity check the superblock
3195  *
3196  * mark the device faulty if:
3197  *
3198  *   - the device is nonexistent (zero size)
3199  *   - the device has no valid superblock
3200  *
3201  * a faulty rdev _never_ has rdev->sb set.
3202  */
3203 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
3204 {
3205         char b[BDEVNAME_SIZE];
3206         int err;
3207         struct md_rdev *rdev;
3208         sector_t size;
3209
3210         rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
3211         if (!rdev)
3212                 return ERR_PTR(-ENOMEM);
3213
3214         err = md_rdev_init(rdev);
3215         if (err)
3216                 goto abort_free;
3217         err = alloc_disk_sb(rdev);
3218         if (err)
3219                 goto abort_free;
3220
3221         err = lock_rdev(rdev, newdev, super_format == -2);
3222         if (err)
3223                 goto abort_free;
3224
3225         kobject_init(&rdev->kobj, &rdev_ktype);
3226
3227         size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
3228         if (!size) {
3229                 pr_warn("md: %s has zero or unknown size, marking faulty!\n",
3230                         bdevname(rdev->bdev,b));
3231                 err = -EINVAL;
3232                 goto abort_free;
3233         }
3234
3235         if (super_format >= 0) {
3236                 err = super_types[super_format].
3237                         load_super(rdev, NULL, super_minor);
3238                 if (err == -EINVAL) {
3239                         pr_warn("md: %s does not have a valid v%d.%d superblock, not importing!\n",
3240                                 bdevname(rdev->bdev,b),
3241                                 super_format, super_minor);
3242                         goto abort_free;
3243                 }
3244                 if (err < 0) {
3245                         pr_warn("md: could not read %s's sb, not importing!\n",
3246                                 bdevname(rdev->bdev,b));
3247                         goto abort_free;
3248                 }
3249         }
3250
3251         return rdev;
3252
3253 abort_free:
3254         if (rdev->bdev)
3255                 unlock_rdev(rdev);
3256         md_rdev_clear(rdev);
3257         kfree(rdev);
3258         return ERR_PTR(err);
3259 }
3260
3261 /*
3262  * Check a full RAID array for plausibility
3263  */
3264
3265 static void analyze_sbs(struct mddev *mddev)
3266 {
3267         int i;
3268         struct md_rdev *rdev, *freshest, *tmp;
3269         char b[BDEVNAME_SIZE];
3270
3271         freshest = NULL;
3272         rdev_for_each_safe(rdev, tmp, mddev)
3273                 switch (super_types[mddev->major_version].
3274                         load_super(rdev, freshest, mddev->minor_version)) {
3275                 case 1:
3276                         freshest = rdev;
3277                         break;
3278                 case 0:
3279                         break;
3280                 default:
3281                         pr_warn("md: fatal superblock inconsistency in %s -- removing from array\n",
3282                                 bdevname(rdev->bdev,b));
3283                         md_kick_rdev_from_array(rdev);
3284                 }
3285
3286         super_types[mddev->major_version].
3287                 validate_super(mddev, freshest);
3288
3289         i = 0;
3290         rdev_for_each_safe(rdev, tmp, mddev) {
3291                 if (mddev->max_disks &&
3292                     (rdev->desc_nr >= mddev->max_disks ||
3293                      i > mddev->max_disks)) {
3294                         pr_warn("md: %s: %s: only %d devices permitted\n",
3295                                 mdname(mddev), bdevname(rdev->bdev, b),
3296                                 mddev->max_disks);
3297                         md_kick_rdev_from_array(rdev);
3298                         continue;
3299                 }
3300                 if (rdev != freshest) {
3301                         if (super_types[mddev->major_version].
3302                             validate_super(mddev, rdev)) {
3303                                 pr_warn("md: kicking non-fresh %s from array!\n",
3304                                         bdevname(rdev->bdev,b));
3305                                 md_kick_rdev_from_array(rdev);
3306                                 continue;
3307                         }
3308                 }
3309                 if (mddev->level == LEVEL_MULTIPATH) {
3310                         rdev->desc_nr = i++;
3311                         rdev->raid_disk = rdev->desc_nr;
3312                         set_bit(In_sync, &rdev->flags);
3313                 } else if (rdev->raid_disk >=
3314                             (mddev->raid_disks - min(0, mddev->delta_disks)) &&
3315                            !test_bit(Journal, &rdev->flags)) {
3316                         rdev->raid_disk = -1;
3317                         clear_bit(In_sync, &rdev->flags);
3318                 }
3319         }
3320 }
3321
3322 /* Read a fixed-point number.
3323  * Numbers in sysfs attributes should be in "standard" units where
3324  * possible, so time should be in seconds.
3325  * However we internally use a much smaller unit such as
3326  * milliseconds or jiffies.
3327  * This function takes a decimal number with a possible fractional
3328  * component, and produces an integer which is the result of
3329  * multiplying that number by 10^'scale', all without any
3330  * floating-point arithmetic.
3331  */
3332 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
3333 {
3334         unsigned long result = 0;
3335         long decimals = -1;
3336         while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
3337                 if (*cp == '.')
3338                         decimals = 0;
3339                 else if (decimals < scale) {
3340                         unsigned int value;
3341                         value = *cp - '0';
3342                         result = result * 10 + value;
3343                         if (decimals >= 0)
3344                                 decimals++;
3345                 }
3346                 cp++;
3347         }
3348         if (*cp == '\n')
3349                 cp++;
3350         if (*cp)
3351                 return -EINVAL;
3352         if (decimals < 0)
3353                 decimals = 0;
3354         while (decimals < scale) {
3355                 result *= 10;
3356                 decimals ++;
3357         }
3358         *res = result;
3359         return 0;
3360 }
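/*
 * Worked example for strict_strtoul_scaled(): with scale == 3 (i.e.
 * converting seconds to milliseconds) the input "0.2\n" produces 200,
 * "1.5" produces 1500 and "3" produces 3000.  Anything other than digits,
 * a single '.' and an optional trailing newline is rejected with -EINVAL.
 */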
3361
3362 static ssize_t
3363 safe_delay_show(struct mddev *mddev, char *page)
3364 {
3365         int msec = (mddev->safemode_delay*1000)/HZ;
3366         return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
3367 }
3368 static ssize_t
3369 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
3370 {
3371         unsigned long msec;
3372
3373         if (mddev_is_clustered(mddev)) {
3374                 pr_warn("md: Safemode is disabled for clustered mode\n");
3375                 return -EINVAL;
3376         }
3377
3378         if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
3379                 return -EINVAL;
3380         if (msec == 0)
3381                 mddev->safemode_delay = 0;
3382         else {
3383                 unsigned long old_delay = mddev->safemode_delay;
3384                 unsigned long new_delay = (msec*HZ)/1000;
3385
3386                 if (new_delay == 0)
3387                         new_delay = 1;
3388                 mddev->safemode_delay = new_delay;
3389                 if (new_delay < old_delay || old_delay == 0)
3390                         mod_timer(&mddev->safemode_timer, jiffies+1);
3391         }
3392         return len;
3393 }
3394 static struct md_sysfs_entry md_safe_delay =
3395 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
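/*
 * 'safe_mode_delay' is written in seconds with an optional millisecond
 * fraction and parsed via strict_strtoul_scaled(..., 3), e.g.
 * (path illustrative):
 *
 *   echo 0.200 > /sys/block/md0/md/safe_mode_delay
 *
 * requests roughly a 200ms delay, which is then rounded to jiffies.
 * Clustered arrays reject any setting.
 */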
3396
3397 static ssize_t
3398 level_show(struct mddev *mddev, char *page)
3399 {
3400         struct md_personality *p;
3401         int ret;
3402         spin_lock(&mddev->lock);
3403         p = mddev->pers;
3404         if (p)
3405                 ret = sprintf(page, "%s\n", p->name);
3406         else if (mddev->clevel[0])
3407                 ret = sprintf(page, "%s\n", mddev->clevel);
3408         else if (mddev->level != LEVEL_NONE)
3409                 ret = sprintf(page, "%d\n", mddev->level);
3410         else
3411                 ret = 0;
3412         spin_unlock(&mddev->lock);
3413         return ret;
3414 }
3415
3416 static ssize_t
3417 level_store(struct mddev *mddev, const char *buf, size_t len)
3418 {
3419         char clevel[16];
3420         ssize_t rv;
3421         size_t slen = len;
3422         struct md_personality *pers, *oldpers;
3423         long level;
3424         void *priv, *oldpriv;
3425         struct md_rdev *rdev;
3426
3427         if (slen == 0 || slen >= sizeof(clevel))
3428                 return -EINVAL;
3429
3430         rv = mddev_lock(mddev);
3431         if (rv)
3432                 return rv;
3433
3434         if (mddev->pers == NULL) {
3435                 strncpy(mddev->clevel, buf, slen);
3436                 if (mddev->clevel[slen-1] == '\n')
3437                         slen--;
3438                 mddev->clevel[slen] = 0;
3439                 mddev->level = LEVEL_NONE;
3440                 rv = len;
3441                 goto out_unlock;
3442         }
3443         rv = -EROFS;
3444         if (mddev->ro)
3445                 goto out_unlock;
3446
3447         /* request to change the personality.  Need to ensure:
3448          *  - array is not engaged in resync/recovery/reshape
3449          *  - old personality can be suspended
3450          *  - new personality will be able to access the array.
3451          */
3452
3453         rv = -EBUSY;
3454         if (mddev->sync_thread ||
3455             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3456             mddev->reshape_position != MaxSector ||
3457             mddev->sysfs_active)
3458                 goto out_unlock;
3459
3460         rv = -EINVAL;
3461         if (!mddev->pers->quiesce) {
3462                 pr_warn("md: %s: %s does not support online personality change\n",
3463                         mdname(mddev), mddev->pers->name);
3464                 goto out_unlock;
3465         }
3466
3467         /* Now find the new personality */
3468         strncpy(clevel, buf, slen);
3469         if (clevel[slen-1] == '\n')
3470                 slen--;
3471         clevel[slen] = 0;
3472         if (kstrtol(clevel, 10, &level))
3473                 level = LEVEL_NONE;
3474
3475         if (request_module("md-%s", clevel) != 0)
3476                 request_module("md-level-%s", clevel);
3477         spin_lock(&pers_lock);
3478         pers = find_pers(level, clevel);
3479         if (!pers || !try_module_get(pers->owner)) {
3480                 spin_unlock(&pers_lock);
3481                 pr_warn("md: personality %s not loaded\n", clevel);
3482                 rv = -EINVAL;
3483                 goto out_unlock;
3484         }
3485         spin_unlock(&pers_lock);
3486
3487         if (pers == mddev->pers) {
3488                 /* Nothing to do! */
3489                 module_put(pers->owner);
3490                 rv = len;
3491                 goto out_unlock;
3492         }
3493         if (!pers->takeover) {
3494                 module_put(pers->owner);
3495                 pr_warn("md: %s: %s does not support personality takeover\n",
3496                         mdname(mddev), clevel);
3497                 rv = -EINVAL;
3498                 goto out_unlock;
3499         }
3500
3501         rdev_for_each(rdev, mddev)
3502                 rdev->new_raid_disk = rdev->raid_disk;
3503
3504         /* ->takeover must set new_* and/or delta_disks
3505          * if it succeeds, and may set them when it fails.
3506          */
3507         priv = pers->takeover(mddev);
3508         if (IS_ERR(priv)) {
3509                 mddev->new_level = mddev->level;
3510                 mddev->new_layout = mddev->layout;
3511                 mddev->new_chunk_sectors = mddev->chunk_sectors;
3512                 mddev->raid_disks -= mddev->delta_disks;
3513                 mddev->delta_disks = 0;
3514                 mddev->reshape_backwards = 0;
3515                 module_put(pers->owner);
3516                 pr_warn("md: %s: %s would not accept array\n",
3517                         mdname(mddev), clevel);
3518                 rv = PTR_ERR(priv);
3519                 goto out_unlock;
3520         }
3521
3522         /* Looks like we have a winner */
3523         mddev_suspend(mddev);
3524         mddev_detach(mddev);
3525
3526         spin_lock(&mddev->lock);
3527         oldpers = mddev->pers;
3528         oldpriv = mddev->private;
3529         mddev->pers = pers;
3530         mddev->private = priv;
3531         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3532         mddev->level = mddev->new_level;
3533         mddev->layout = mddev->new_layout;
3534         mddev->chunk_sectors = mddev->new_chunk_sectors;
3535         mddev->delta_disks = 0;
3536         mddev->reshape_backwards = 0;
3537         mddev->degraded = 0;
3538         spin_unlock(&mddev->lock);
3539
3540         if (oldpers->sync_request == NULL &&
3541             mddev->external) {
3542                 /* We are converting from a no-redundancy array
3543                  * to a redundancy array and metadata is managed
3544                  * externally so we need to be sure that writes
3545                  * won't block due to a need to transition
3546                  *      clean->dirty
3547                  * until external management is started.
3548                  */
3549                 mddev->in_sync = 0;
3550                 mddev->safemode_delay = 0;
3551                 mddev->safemode = 0;
3552         }
3553
3554         oldpers->free(mddev, oldpriv);
3555
3556         if (oldpers->sync_request == NULL &&
3557             pers->sync_request != NULL) {
3558                 /* need to add the md_redundancy_group */
3559                 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
3560                         pr_warn("md: cannot register extra attributes for %s\n",
3561                                 mdname(mddev));
3562                 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
3563         }
3564         if (oldpers->sync_request != NULL &&
3565             pers->sync_request == NULL) {
3566                 /* need to remove the md_redundancy_group */
3567                 if (mddev->to_remove == NULL)
3568                         mddev->to_remove = &md_redundancy_group;
3569         }
3570
3571         module_put(oldpers->owner);
3572
3573         rdev_for_each(rdev, mddev) {
3574                 if (rdev->raid_disk < 0)
3575                         continue;
3576                 if (rdev->new_raid_disk >= mddev->raid_disks)
3577                         rdev->new_raid_disk = -1;
3578                 if (rdev->new_raid_disk == rdev->raid_disk)
3579                         continue;
3580                 sysfs_unlink_rdev(mddev, rdev);
3581         }
3582         rdev_for_each(rdev, mddev) {
3583                 if (rdev->raid_disk < 0)
3584                         continue;
3585                 if (rdev->new_raid_disk == rdev->raid_disk)
3586                         continue;
3587                 rdev->raid_disk = rdev->new_raid_disk;
3588                 if (rdev->raid_disk < 0)
3589                         clear_bit(In_sync, &rdev->flags);
3590                 else {
3591                         if (sysfs_link_rdev(mddev, rdev))
3592                                 pr_warn("md: cannot register rd%d for %s after level change\n",
3593                                         rdev->raid_disk, mdname(mddev));
3594                 }
3595         }
3596
3597         if (pers->sync_request == NULL) {
3598                 /* this is now an array without redundancy, so
3599                  * it must always be in_sync
3600                  */
3601                 mddev->in_sync = 1;
3602                 del_timer_sync(&mddev->safemode_timer);
3603         }
3604         blk_set_stacking_limits(&mddev->queue->limits);
3605         pers->run(mddev);
3606         set_bit(MD_CHANGE_DEVS, &mddev->flags);
3607         mddev_resume(mddev);
3608         if (!mddev->thread)
3609                 md_update_sb(mddev, 1);
3610         sysfs_notify(&mddev->kobj, NULL, "level");
3611         md_new_event(mddev);
3612         rv = len;
3613 out_unlock:
3614         mddev_unlock(mddev);
3615         return rv;
3616 }
3617
3618 static struct md_sysfs_entry md_level =
3619 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
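/*
 * Example of an online personality change via 'level' (names illustrative;
 * it only succeeds if the target personality provides ->takeover for the
 * current geometry):
 *
 *   echo raid5 > /sys/block/md0/md/level
 *
 * On an array that is not yet running, the string is merely recorded in
 * mddev->clevel for use when the array is started.
 */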
3620
3621 static ssize_t
3622 layout_show(struct mddev *mddev, char *page)
3623 {
3624         /* just a number, not meaningful for all levels */
3625         if (mddev->reshape_position != MaxSector &&
3626             mddev->layout != mddev->new_layout)
3627                 return sprintf(page, "%d (%d)\n",
3628                                mddev->new_layout, mddev->layout);
3629         return sprintf(page, "%d\n", mddev->layout);
3630 }
3631
3632 static ssize_t
3633 layout_store(struct mddev *mddev, const char *buf, size_t len)
3634 {
3635         unsigned int n;
3636         int err;
3637
3638         err = kstrtouint(buf, 10, &n);
3639         if (err < 0)
3640                 return err;
3641         err = mddev_lock(mddev);
3642         if (err)
3643                 return err;
3644
3645         if (mddev->pers) {
3646                 if (mddev->pers->check_reshape == NULL)
3647                         err = -EBUSY;
3648                 else if (mddev->ro)
3649                         err = -EROFS;
3650                 else {
3651                         mddev->new_layout = n;
3652                         err = mddev->pers->check_reshape(mddev);
3653                         if (err)
3654                                 mddev->new_layout = mddev->layout;
3655                 }
3656         } else {
3657                 mddev->new_layout = n;
3658                 if (mddev->reshape_position == MaxSector)
3659                         mddev->layout = n;
3660         }
3661         mddev_unlock(mddev);
3662         return err ?: len;
3663 }
3664 static struct md_sysfs_entry md_layout =
3665 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
3666
3667 static ssize_t
3668 raid_disks_show(struct mddev *mddev, char *page)
3669 {
3670         if (mddev->raid_disks == 0)
3671                 return 0;
3672         if (mddev->reshape_position != MaxSector &&
3673             mddev->delta_disks != 0)
3674                 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
3675                                mddev->raid_disks - mddev->delta_disks);
3676         return sprintf(page, "%d\n", mddev->raid_disks);
3677 }
3678
3679 static int update_raid_disks(struct mddev *mddev, int raid_disks);
3680
3681 static ssize_t
3682 raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
3683 {
3684         unsigned int n;
3685         int err;
3686
3687         err = kstrtouint(buf, 10, &n);
3688         if (err < 0)
3689                 return err;
3690
3691         err = mddev_lock(mddev);
3692         if (err)
3693                 return err;
3694         if (mddev->pers)
3695                 err = update_raid_disks(mddev, n);
3696         else if (mddev->reshape_position != MaxSector) {
3697                 struct md_rdev *rdev;
3698                 int olddisks = mddev->raid_disks - mddev->delta_disks;
3699
3700                 err = -EINVAL;
3701                 rdev_for_each(rdev, mddev) {
3702                         if (olddisks < n &&
3703                             rdev->data_offset < rdev->new_data_offset)
3704                                 goto out_unlock;
3705                         if (olddisks > n &&
3706                             rdev->data_offset > rdev->new_data_offset)
3707                                 goto out_unlock;
3708                 }
3709                 err = 0;
3710                 mddev->delta_disks = n - olddisks;
3711                 mddev->raid_disks = n;
3712                 mddev->reshape_backwards = (mddev->delta_disks < 0);
3713         } else
3714                 mddev->raid_disks = n;
3715 out_unlock:
3716         mddev_unlock(mddev);
3717         return err ? err : len;
3718 }
3719 static struct md_sysfs_entry md_raid_disks =
3720 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
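/*
 * Example for 'raid_disks' (illustrative): on a running array
 *
 *   echo 4 > /sys/block/md0/md/raid_disks
 *
 * asks the personality to reshape to 4 devices via update_raid_disks();
 * on an inactive array the number is stored directly, or folded into
 * delta_disks if a reshape position is already set.
 */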
3721
3722 static ssize_t
3723 chunk_size_show(struct mddev *mddev, char *page)
3724 {
3725         if (mddev->reshape_position != MaxSector &&
3726             mddev->chunk_sectors != mddev->new_chunk_sectors)
3727                 return sprintf(page, "%d (%d)\n",
3728                                mddev->new_chunk_sectors << 9,
3729                                mddev->chunk_sectors << 9);
3730         return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
3731 }
3732
3733 static ssize_t
3734 chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
3735 {
3736         unsigned long n;
3737         int err;
3738
3739         err = kstrtoul(buf, 10, &n);
3740         if (err < 0)
3741                 return err;
3742
3743         err = mddev_lock(mddev);
3744         if (err)
3745                 return err;
3746         if (mddev->pers) {
3747                 if (mddev->pers->check_reshape == NULL)
3748                         err = -EBUSY;
3749                 else if (mddev->ro)
3750                         err = -EROFS;
3751                 else {
3752                         mddev->new_chunk_sectors = n >> 9;
3753                         err = mddev->pers->check_reshape(mddev);
3754                         if (err)
3755                                 mddev->new_chunk_sectors = mddev->chunk_sectors;
3756                 }
3757         } else {
3758                 mddev->new_chunk_sectors = n >> 9;
3759                 if (mddev->reshape_position == MaxSector)
3760                         mddev->chunk_sectors = n >> 9;
3761         }
3762         mddev_unlock(mddev);
3763         return err ?: len;
3764 }
3765 static struct md_sysfs_entry md_chunk_size =
3766 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
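/*
 * 'chunk_size' is expressed in bytes and stored internally in 512-byte
 * sectors (n >> 9), e.g. (path illustrative):
 *
 *   echo 524288 > /sys/block/md0/md/chunk_size
 *
 * requests 512 KiB chunks; on a running array the change goes through the
 * personality's check_reshape().
 */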
3767
3768 static ssize_t
3769 resync_start_show(struct mddev *mddev, char *page)
3770 {
3771         if (mddev->recovery_cp == MaxSector)
3772                 return sprintf(page, "none\n");
3773         return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
3774 }
3775
3776 static ssize_t
3777 resync_start_store(struct mddev *mddev, const char *buf, size_t len)
3778 {
3779         unsigned long long n;
3780         int err;
3781
3782         if (cmd_match(buf, "none"))
3783                 n = MaxSector;
3784         else {
3785                 err = kstrtoull(buf, 10, &n);
3786                 if (err < 0)
3787                         return err;
3788                 if (n != (sector_t)n)
3789                         return -EINVAL;
3790         }
3791
3792         err = mddev_lock(mddev);
3793         if (err)
3794                 return err;
3795         if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
3796                 err = -EBUSY;
3797
3798         if (!err) {
3799                 mddev->recovery_cp = n;
3800                 if (mddev->pers)
3801                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
3802         }
3803         mddev_unlock(mddev);
3804         return err ?: len;
3805 }
3806 static struct md_sysfs_entry md_resync_start =
3807 __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
3808                 resync_start_show, resync_start_store);
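/*
 * Example for 'resync_start' (illustrative; on a running array it is only
 * accepted while MD_RECOVERY_FROZEN is set):
 *
 *   echo 0 > /sys/block/md0/md/resync_start      # resync from the beginning
 *   echo none > /sys/block/md0/md/resync_start   # treat the array as fully synced
 */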
3809
3810 /*
3811  * The array state can be:
3812  *
3813  * clear
3814  *     No devices, no size, no level
3815  *     Equivalent to STOP_ARRAY ioctl
3816  * inactive
3817  *     May have some settings, but array is not active
3818  *        all IO results in error
3819  *     When written, doesn't tear down array, but just stops it
3820  * suspended (not supported yet)
3821  *     All IO requests will block. The array can be reconfigured.
3822  *     Writing this, if accepted, will block until array is quiescent
3823  * readonly
3824  *     no resync can happen.  no superblocks get written.
3825  *     write requests fail
3826  * read-auto
3827  *     like readonly, but behaves like 'clean' on a write request.
3828  *
3829  * clean - no pending writes, but otherwise active.
3830  *     When written to inactive array, starts without resync
3831  *     If a write request arrives then
3832  *       if metadata is known, mark 'dirty' and switch to 'active'.
3833  *       if not known, block and switch to write-pending
3834  *     If written to an active array that has pending writes, it fails.
3835  * active
3836  *     fully active: IO and resync can be happening.
3837  *     When written to inactive array, starts with resync
3838  *
3839  * write-pending
3840  *     clean, but writes are blocked waiting for 'active' to be written.
3841  *
3842  * active-idle
3843  *     like active, but no writes have been seen for a while (100msec).
3844  *
3845  */
3846 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
3847                    write_pending, active_idle, bad_word};
3848 static char *array_states[] = {
3849         "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
3850         "write-pending", "active-idle", NULL };
3851
3852 static int match_word(const char *word, char **list)
3853 {
3854         int n;
3855         for (n=0; list[n]; n++)
3856                 if (cmd_match(word, list[n]))
3857                         break;
3858         return n;
3859 }
3860
3861 static ssize_t
3862 array_state_show(struct mddev *mddev, char *page)
3863 {
3864         enum array_state st = inactive;
3865
3866         if (mddev->pers)
3867                 switch(mddev->ro) {
3868                 case 1:
3869                         st = readonly;
3870                         break;
3871                 case 2:
3872                         st = read_auto;
3873                         break;
3874                 case 0:
3875                         if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
3876                                 st = write_pending;
3877                         else if (mddev->in_sync)
3878                                 st = clean;
3879                         else if (mddev->safemode)
3880                                 st = active_idle;
3881                         else
3882                                 st = active;
3883                 }
3884         else {
3885                 if (list_empty(&mddev->disks) &&
3886                     mddev->raid_disks == 0 &&
3887                     mddev->dev_sectors == 0)
3888                         st = clear;
3889                 else
3890                         st = inactive;
3891         }
3892         return sprintf(page, "%s\n", array_states[st]);
3893 }
3894
3895 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
3896 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
3897 static int do_md_run(struct mddev *mddev);
3898 static int restart_array(struct mddev *mddev);
3899
3900 static ssize_t
3901 array_state_store(struct mddev *mddev, const char *buf, size_t len)
3902 {
3903         int err;
3904         enum array_state st = match_word(buf, array_states);
3905
3906         if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) {
3907                 /* don't take reconfig_mutex when toggling between
3908                  * clean and active
3909                  */
3910                 spin_lock(&mddev->lock);
3911                 if (st == active) {
3912                         restart_array(mddev);
3913                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
3914                         md_wakeup_thread(mddev->thread);
3915                         wake_up(&mddev->sb_wait);
3916                         err = 0;
3917                 } else /* st == clean */ {
3918                         restart_array(mddev);
3919                         if (atomic_read(&mddev->writes_pending) == 0) {
3920                                 if (mddev->in_sync == 0) {
3921                                         mddev->in_sync = 1;
3922                                         if (mddev->safemode == 1)
3923                                                 mddev->safemode = 0;
3924                                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
3925                                 }
3926                                 err = 0;
3927                         } else
3928                                 err = -EBUSY;
3929                 }
3930                 if (!err)
3931                         sysfs_notify_dirent_safe(mddev->sysfs_state);
3932                 spin_unlock(&mddev->lock);
3933                 return err ?: len;
3934         }
3935         err = mddev_lock(mddev);
3936         if (err)
3937                 return err;
3938         err = -EINVAL;
3939         switch(st) {
3940         case bad_word:
3941                 break;
3942         case clear:
3943                 /* stopping an active array */
3944                 err = do_md_stop(mddev, 0, NULL);
3945                 break;
3946         case inactive:
3947                 /* stopping an active array */
3948                 if (mddev->pers)
3949                         err = do_md_stop(mddev, 2, NULL);
3950                 else
3951                         err = 0; /* already inactive */
3952                 break;
3953         case suspended:
3954                 break; /* not supported yet */
3955         case readonly:
3956                 if (mddev->pers)
3957                         err = md_set_readonly(mddev, NULL);
3958                 else {
3959                         mddev->ro = 1;
3960                         set_disk_ro(mddev->gendisk, 1);
3961                         err = do_md_run(mddev);
3962                 }
3963                 break;
3964         case read_auto:
3965                 if (mddev->pers) {
3966                         if (mddev->ro == 0)
3967                                 err = md_set_readonly(mddev, NULL);
3968                         else if (mddev->ro == 1)
3969                                 err = restart_array(mddev);
3970                         if (err == 0) {
3971                                 mddev->ro = 2;
3972                                 set_disk_ro(mddev->gendisk, 0);
3973                         }
3974                 } else {
3975                         mddev->ro = 2;
3976                         err = do_md_run(mddev);
3977                 }
3978                 break;
3979         case clean:
3980                 if (mddev->pers) {
3981                         err = restart_array(mddev);
3982                         if (err)
3983                                 break;
3984                         spin_lock(&mddev->lock);
3985                         if (atomic_read(&mddev->writes_pending) == 0) {
3986                                 if (mddev->in_sync == 0) {
3987                                         mddev->in_sync = 1;
3988                                         if (mddev->safemode == 1)
3989                                                 mddev->safemode = 0;
3990                                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
3991                                 }
3992                                 err = 0;
3993                         } else
3994                                 err = -EBUSY;
3995                         spin_unlock(&mddev->lock);
3996                 } else
3997                         err = -EINVAL;
3998                 break;
3999         case active:
4000                 if (mddev->pers) {
4001                         err = restart_array(mddev);
4002                         if (err)
4003                                 break;
4004                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
4005                         wake_up(&mddev->sb_wait);
4006                         err = 0;
4007                 } else {
4008                         mddev->ro = 0;
4009                         set_disk_ro(mddev->gendisk, 0);
4010                         err = do_md_run(mddev);
4011                 }
4012                 break;
4013         case write_pending:
4014         case active_idle:
4015                 /* these cannot be set */
4016                 break;
4017         }
4018
4019         if (!err) {
4020                 if (mddev->hold_active == UNTIL_IOCTL)
4021                         mddev->hold_active = 0;
4022                 sysfs_notify_dirent_safe(mddev->sysfs_state);
4023         }
4024         mddev_unlock(mddev);
4025         return err ?: len;
4026 }
4027 static struct md_sysfs_entry md_array_state =
4028 __ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
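/*
 * A few illustrative 'array_state' transitions (see the comment above
 * array_state_store() for the full list of states):
 *
 *   echo readonly > /sys/block/md0/md/array_state   # stop writes, keep array up
 *   echo clean > /sys/block/md0/md/array_state      # mark an idle array clean
 *   echo inactive > /sys/block/md0/md/array_state   # stop the array, keep settings
 */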
4029
4030 static ssize_t
4031 max_corrected_read_errors_show(struct mddev *mddev, char *page) {
4032         return sprintf(page, "%d\n",
4033                        atomic_read(&mddev->max_corr_read_errors));
4034 }
4035
4036 static ssize_t
4037 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
4038 {
4039         unsigned int n;
4040         int rv;
4041
4042         rv = kstrtouint(buf, 10, &n);
4043         if (rv < 0)
4044                 return rv;
4045         atomic_set(&mddev->max_corr_read_errors, n);
4046         return len;
4047 }
4048
4049 static struct md_sysfs_entry max_corr_read_errors =
4050 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
4051         max_corrected_read_errors_store);
4052
4053 static ssize_t
4054 null_show(struct mddev *mddev, char *page)
4055 {
4056         return -EINVAL;
4057 }
4058
4059 static ssize_t
4060 new_dev_store(struct mddev *mddev, const char *buf, size_t len)
4061 {
4062         /* buf must be "%d:%d" with an optional trailing newline, giving major and minor numbers */
4063         /* The new device is added to the array.
4064          * If the array has a persistent superblock, we read the
4065          * superblock to initialise info and check validity.
4066          * Otherwise, only checking done is that in bind_rdev_to_array,
4067          * which mainly checks size.
4068          */
4069         char *e;
4070         int major = simple_strtoul(buf, &e, 10);
4071         int minor;
4072         dev_t dev;
4073         struct md_rdev *rdev;
4074         int err;
4075
4076         if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
4077                 return -EINVAL;
4078         minor = simple_strtoul(e+1, &e, 10);
4079         if (*e && *e != '\n')
4080                 return -EINVAL;
4081         dev = MKDEV(major, minor);
4082         if (major != MAJOR(dev) ||
4083             minor != MINOR(dev))
4084                 return -EOVERFLOW;
4085
4086         flush_workqueue(md_misc_wq);
4087
4088         err = mddev_lock(mddev);
4089         if (err)
4090                 return err;
4091         if (mddev->persistent) {
4092                 rdev = md_import_device(dev, mddev->major_version,
4093                                         mddev->minor_version);
4094                 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
4095                         struct md_rdev *rdev0
4096                                 = list_entry(mddev->disks.next,
4097                                              struct md_rdev, same_set);
4098                         err = super_types[mddev->major_version]
4099                                 .load_super(rdev, rdev0, mddev->minor_version);
4100                         if (err < 0)
4101                                 goto out;
4102                 }
4103         } else if (mddev->external)
4104                 rdev = md_import_device(dev, -2, -1);
4105         else
4106                 rdev = md_import_device(dev, -1, -1);
4107
4108         if (IS_ERR(rdev)) {
4109                 mddev_unlock(mddev);
4110                 return PTR_ERR(rdev);
4111         }
4112         err = bind_rdev_to_array(rdev, mddev);
4113  out:
4114         if (err)
4115                 export_rdev(rdev);
4116         mddev_unlock(mddev);
4117         return err ? err : len;
4118 }
4119
4120 static struct md_sysfs_entry md_new_device =
4121 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
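/*
 * 'new_dev' takes the "major:minor" numbers of the device to add, e.g.
 * (numbers and path illustrative):
 *
 *   echo 8:17 > /sys/block/md0/md/new_dev
 *
 * imports that block device and binds it to the array, loading its
 * superblock first when the array uses persistent metadata.
 */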
4122
4123 static ssize_t
4124 bitmap_store(struct mddev *mddev, const char *buf, size_t len)
4125 {
4126         char *end;
4127         unsigned long chunk, end_chunk;
4128         int err;
4129
4130         err = mddev_lock(mddev);
4131         if (err)
4132                 return err;
4133         if (!mddev->bitmap)
4134                 goto out;
4135         /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
4136         while (*buf) {
4137                 chunk = end_chunk = simple_strtoul(buf, &end, 0);
4138                 if (buf == end) break;
4139                 if (*end == '-') { /* range */
4140                         buf = end + 1;
4141                         end_chunk = simple_strtoul(buf, &end, 0);
4142                         if (buf == end) break;
4143                 }
4144                 if (*end && !isspace(*end)) break;
4145                 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
4146                 buf = skip_spaces(end);
4147         }
4148         bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
4149 out:
4150         mddev_unlock(mddev);
4151         return len;
4152 }
4153
4154 static struct md_sysfs_entry md_bitmap =
4155 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
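
/*
 * Example usage (illustrative; the chunk numbers and array name are
 * assumptions): marking bitmap chunks 0 through 15 and chunk 64 dirty on
 * an array named md0:
 *
 *	echo "0-15 64" > /sys/block/md0/md/bitmap_set_bits
 *
 * This only has an effect while the array has an active bitmap.
 */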
4156
4157 static ssize_t
4158 size_show(struct mddev *mddev, char *page)
4159 {
4160         return sprintf(page, "%llu\n",
4161                 (unsigned long long)mddev->dev_sectors / 2);
4162 }
4163
4164 static int update_size(struct mddev *mddev, sector_t num_sectors);
4165
4166 static ssize_t
4167 size_store(struct mddev *mddev, const char *buf, size_t len)
4168 {
4169         /* If array is inactive, we can reduce the component size, but
4170          * not increase it (except from 0).
4171          * If array is active, we can try an on-line resize
4172          */
4173         sector_t sectors;
4174         int err = strict_blocks_to_sectors(buf, &sectors);
4175
4176         if (err < 0)
4177                 return err;
4178         err = mddev_lock(mddev);
4179         if (err)
4180                 return err;
4181         if (mddev->pers) {
4182                 err = update_size(mddev, sectors);
4183                 if (err == 0)
4184                         md_update_sb(mddev, 1);
4185         } else {
4186                 if (mddev->dev_sectors == 0 ||
4187                     mddev->dev_sectors > sectors)
4188                         mddev->dev_sectors = sectors;
4189                 else
4190                         err = -ENOSPC;
4191         }
4192         mddev_unlock(mddev);
4193         return err ? err : len;
4194 }
4195
4196 static struct md_sysfs_entry md_size =
4197 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
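
/*
 * component_size is read and written in units of 1K blocks.  Example
 * (illustrative; the size and array name are assumptions): limiting each
 * component device of md0 to 1 GiB:
 *
 *	echo 1048576 > /sys/block/md0/md/component_size
 *
 * On an active array this attempts an on-line resize; on an inactive
 * array the size can only be reduced (or set from zero), as the comment
 * in size_store() above explains.
 */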
4198
4199 /* Metadata version.
4200  * This is one of
4201  *   'none' for arrays with no metadata (good luck...)
4202  *   'external' for arrays with externally managed metadata,
4203  * or N.M for internally known formats
4204  */
4205 static ssize_t
4206 metadata_show(struct mddev *mddev, char *page)
4207 {
4208         if (mddev->persistent)
4209                 return sprintf(page, "%d.%d\n",
4210                                mddev->major_version, mddev->minor_version);
4211         else if (mddev->external)
4212                 return sprintf(page, "external:%s\n", mddev->metadata_type);
4213         else
4214                 return sprintf(page, "none\n");
4215 }
4216
4217 static ssize_t
4218 metadata_store(struct mddev *mddev, const char *buf, size_t len)
4219 {
4220         int major, minor;
4221         char *e;
4222         int err;
4223         /* Changing the details of 'external' metadata is
4224          * always permitted.  Otherwise there must be
4225          * no devices attached to the array.
4226          */
4227
4228         err = mddev_lock(mddev);
4229         if (err)
4230                 return err;
4231         err = -EBUSY;
4232         if (mddev->external && strncmp(buf, "external:", 9) == 0)
4233                 ;
4234         else if (!list_empty(&mddev->disks))
4235                 goto out_unlock;
4236
4237         err = 0;
4238         if (cmd_match(buf, "none")) {
4239                 mddev->persistent = 0;
4240                 mddev->external = 0;
4241                 mddev->major_version = 0;
4242                 mddev->minor_version = 90;
4243                 goto out_unlock;
4244         }
4245         if (strncmp(buf, "external:", 9) == 0) {
4246                 size_t namelen = len-9;
4247                 if (namelen >= sizeof(mddev->metadata_type))
4248                         namelen = sizeof(mddev->metadata_type)-1;
4249                 strncpy(mddev->metadata_type, buf+9, namelen);
4250                 mddev->metadata_type[namelen] = 0;
4251                 if (namelen && mddev->metadata_type[namelen-1] == '\n')
4252                         mddev->metadata_type[--namelen] = 0;
4253                 mddev->persistent = 0;
4254                 mddev->external = 1;
4255                 mddev->major_version = 0;
4256                 mddev->minor_version = 90;
4257                 goto out_unlock;
4258         }
4259         major = simple_strtoul(buf, &e, 10);
4260         err = -EINVAL;
4261         if (e==buf || *e != '.')
4262                 goto out_unlock;
4263         buf = e+1;
4264         minor = simple_strtoul(buf, &e, 10);
4265         if (e==buf || (*e && *e != '\n') )
4266                 goto out_unlock;
4267         err = -ENOENT;
4268         if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
4269                 goto out_unlock;
4270         mddev->major_version = major;
4271         mddev->minor_version = minor;
4272         mddev->persistent = 1;
4273         mddev->external = 0;
4274         err = 0;
4275 out_unlock:
4276         mddev_unlock(mddev);
4277         return err ?: len;
4278 }
4279
4280 static struct md_sysfs_entry md_metadata =
4281 __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
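
/*
 * Example values for metadata_version (illustrative): "0.90" or "1.2"
 * select an internally-known superblock format, "external:imsm" marks
 * externally managed metadata (the text after the colon is chosen by the
 * metadata manager), and "none" disables persistent metadata.  The value
 * can only be changed while no devices are attached, except that the
 * details of an "external:" type may always be updated.
 */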
4282
4283 static ssize_t
4284 action_show(struct mddev *mddev, char *page)
4285 {
4286         char *type = "idle";
4287         unsigned long recovery = mddev->recovery;
4288         if (test_bit(MD_RECOVERY_FROZEN, &recovery))
4289                 type = "frozen";
4290         else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
4291             (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
4292                 if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
4293                         type = "reshape";
4294                 else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
4295                         if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
4296                                 type = "resync";
4297                         else if (test_bit(MD_RECOVERY_CHECK, &recovery))
4298                                 type = "check";
4299                         else
4300                                 type = "repair";
4301                 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
4302                         type = "recover";
4303                 else if (mddev->reshape_position != MaxSector)
4304                         type = "reshape";
4305         }
4306         return sprintf(page, "%s\n", type);
4307 }
4308
4309 static ssize_t
4310 action_store(struct mddev *mddev, const char *page, size_t len)
4311 {
4312         if (!mddev->pers || !mddev->pers->sync_request)
4313                 return -EINVAL;
4314
4315
4316         if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
4317                 if (cmd_match(page, "frozen"))
4318                         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4319                 else
4320                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4321                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
4322                     mddev_lock(mddev) == 0) {
4323                         flush_workqueue(md_misc_wq);
4324                         if (mddev->sync_thread) {
4325                                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4326                                 md_reap_sync_thread(mddev);
4327                         }
4328                         mddev_unlock(mddev);
4329                 }
4330         } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4331                 return -EBUSY;
4332         else if (cmd_match(page, "resync"))
4333                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4334         else if (cmd_match(page, "recover")) {
4335                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4336                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
4337         } else if (cmd_match(page, "reshape")) {
4338                 int err;
4339                 if (mddev->pers->start_reshape == NULL)
4340                         return -EINVAL;
4341                 err = mddev_lock(mddev);
4342                 if (!err) {
4343                         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4344                                 err =  -EBUSY;
4345                         else {
4346                                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4347                                 err = mddev->pers->start_reshape(mddev);
4348                         }
4349                         mddev_unlock(mddev);
4350                 }
4351                 if (err)
4352                         return err;
4353                 sysfs_notify(&mddev->kobj, NULL, "degraded");
4354         } else {
4355                 if (cmd_match(page, "check"))
4356                         set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4357                 else if (!cmd_match(page, "repair"))
4358                         return -EINVAL;
4359                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4360                 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
4361                 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4362         }
4363         if (mddev->ro == 2) {
4364                 /* A write to sync_action is enough to justify
4365                  * canceling read-auto mode
4366                  */
4367                 mddev->ro = 0;
4368                 md_wakeup_thread(mddev->sync_thread);
4369         }
4370         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4371         md_wakeup_thread(mddev->thread);
4372         sysfs_notify_dirent_safe(mddev->sysfs_action);
4373         return len;
4374 }
4375
4376 static struct md_sysfs_entry md_scan_mode =
4377 __ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
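
/*
 * Example usage (illustrative; the array name is an assumption):
 * requesting a scrub of md0 and then cancelling it:
 *
 *	echo check > /sys/block/md0/md/sync_action
 *	echo idle  > /sys/block/md0/md/sync_action
 *
 * "repair" additionally rewrites mismatched blocks, while "frozen"
 * prevents any new recovery activity from starting.
 */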
4378
4379 static ssize_t
4380 last_sync_action_show(struct mddev *mddev, char *page)
4381 {
4382         return sprintf(page, "%s\n", mddev->last_sync_action);
4383 }
4384
4385 static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
4386
4387 static ssize_t
4388 mismatch_cnt_show(struct mddev *mddev, char *page)
4389 {
4390         return sprintf(page, "%llu\n",
4391                        (unsigned long long)
4392                        atomic64_read(&mddev->resync_mismatches));
4393 }
4394
4395 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
4396
4397 static ssize_t
4398 sync_min_show(struct mddev *mddev, char *page)
4399 {
4400         return sprintf(page, "%d (%s)\n", speed_min(mddev),
4401                        mddev->sync_speed_min ? "local": "system");
4402 }
4403
4404 static ssize_t
4405 sync_min_store(struct mddev *mddev, const char *buf, size_t len)
4406 {
4407         unsigned int min;
4408         int rv;
4409
4410         if (strncmp(buf, "system", 6)==0) {
4411                 min = 0;
4412         } else {
4413                 rv = kstrtouint(buf, 10, &min);
4414                 if (rv < 0)
4415                         return rv;
4416                 if (min == 0)
4417                         return -EINVAL;
4418         }
4419         mddev->sync_speed_min = min;
4420         return len;
4421 }
4422
4423 static struct md_sysfs_entry md_sync_min =
4424 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
4425
4426 static ssize_t
4427 sync_max_show(struct mddev *mddev, char *page)
4428 {
4429         return sprintf(page, "%d (%s)\n", speed_max(mddev),
4430                        mddev->sync_speed_max ? "local": "system");
4431 }
4432
4433 static ssize_t
4434 sync_max_store(struct mddev *mddev, const char *buf, size_t len)
4435 {
4436         unsigned int max;
4437         int rv;
4438
4439         if (strncmp(buf, "system", 6)==0) {
4440                 max = 0;
4441         } else {
4442                 rv = kstrtouint(buf, 10, &max);
4443                 if (rv < 0)
4444                         return rv;
4445                 if (max == 0)
4446                         return -EINVAL;
4447         }
4448         mddev->sync_speed_max = max;
4449         return len;
4450 }
4451
4452 static struct md_sysfs_entry md_sync_max =
4453 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
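
/*
 * Example usage (illustrative; the number is an assumption): capping the
 * resync rate of md0 at roughly 100 MB/s and then reverting to the
 * system-wide default:
 *
 *	echo 100000 > /sys/block/md0/md/sync_speed_max
 *	echo system > /sys/block/md0/md/sync_speed_max
 *
 * Values are in KB/sec; a per-array value of 0 means "use the system-wide
 * limit", which reading the attribute reports as "(system)".
 */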
4454
4455 static ssize_t
4456 degraded_show(struct mddev *mddev, char *page)
4457 {
4458         return sprintf(page, "%d\n", mddev->degraded);
4459 }
4460 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
4461
4462 static ssize_t
4463 sync_force_parallel_show(struct mddev *mddev, char *page)
4464 {
4465         return sprintf(page, "%d\n", mddev->parallel_resync);
4466 }
4467
4468 static ssize_t
4469 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
4470 {
4471         long n;
4472
4473         if (kstrtol(buf, 10, &n))
4474                 return -EINVAL;
4475
4476         if (n != 0 && n != 1)
4477                 return -EINVAL;
4478
4479         mddev->parallel_resync = n;
4480
4481         if (mddev->sync_thread)
4482                 wake_up(&resync_wait);
4483
4484         return len;
4485 }
4486
4487 /* force parallel resync, even with shared block devices */
4488 static struct md_sysfs_entry md_sync_force_parallel =
4489 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
4490        sync_force_parallel_show, sync_force_parallel_store);
4491
4492 static ssize_t
4493 sync_speed_show(struct mddev *mddev, char *page)
4494 {
4495         unsigned long resync, dt, db;
4496         if (mddev->curr_resync == 0)
4497                 return sprintf(page, "none\n");
4498         resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
4499         dt = (jiffies - mddev->resync_mark) / HZ;
4500         if (!dt) dt++;
4501         db = resync - mddev->resync_mark_cnt;
4502         return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
4503 }
4504
4505 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
4506
4507 static ssize_t
4508 sync_completed_show(struct mddev *mddev, char *page)
4509 {
4510         unsigned long long max_sectors, resync;
4511
4512         if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4513                 return sprintf(page, "none\n");
4514
4515         if (mddev->curr_resync == 1 ||
4516             mddev->curr_resync == 2)
4517                 return sprintf(page, "delayed\n");
4518
4519         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
4520             test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4521                 max_sectors = mddev->resync_max_sectors;
4522         else
4523                 max_sectors = mddev->dev_sectors;
4524
4525         resync = mddev->curr_resync_completed;
4526         return sprintf(page, "%llu / %llu\n", resync, max_sectors);
4527 }
4528
4529 static struct md_sysfs_entry md_sync_completed =
4530         __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
4531
4532 static ssize_t
4533 min_sync_show(struct mddev *mddev, char *page)
4534 {
4535         return sprintf(page, "%llu\n",
4536                        (unsigned long long)mddev->resync_min);
4537 }
4538 static ssize_t
4539 min_sync_store(struct mddev *mddev, const char *buf, size_t len)
4540 {
4541         unsigned long long min;
4542         int err;
4543
4544         if (kstrtoull(buf, 10, &min))
4545                 return -EINVAL;
4546
4547         spin_lock(&mddev->lock);
4548         err = -EINVAL;
4549         if (min > mddev->resync_max)
4550                 goto out_unlock;
4551
4552         err = -EBUSY;
4553         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4554                 goto out_unlock;
4555
4556         /* Round down to multiple of 4K for safety */
4557         mddev->resync_min = round_down(min, 8);
4558         err = 0;
4559
4560 out_unlock:
4561         spin_unlock(&mddev->lock);
4562         return err ?: len;
4563 }
4564
4565 static struct md_sysfs_entry md_min_sync =
4566 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
4567
4568 static ssize_t
4569 max_sync_show(struct mddev *mddev, char *page)
4570 {
4571         if (mddev->resync_max == MaxSector)
4572                 return sprintf(page, "max\n");
4573         else
4574                 return sprintf(page, "%llu\n",
4575                                (unsigned long long)mddev->resync_max);
4576 }
4577 static ssize_t
4578 max_sync_store(struct mddev *mddev, const char *buf, size_t len)
4579 {
4580         int err;
4581         spin_lock(&mddev->lock);
4582         if (strncmp(buf, "max", 3) == 0)
4583                 mddev->resync_max = MaxSector;
4584         else {
4585                 unsigned long long max;
4586                 int chunk;
4587
4588                 err = -EINVAL;
4589                 if (kstrtoull(buf, 10, &max))
4590                         goto out_unlock;
4591                 if (max < mddev->resync_min)
4592                         goto out_unlock;
4593
4594                 err = -EBUSY;
4595                 if (max < mddev->resync_max &&
4596                     mddev->ro == 0 &&
4597                     test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4598                         goto out_unlock;
4599
4600                 /* Must be a multiple of chunk_size */
4601                 chunk = mddev->chunk_sectors;
4602                 if (chunk) {
4603                         sector_t temp = max;
4604
4605                         err = -EINVAL;
4606                         if (sector_div(temp, chunk))
4607                                 goto out_unlock;
4608                 }
4609                 mddev->resync_max = max;
4610         }
4611         wake_up(&mddev->recovery_wait);
4612         err = 0;
4613 out_unlock:
4614         spin_unlock(&mddev->lock);
4615         return err ?: len;
4616 }
4617
4618 static struct md_sysfs_entry md_max_sync =
4619 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
4620
4621 static ssize_t
4622 suspend_lo_show(struct mddev *mddev, char *page)
4623 {
4624         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
4625 }
4626
4627 static ssize_t
4628 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
4629 {
4630         unsigned long long old, new;
4631         int err;
4632
4633         err = kstrtoull(buf, 10, &new);
4634         if (err < 0)
4635                 return err;
4636         if (new != (sector_t)new)
4637                 return -EINVAL;
4638
4639         err = mddev_lock(mddev);
4640         if (err)
4641                 return err;
4642         err = -EINVAL;
4643         if (mddev->pers == NULL ||
4644             mddev->pers->quiesce == NULL)
4645                 goto unlock;
4646         old = mddev->suspend_lo;
4647         mddev->suspend_lo = new;
4648         if (new >= old)
4649                 /* Shrinking suspended region */
4650                 mddev->pers->quiesce(mddev, 2);
4651         else {
4652                 /* Expanding suspended region - need to wait */
4653                 mddev->pers->quiesce(mddev, 1);
4654                 mddev->pers->quiesce(mddev, 0);
4655         }
4656         err = 0;
4657 unlock:
4658         mddev_unlock(mddev);
4659         return err ?: len;
4660 }
4661 static struct md_sysfs_entry md_suspend_lo =
4662 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
4663
4664 static ssize_t
4665 suspend_hi_show(struct mddev *mddev, char *page)
4666 {
4667         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
4668 }
4669
4670 static ssize_t
4671 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
4672 {
4673         unsigned long long old, new;
4674         int err;
4675
4676         err = kstrtoull(buf, 10, &new);
4677         if (err < 0)
4678                 return err;
4679         if (new != (sector_t)new)
4680                 return -EINVAL;
4681
4682         err = mddev_lock(mddev);
4683         if (err)
4684                 return err;
4685         err = -EINVAL;
4686         if (mddev->pers == NULL ||
4687             mddev->pers->quiesce == NULL)
4688                 goto unlock;
4689         old = mddev->suspend_hi;
4690         mddev->suspend_hi = new;
4691         if (new <= old)
4692                 /* Shrinking suspended region */
4693                 mddev->pers->quiesce(mddev, 2);
4694         else {
4695                 /* Expanding suspended region - need to wait */
4696                 mddev->pers->quiesce(mddev, 1);
4697                 mddev->pers->quiesce(mddev, 0);
4698         }
4699         err = 0;
4700 unlock:
4701         mddev_unlock(mddev);
4702         return err ?: len;
4703 }
4704 static struct md_sysfs_entry md_suspend_hi =
4705 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
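
/*
 * suspend_lo and suspend_hi together describe a sector range
 * [suspend_lo, suspend_hi) in which I/O is suspended.  Growing the range
 * quiesces the personality and waits for in-flight requests to drain
 * (the quiesce(1)/quiesce(0) pair above), while shrinking it only needs
 * the lighter quiesce(2) notification.  External metadata managers can
 * use this, for example, while relocating data during a reshape.
 */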
4706
4707 static ssize_t
4708 reshape_position_show(struct mddev *mddev, char *page)
4709 {
4710         if (mddev->reshape_position != MaxSector)
4711                 return sprintf(page, "%llu\n",
4712                                (unsigned long long)mddev->reshape_position);
4713         strcpy(page, "none\n");
4714         return 5;
4715 }
4716
4717 static ssize_t
4718 reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
4719 {
4720         struct md_rdev *rdev;
4721         unsigned long long new;
4722         int err;
4723
4724         err = kstrtoull(buf, 10, &new);
4725         if (err < 0)
4726                 return err;
4727         if (new != (sector_t)new)
4728                 return -EINVAL;
4729         err = mddev_lock(mddev);
4730         if (err)
4731                 return err;
4732         err = -EBUSY;
4733         if (mddev->pers)
4734                 goto unlock;
4735         mddev->reshape_position = new;
4736         mddev->delta_disks = 0;
4737         mddev->reshape_backwards = 0;
4738         mddev->new_level = mddev->level;
4739         mddev->new_layout = mddev->layout;
4740         mddev->new_chunk_sectors = mddev->chunk_sectors;
4741         rdev_for_each(rdev, mddev)
4742                 rdev->new_data_offset = rdev->data_offset;
4743         err = 0;
4744 unlock:
4745         mddev_unlock(mddev);
4746         return err ?: len;
4747 }
4748
4749 static struct md_sysfs_entry md_reshape_position =
4750 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
4751        reshape_position_store);
4752
4753 static ssize_t
4754 reshape_direction_show(struct mddev *mddev, char *page)
4755 {
4756         return sprintf(page, "%s\n",
4757                        mddev->reshape_backwards ? "backwards" : "forwards");
4758 }
4759
4760 static ssize_t
4761 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
4762 {
4763         int backwards = 0;
4764         int err;
4765
4766         if (cmd_match(buf, "forwards"))
4767                 backwards = 0;
4768         else if (cmd_match(buf, "backwards"))
4769                 backwards = 1;
4770         else
4771                 return -EINVAL;
4772         if (mddev->reshape_backwards == backwards)
4773                 return len;
4774
4775         err = mddev_lock(mddev);
4776         if (err)
4777                 return err;
4778         /* check if we are allowed to change */
4779         if (mddev->delta_disks)
4780                 err = -EBUSY;
4781         else if (mddev->persistent &&
4782             mddev->major_version == 0)
4783                 err =  -EINVAL;
4784         else
4785                 mddev->reshape_backwards = backwards;
4786         mddev_unlock(mddev);
4787         return err ?: len;
4788 }
4789
4790 static struct md_sysfs_entry md_reshape_direction =
4791 __ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
4792        reshape_direction_store);
4793
4794 static ssize_t
4795 array_size_show(struct mddev *mddev, char *page)
4796 {
4797         if (mddev->external_size)
4798                 return sprintf(page, "%llu\n",
4799                                (unsigned long long)mddev->array_sectors/2);
4800         else
4801                 return sprintf(page, "default\n");
4802 }
4803
4804 static ssize_t
4805 array_size_store(struct mddev *mddev, const char *buf, size_t len)
4806 {
4807         sector_t sectors;
4808         int err;
4809
4810         /* cluster raid doesn't support changing array_sectors */
4811         if (mddev_is_clustered(mddev))
4812                 return -EINVAL;
4813
4814         err = mddev_lock(mddev);
4815         if (err)
4816                 return err;
4817
4818         if (strncmp(buf, "default", 7) == 0) {
4819                 if (mddev->pers)
4820                         sectors = mddev->pers->size(mddev, 0, 0);
4821                 else
4822                         sectors = mddev->array_sectors;
4823
4824                 mddev->external_size = 0;
4825         } else {
4826                 if (strict_blocks_to_sectors(buf, &sectors) < 0)
4827                         err = -EINVAL;
4828                 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
4829                         err = -E2BIG;
4830                 else
4831                         mddev->external_size = 1;
4832         }
4833
4834         if (!err) {
4835                 mddev->array_sectors = sectors;
4836                 if (mddev->pers) {
4837                         set_capacity(mddev->gendisk, mddev->array_sectors);
4838                         revalidate_disk(mddev->gendisk);
4839                 }
4840         }
4841         mddev_unlock(mddev);
4842         return err ?: len;
4843 }
4844
4845 static struct md_sysfs_entry md_array_size =
4846 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
4847        array_size_store);
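
/*
 * Example usage (illustrative; the size and array name are assumptions):
 * exporting only the first 10 GiB of md0 and then returning to the size
 * computed by the personality:
 *
 *	echo 10485760 > /sys/block/md0/md/array_size
 *	echo default  > /sys/block/md0/md/array_size
 *
 * As with component_size, the value is in units of 1K blocks, and it may
 * not exceed the size the personality can provide.
 */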
4848
4849 static struct attribute *md_default_attrs[] = {
4850         &md_level.attr,
4851         &md_layout.attr,
4852         &md_raid_disks.attr,
4853         &md_chunk_size.attr,
4854         &md_size.attr,
4855         &md_resync_start.attr,
4856         &md_metadata.attr,
4857         &md_new_device.attr,
4858         &md_safe_delay.attr,
4859         &md_array_state.attr,
4860         &md_reshape_position.attr,
4861         &md_reshape_direction.attr,
4862         &md_array_size.attr,
4863         &max_corr_read_errors.attr,
4864         NULL,
4865 };
4866
4867 static struct attribute *md_redundancy_attrs[] = {
4868         &md_scan_mode.attr,
4869         &md_last_scan_mode.attr,
4870         &md_mismatches.attr,
4871         &md_sync_min.attr,
4872         &md_sync_max.attr,
4873         &md_sync_speed.attr,
4874         &md_sync_force_parallel.attr,
4875         &md_sync_completed.attr,
4876         &md_min_sync.attr,
4877         &md_max_sync.attr,
4878         &md_suspend_lo.attr,
4879         &md_suspend_hi.attr,
4880         &md_bitmap.attr,
4881         &md_degraded.attr,
4882         NULL,
4883 };
4884 static struct attribute_group md_redundancy_group = {
4885         .name = NULL,
4886         .attrs = md_redundancy_attrs,
4887 };
4888
4889 static ssize_t
4890 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
4891 {
4892         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
4893         struct mddev *mddev = container_of(kobj, struct mddev, kobj);
4894         ssize_t rv;
4895
4896         if (!entry->show)
4897                 return -EIO;
4898         spin_lock(&all_mddevs_lock);
4899         if (list_empty(&mddev->all_mddevs)) {
4900                 spin_unlock(&all_mddevs_lock);
4901                 return -EBUSY;
4902         }
4903         mddev_get(mddev);
4904         spin_unlock(&all_mddevs_lock);
4905
4906         rv = entry->show(mddev, page);
4907         mddev_put(mddev);
4908         return rv;
4909 }
4910
4911 static ssize_t
4912 md_attr_store(struct kobject *kobj, struct attribute *attr,
4913               const char *page, size_t length)
4914 {
4915         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
4916         struct mddev *mddev = container_of(kobj, struct mddev, kobj);
4917         ssize_t rv;
4918
4919         if (!entry->store)
4920                 return -EIO;
4921         if (!capable(CAP_SYS_ADMIN))
4922                 return -EACCES;
4923         spin_lock(&all_mddevs_lock);
4924         if (list_empty(&mddev->all_mddevs)) {
4925                 spin_unlock(&all_mddevs_lock);
4926                 return -EBUSY;
4927         }
4928         mddev_get(mddev);
4929         spin_unlock(&all_mddevs_lock);
4930         rv = entry->store(mddev, page, length);
4931         mddev_put(mddev);
4932         return rv;
4933 }
4934
4935 static void md_free(struct kobject *ko)
4936 {
4937         struct mddev *mddev = container_of(ko, struct mddev, kobj);
4938
4939         if (mddev->sysfs_state)
4940                 sysfs_put(mddev->sysfs_state);
4941
4942         if (mddev->queue)
4943                 blk_cleanup_queue(mddev->queue);
4944         if (mddev->gendisk) {
4945                 del_gendisk(mddev->gendisk);
4946                 put_disk(mddev->gendisk);
4947         }
4948
4949         kfree(mddev);
4950 }
4951
4952 static const struct sysfs_ops md_sysfs_ops = {
4953         .show   = md_attr_show,
4954         .store  = md_attr_store,
4955 };
4956 static struct kobj_type md_ktype = {
4957         .release        = md_free,
4958         .sysfs_ops      = &md_sysfs_ops,
4959         .default_attrs  = md_default_attrs,
4960 };
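
/*
 * All of the attributes above follow the same pattern: a show/store pair
 * operating on the mddev, wrapped in a struct md_sysfs_entry and routed
 * through md_attr_show()/md_attr_store() by md_sysfs_ops.  A minimal
 * sketch of a hypothetical new attribute (the name "foo" is an
 * assumption, not part of the driver):
 *
 *	static ssize_t
 *	foo_show(struct mddev *mddev, char *page)
 *	{
 *		return sprintf(page, "%d\n", mddev->degraded);
 *	}
 *
 *	static struct md_sysfs_entry md_foo = __ATTR_RO(foo);
 *
 * &md_foo.attr would then be added to md_default_attrs[] (or to
 * md_redundancy_attrs[] for attributes that only make sense with a
 * sync-capable personality).
 */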
4961
4962 int mdp_major = 0;
4963
4964 static void mddev_delayed_delete(struct work_struct *ws)
4965 {
4966         struct mddev *mddev = container_of(ws, struct mddev, del_work);
4967
4968         sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
4969         kobject_del(&mddev->kobj);
4970         kobject_put(&mddev->kobj);
4971 }
4972
4973 static int md_alloc(dev_t dev, char *name)
4974 {
4975         static DEFINE_MUTEX(disks_mutex);
4976         struct mddev *mddev = mddev_find(dev);
4977         struct gendisk *disk;
4978         int partitioned;
4979         int shift;
4980         int unit;
4981         int error;
4982
4983         if (!mddev)
4984                 return -ENODEV;
4985
4986         partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
4987         shift = partitioned ? MdpMinorShift : 0;
4988         unit = MINOR(mddev->unit) >> shift;
4989
4990         /* wait for any previous instance of this device to be
4991          * completely removed (mddev_delayed_delete).
4992          */
4993         flush_workqueue(md_misc_wq);
4994
4995         mutex_lock(&disks_mutex);
4996         error = -EEXIST;
4997         if (mddev->gendisk)
4998                 goto abort;
4999
5000         if (name) {
5001                 /* Need to ensure that 'name' is not a duplicate.
5002                  */
5003                 struct mddev *mddev2;
5004                 spin_lock(&all_mddevs_lock);
5005
5006                 list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
5007                         if (mddev2->gendisk &&
5008                             strcmp(mddev2->gendisk->disk_name, name) == 0) {
5009                                 spin_unlock(&all_mddevs_lock);
5010                                 goto abort;
5011                         }
5012                 spin_unlock(&all_mddevs_lock);
5013         }
5014
5015         error = -ENOMEM;
5016         mddev->queue = blk_alloc_queue(GFP_KERNEL);
5017         if (!mddev->queue)
5018                 goto abort;
5019         mddev->queue->queuedata = mddev;
5020
5021         blk_queue_make_request(mddev->queue, md_make_request);
5022         blk_set_stacking_limits(&mddev->queue->limits);
5023
5024         disk = alloc_disk(1 << shift);
5025         if (!disk) {
5026                 blk_cleanup_queue(mddev->queue);
5027                 mddev->queue = NULL;
5028                 goto abort;
5029         }
5030         disk->major = MAJOR(mddev->unit);
5031         disk->first_minor = unit << shift;
5032         if (name)
5033                 strcpy(disk->disk_name, name);
5034         else if (partitioned)
5035                 sprintf(disk->disk_name, "md_d%d", unit);
5036         else
5037                 sprintf(disk->disk_name, "md%d", unit);
5038         disk->fops = &md_fops;
5039         disk->private_data = mddev;
5040         disk->queue = mddev->queue;
5041         blk_queue_write_cache(mddev->queue, true, true);
5042         /* Allow extended partitions.  This makes the
5043          * 'mdp' device redundant, but we can't really
5044          * remove it now.
5045          */
5046         disk->flags |= GENHD_FL_EXT_DEVT;
5047         mddev->gendisk = disk;
5048         /* As soon as we call add_disk(), another thread could get
5049          * through to md_open, so make sure it doesn't get too far
5050          */
5051         mutex_lock(&mddev->open_mutex);
5052         add_disk(disk);
5053
5054         error = kobject_init_and_add(&mddev->kobj, &md_ktype,
5055                                      &disk_to_dev(disk)->kobj, "%s", "md");
5056         if (error) {
5057                 /* This isn't possible, but as kobject_init_and_add is marked
5058                  * __must_check, we must do something with the result
5059                  */
5060                 pr_debug("md: cannot register %s/md - name in use\n",
5061                          disk->disk_name);
5062                 error = 0;
5063         }
5064         if (mddev->kobj.sd &&
5065             sysfs_create_group(&mddev->kobj, &md_bitmap_group))
5066                 pr_debug("pointless warning\n");
5067         mutex_unlock(&mddev->open_mutex);
5068  abort:
5069         mutex_unlock(&disks_mutex);
5070         if (!error && mddev->kobj.sd) {
5071                 kobject_uevent(&mddev->kobj, KOBJ_ADD);
5072                 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
5073         }
5074         mddev_put(mddev);
5075         return error;
5076 }
5077
5078 static struct kobject *md_probe(dev_t dev, int *part, void *data)
5079 {
5080         md_alloc(dev, NULL);
5081         return NULL;
5082 }
5083
5084 static int add_named_array(const char *val, struct kernel_param *kp)
5085 {
5086         /* val must be "md_*" where * is not all digits.
5087          * We allocate an array with a large free minor number, and
5088          * set the name to val.  val must not already be an active name.
5089          */
5090         int len = strlen(val);
5091         char buf[DISK_NAME_LEN];
5092
5093         while (len && val[len-1] == '\n')
5094                 len--;
5095         if (len >= DISK_NAME_LEN)
5096                 return -E2BIG;
5097         strlcpy(buf, val, len+1);
5098         if (strncmp(buf, "md_", 3) != 0)
5099                 return -EINVAL;
5100         return md_alloc(0, buf);
5101 }
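
/*
 * add_named_array() is a module-parameter handler, so a named
 * (non-numeric) array can be allocated before mdadm assembles it.  A
 * typical invocation (illustrative; the parameter and module names
 * depend on how this handler is registered and how md is built) is
 *
 *	echo md_home > /sys/module/md_mod/parameters/new_array
 *
 * which allocates an array whose gendisk is named "md_home".
 */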
5102
5103 static void md_safemode_timeout(unsigned long data)
5104 {
5105         struct mddev *mddev = (struct mddev *) data;
5106
5107         if (!atomic_read(&mddev->writes_pending)) {
5108                 mddev->safemode = 1;
5109                 if (mddev->external)
5110                         sysfs_notify_dirent_safe(mddev->sysfs_state);
5111         }
5112         md_wakeup_thread(mddev->thread);
5113 }
5114
5115 static int start_dirty_degraded;
5116
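/*
 * md_run() takes an assembled but inactive array and makes it live: it
 * analyses the superblocks if needed, loads and binds the personality
 * module, sanity-checks that data and metadata regions do not overlap,
 * creates the bitmap when one is configured, exposes the personality's
 * sysfs attributes, and finally flags that recovery may be needed.
 */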
5117 int md_run(struct mddev *mddev)
5118 {
5119         int err;
5120         struct md_rdev *rdev;
5121         struct md_personality *pers;
5122
5123         if (list_empty(&mddev->disks))
5124                 /* cannot run an array with no devices... */
5125                 return -EINVAL;
5126
5127         if (mddev->pers)
5128                 return -EBUSY;
5129         /* Cannot run until previous stop completes properly */
5130         if (mddev->sysfs_active)
5131                 return -EBUSY;
5132
5133         /*
5134          * Analyze all RAID superblock(s)
5135          */
5136         if (!mddev->raid_disks) {
5137                 if (!mddev->persistent)
5138                         return -EINVAL;
5139                 analyze_sbs(mddev);
5140         }
5141
5142         if (mddev->level != LEVEL_NONE)
5143                 request_module("md-level-%d", mddev->level);
5144         else if (mddev->clevel[0])
5145                 request_module("md-%s", mddev->clevel);
5146
5147         /*
5148          * Drop all container device buffers; from now on
5149          * the only valid external interface is through the md
5150          * device.
5151          */
5152         rdev_for_each(rdev, mddev) {
5153                 if (test_bit(Faulty, &rdev->flags))
5154                         continue;
5155                 sync_blockdev(rdev->bdev);
5156                 invalidate_bdev(rdev->bdev);
5157
5158                 /* perform some consistency tests on the device.
5159          * We don't want the data to overlap the metadata;
5160          * internal bitmap issues have been handled elsewhere.
5161                  */
5162                 if (rdev->meta_bdev) {
5163                         /* Nothing to check */;
5164                 } else if (rdev->data_offset < rdev->sb_start) {
5165                         if (mddev->dev_sectors &&
5166                             rdev->data_offset + mddev->dev_sectors
5167                             > rdev->sb_start) {
5168                                 pr_warn("md: %s: data overlaps metadata\n",
5169                                         mdname(mddev));
5170                                 return -EINVAL;
5171                         }
5172                 } else {
5173                         if (rdev->sb_start + rdev->sb_size/512
5174                             > rdev->data_offset) {
5175                                 pr_warn("md: %s: metadata overlaps data\n",
5176                                         mdname(mddev));
5177                                 return -EINVAL;
5178                         }
5179                 }
5180                 sysfs_notify_dirent_safe(rdev->sysfs_state);
5181         }
5182
5183         if (mddev->bio_set == NULL)
5184                 mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0);
5185
5186         spin_lock(&pers_lock);
5187         pers = find_pers(mddev->level, mddev->clevel);
5188         if (!pers || !try_module_get(pers->owner)) {
5189                 spin_unlock(&pers_lock);
5190                 if (mddev->level != LEVEL_NONE)
5191                         pr_warn("md: personality for level %d is not loaded!\n",
5192                                 mddev->level);
5193                 else
5194                         pr_warn("md: personality for level %s is not loaded!\n",
5195                                 mddev->clevel);
5196                 return -EINVAL;
5197         }
5198         spin_unlock(&pers_lock);
5199         if (mddev->level != pers->level) {
5200                 mddev->level = pers->level;
5201                 mddev->new_level = pers->level;
5202         }
5203         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
5204
5205         if (mddev->reshape_position != MaxSector &&
5206             pers->start_reshape == NULL) {
5207                 /* This personality cannot handle reshaping... */
5208                 module_put(pers->owner);
5209                 return -EINVAL;
5210         }
5211
5212         if (pers->sync_request) {
5213                 /* Warn if this is a potentially silly
5214                  * configuration.
5215                  */
5216                 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
5217                 struct md_rdev *rdev2;
5218                 int warned = 0;
5219
5220                 rdev_for_each(rdev, mddev)
5221                         rdev_for_each(rdev2, mddev) {
5222                                 if (rdev < rdev2 &&
5223                                     rdev->bdev->bd_contains ==
5224                                     rdev2->bdev->bd_contains) {
5225                                         pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n",
5226                                                 mdname(mddev),
5227                                                 bdevname(rdev->bdev,b),
5228                                                 bdevname(rdev2->bdev,b2));
5229                                         warned = 1;
5230                                 }
5231                         }
5232
5233                 if (warned)
5234                         pr_warn("True protection against single-disk failure might be compromised.\n");
5235         }
5236
5237         mddev->recovery = 0;
5238         /* may be overridden by the personality */
5239         mddev->resync_max_sectors = mddev->dev_sectors;
5240
5241         mddev->ok_start_degraded = start_dirty_degraded;
5242
5243         if (start_readonly && mddev->ro == 0)
5244                 mddev->ro = 2; /* read-only, but switch on first write */
5245
5246         err = pers->run(mddev);
5247         if (err)
5248                 pr_warn("md: pers->run() failed ...\n");
5249         else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
5250                 WARN_ONCE(!mddev->external_size,
5251                           "%s: default size too small, but 'external_size' not in effect?\n",
5252                           __func__);
5253                 pr_warn("md: invalid array_size %llu > default size %llu\n",
5254                         (unsigned long long)mddev->array_sectors / 2,
5255                         (unsigned long long)pers->size(mddev, 0, 0) / 2);
5256                 err = -EINVAL;
5257         }
5258         if (err == 0 && pers->sync_request &&
5259             (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
5260                 struct bitmap *bitmap;
5261
5262                 bitmap = bitmap_create(mddev, -1);
5263                 if (IS_ERR(bitmap)) {
5264                         err = PTR_ERR(bitmap);
5265                         pr_warn("%s: failed to create bitmap (%d)\n",
5266                                 mdname(mddev), err);
5267                 } else
5268                         mddev->bitmap = bitmap;
5269
5270         }
5271         if (err) {
5272                 mddev_detach(mddev);
5273                 if (mddev->private)
5274                         pers->free(mddev, mddev->private);
5275                 mddev->private = NULL;
5276                 module_put(pers->owner);
5277                 bitmap_destroy(mddev);
5278                 return err;
5279         }
5280         if (mddev->queue) {
5281                 bool nonrot = true;
5282
5283                 rdev_for_each(rdev, mddev) {
5284                         if (rdev->raid_disk >= 0 &&
5285                             !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
5286                                 nonrot = false;
5287                                 break;
5288                         }
5289                 }
5290                 if (mddev->degraded)
5291                         nonrot = false;
5292                 if (nonrot)
5293                         queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
5294                 else
5295                         queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
5296                 mddev->queue->backing_dev_info.congested_data = mddev;
5297                 mddev->queue->backing_dev_info.congested_fn = md_congested;
5298         }
5299         if (pers->sync_request) {
5300                 if (mddev->kobj.sd &&
5301                     sysfs_create_group(&mddev->kobj, &md_redundancy_group))
5302                         pr_warn("md: cannot register extra attributes for %s\n",
5303                                 mdname(mddev));
5304                 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
5305         } else if (mddev->ro == 2) /* auto-readonly not meaningful */
5306                 mddev->ro = 0;
5307
5308         atomic_set(&mddev->writes_pending,0);
5309         atomic_set(&mddev->max_corr_read_errors,
5310                    MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
5311         mddev->safemode = 0;
5312         if (mddev_is_clustered(mddev))
5313                 mddev->safemode_delay = 0;
5314         else
5315                 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
5316         mddev->in_sync = 1;
5317         smp_wmb();
5318         spin_lock(&mddev->lock);
5319         mddev->pers = pers;
5320         spin_unlock(&mddev->lock);
5321         rdev_for_each(rdev, mddev)
5322                 if (rdev->raid_disk >= 0)
5323                         if (sysfs_link_rdev(mddev, rdev))
5324                                 /* failure here is OK */;
5325
5326         if (mddev->degraded && !mddev->ro)
5327                 /* This ensures that recovering status is reported immediately
5328                  * via sysfs - until a lack of spares is confirmed.
5329                  */
5330                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
5331         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5332
5333         if (mddev->flags & MD_UPDATE_SB_FLAGS)
5334                 md_update_sb(mddev, 0);
5335
5336         md_new_event(mddev);
5337         sysfs_notify_dirent_safe(mddev->sysfs_state);
5338         sysfs_notify_dirent_safe(mddev->sysfs_action);
5339         sysfs_notify(&mddev->kobj, NULL, "degraded");
5340         return 0;
5341 }
5342 EXPORT_SYMBOL_GPL(md_run);
5343
5344 static int do_md_run(struct mddev *mddev)
5345 {
5346         int err;
5347
5348         err = md_run(mddev);
5349         if (err)
5350                 goto out;
5351         err = bitmap_load(mddev);
5352         if (err) {
5353                 bitmap_destroy(mddev);
5354                 goto out;
5355         }
5356
5357         if (mddev_is_clustered(mddev))
5358                 md_allow_write(mddev);
5359
5360         md_wakeup_thread(mddev->thread);
5361         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
5362
5363         set_capacity(mddev->gendisk, mddev->array_sectors);
5364         revalidate_disk(mddev->gendisk);
5365         mddev->changed = 1;
5366         kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
5367 out:
5368         return err;
5369 }
5370
5371 static int restart_array(struct mddev *mddev)
5372 {
5373         struct gendisk *disk = mddev->gendisk;
5374
5375         /* Complain if it has no devices */
5376         if (list_empty(&mddev->disks))
5377                 return -ENXIO;
5378         if (!mddev->pers)
5379                 return -EINVAL;
5380         if (!mddev->ro)
5381                 return -EBUSY;
5382         if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
5383                 struct md_rdev *rdev;
5384                 bool has_journal = false;
5385
5386                 rcu_read_lock();
5387                 rdev_for_each_rcu(rdev, mddev) {
5388                         if (test_bit(Journal, &rdev->flags) &&
5389                             !test_bit(Faulty, &rdev->flags)) {
5390                                 has_journal = true;
5391                                 break;
5392                         }
5393                 }
5394                 rcu_read_unlock();
5395
5396                 /* Don't restart rw with journal missing/faulty */
5397                 if (!has_journal)
5398                         return -EINVAL;
5399         }
5400
5401         mddev->safemode = 0;
5402         mddev->ro = 0;
5403         set_disk_ro(disk, 0);
5404         pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
5405         /* Kick recovery or resync if necessary */
5406         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5407         md_wakeup_thread(mddev->thread);
5408         md_wakeup_thread(mddev->sync_thread);
5409         sysfs_notify_dirent_safe(mddev->sysfs_state);
5410         return 0;
5411 }
5412
5413 static void md_clean(struct mddev *mddev)
5414 {
5415         mddev->array_sectors = 0;
5416         mddev->external_size = 0;
5417         mddev->dev_sectors = 0;
5418         mddev->raid_disks = 0;
5419         mddev->recovery_cp = 0;
5420         mddev->resync_min = 0;
5421         mddev->resync_max = MaxSector;
5422         mddev->reshape_position = MaxSector;
5423         mddev->external = 0;
5424         mddev->persistent = 0;
5425         mddev->level = LEVEL_NONE;
5426         mddev->clevel[0] = 0;
5427         mddev->flags = 0;
5428         mddev->ro = 0;
5429         mddev->metadata_type[0] = 0;
5430         mddev->chunk_sectors = 0;
5431         mddev->ctime = mddev->utime = 0;
5432         mddev->layout = 0;
5433         mddev->max_disks = 0;
5434         mddev->events = 0;
5435         mddev->can_decrease_events = 0;
5436         mddev->delta_disks = 0;
5437         mddev->reshape_backwards = 0;
5438         mddev->new_level = LEVEL_NONE;
5439         mddev->new_layout = 0;
5440         mddev->new_chunk_sectors = 0;
5441         mddev->curr_resync = 0;
5442         atomic64_set(&mddev->resync_mismatches, 0);
5443         mddev->suspend_lo = mddev->suspend_hi = 0;
5444         mddev->sync_speed_min = mddev->sync_speed_max = 0;
5445         mddev->recovery = 0;
5446         mddev->in_sync = 0;
5447         mddev->changed = 0;
5448         mddev->degraded = 0;
5449         mddev->safemode = 0;
5450         mddev->private = NULL;
5451         mddev->cluster_info = NULL;
5452         mddev->bitmap_info.offset = 0;
5453         mddev->bitmap_info.default_offset = 0;
5454         mddev->bitmap_info.default_space = 0;
5455         mddev->bitmap_info.chunksize = 0;
5456         mddev->bitmap_info.daemon_sleep = 0;
5457         mddev->bitmap_info.max_write_behind = 0;
5458         mddev->bitmap_info.nodes = 0;
5459 }
5460
5461 static void __md_stop_writes(struct mddev *mddev)
5462 {
5463         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5464         flush_workqueue(md_misc_wq);
5465         if (mddev->sync_thread) {
5466                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5467                 md_reap_sync_thread(mddev);
5468         }
5469
5470         del_timer_sync(&mddev->safemode_timer);
5471
5472         bitmap_flush(mddev);
5473         md_super_wait(mddev);
5474
5475         if (mddev->ro == 0 &&
5476             ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
5477              (mddev->flags & MD_UPDATE_SB_FLAGS))) {
5478                 /* mark the array as having been shut down cleanly */
5479                 if (!mddev_is_clustered(mddev))
5480                         mddev->in_sync = 1;
5481                 md_update_sb(mddev, 1);
5482         }
5483 }
5484
5485 void md_stop_writes(struct mddev *mddev)
5486 {
5487         mddev_lock_nointr(mddev);
5488         __md_stop_writes(mddev);
5489         mddev_unlock(mddev);
5490 }
5491 EXPORT_SYMBOL_GPL(md_stop_writes);
5492
5493 static void mddev_detach(struct mddev *mddev)
5494 {
5495         struct bitmap *bitmap = mddev->bitmap;
5496         /* wait for behind writes to complete */
5497         if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
5498                 pr_debug("md:%s: behind writes in progress - waiting to stop.\n",
5499                          mdname(mddev));
5500                 /* need to kick something here to make sure I/O goes? */
5501                 wait_event(bitmap->behind_wait,
5502                            atomic_read(&bitmap->behind_writes) == 0);
5503         }
5504         if (mddev->pers && mddev->pers->quiesce) {
5505                 mddev->pers->quiesce(mddev, 1);
5506                 mddev->pers->quiesce(mddev, 0);
5507         }
5508         md_unregister_thread(&mddev->thread);
5509         if (mddev->queue)
5510                 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
5511 }
5512
5513 static void __md_stop(struct mddev *mddev)
5514 {
5515         struct md_personality *pers = mddev->pers;
5516         mddev_detach(mddev);
5517         /* Ensure ->event_work is done */
5518         flush_workqueue(md_misc_wq);
5519         spin_lock(&mddev->lock);
5520         mddev->pers = NULL;
5521         spin_unlock(&mddev->lock);
5522         pers->free(mddev, mddev->private);
5523         mddev->private = NULL;
5524         if (pers->sync_request && mddev->to_remove == NULL)
5525                 mddev->to_remove = &md_redundancy_group;
5526         module_put(pers->owner);
5527         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5528 }
5529
5530 void md_stop(struct mddev *mddev)
5531 {
5532         /* stop the array and free any attached data structures.
5533          * This is called from dm-raid.
5534          */
5535         __md_stop(mddev);
5536         bitmap_destroy(mddev);
5537         if (mddev->bio_set)
5538                 bioset_free(mddev->bio_set);
5539 }
5540
5541 EXPORT_SYMBOL_GPL(md_stop);
5542
5543 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
5544 {
5545         int err = 0;
5546         int did_freeze = 0;
5547
5548         if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
5549                 did_freeze = 1;
5550                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5551                 md_wakeup_thread(mddev->thread);
5552         }
5553         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5554                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5555         if (mddev->sync_thread)
5556                 /* Thread might be blocked waiting for metadata update
5557                  * which will now never happen */
5558                 wake_up_process(mddev->sync_thread->tsk);
5559
5560         if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags))
5561                 return -EBUSY;
5562         mddev_unlock(mddev);
5563         wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
5564                                           &mddev->recovery));
5565         wait_event(mddev->sb_wait,
5566                    !test_bit(MD_CHANGE_PENDING, &mddev->flags));
5567         mddev_lock_nointr(mddev);
5568
5569         mutex_lock(&mddev->open_mutex);
5570         if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
5571             mddev->sync_thread ||
5572             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
5573                 pr_warn("md: %s still in use.\n",mdname(mddev));
5574                 if (did_freeze) {
5575                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5576                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5577                         md_wakeup_thread(mddev->thread);
5578                 }
5579                 err = -EBUSY;
5580                 goto out;
5581         }
5582         if (mddev->pers) {
5583                 __md_stop_writes(mddev);
5584
5585                 err  = -ENXIO;
5586                 if (mddev->ro==1)
5587                         goto out;
5588                 mddev->ro = 1;
5589                 set_disk_ro(mddev->gendisk, 1);
5590                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5591                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5592                 md_wakeup_thread(mddev->thread);
5593                 sysfs_notify_dirent_safe(mddev->sysfs_state);
5594                 err = 0;
5595         }
5596 out:
5597         mutex_unlock(&mddev->open_mutex);
5598         return err;
5599 }
5600
5601 /* mode:
5602  *   0 - completely stop and disassemble the array
5603  *   2 - stop but do not disassemble the array
5604  */
5605 static int do_md_stop(struct mddev *mddev, int mode,
5606                       struct block_device *bdev)
5607 {
5608         struct gendisk *disk = mddev->gendisk;
5609         struct md_rdev *rdev;
5610         int did_freeze = 0;
5611
5612         if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
5613                 did_freeze = 1;
5614                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5615                 md_wakeup_thread(mddev->thread);
5616         }
5617         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5618                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5619         if (mddev->sync_thread)
5620                 /* Thread might be blocked waiting for metadata update
5621                  * which will now never happen */
5622                 wake_up_process(mddev->sync_thread->tsk);
5623
5624         mddev_unlock(mddev);
5625         wait_event(resync_wait, (mddev->sync_thread == NULL &&
5626                                  !test_bit(MD_RECOVERY_RUNNING,
5627                                            &mddev->recovery)));
5628         mddev_lock_nointr(mddev);
5629
5630         mutex_lock(&mddev->open_mutex);
5631         if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
5632             mddev->sysfs_active ||
5633             mddev->sync_thread ||
5634             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
5635                 pr_warn("md: %s still in use.\n", mdname(mddev));
5636                 mutex_unlock(&mddev->open_mutex);
5637                 if (did_freeze) {
5638                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5639                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5640                         md_wakeup_thread(mddev->thread);
5641                 }
5642                 return -EBUSY;
5643         }
5644         if (mddev->pers) {
5645                 if (mddev->ro)
5646                         set_disk_ro(disk, 0);
5647
5648                 __md_stop_writes(mddev);
5649                 __md_stop(mddev);
5650                 mddev->queue->backing_dev_info.congested_fn = NULL;
5651
5652                 /* tell userspace to handle 'inactive' */
5653                 sysfs_notify_dirent_safe(mddev->sysfs_state);
5654
5655                 rdev_for_each(rdev, mddev)
5656                         if (rdev->raid_disk >= 0)
5657                                 sysfs_unlink_rdev(mddev, rdev);
5658
5659                 set_capacity(disk, 0);
5660                 mutex_unlock(&mddev->open_mutex);
5661                 mddev->changed = 1;
5662                 revalidate_disk(disk);
5663
5664                 if (mddev->ro)
5665                         mddev->ro = 0;
5666         } else
5667                 mutex_unlock(&mddev->open_mutex);
5668         /*
5669          * Free resources if final stop
5670          */
5671         if (mode == 0) {
5672                 pr_info("md: %s stopped.\n", mdname(mddev));
5673
5674                 bitmap_destroy(mddev);
5675                 if (mddev->bitmap_info.file) {
5676                         struct file *f = mddev->bitmap_info.file;
5677                         spin_lock(&mddev->lock);
5678                         mddev->bitmap_info.file = NULL;
5679                         spin_unlock(&mddev->lock);
5680                         fput(f);
5681                 }
5682                 mddev->bitmap_info.offset = 0;
5683
5684                 export_array(mddev);
5685
5686                 md_clean(mddev);
5687                 if (mddev->hold_active == UNTIL_STOP)
5688                         mddev->hold_active = 0;
5689         }
5690         md_new_event(mddev);
5691         sysfs_notify_dirent_safe(mddev->sysfs_state);
5692         return 0;
5693 }
5694
5695 #ifndef MODULE
5696 static void autorun_array(struct mddev *mddev)
5697 {
5698         struct md_rdev *rdev;
5699         int err;
5700
5701         if (list_empty(&mddev->disks))
5702                 return;
5703
5704         pr_info("md: running: ");
5705
5706         rdev_for_each(rdev, mddev) {
5707                 char b[BDEVNAME_SIZE];
5708                 pr_cont("<%s>", bdevname(rdev->bdev,b));
5709         }
5710         pr_cont("\n");
5711
5712         err = do_md_run(mddev);
5713         if (err) {
5714                 pr_warn("md: do_md_run() returned %d\n", err);
5715                 do_md_stop(mddev, 0, NULL);
5716         }
5717 }
5718
5719 /*
5720  * let's try to run arrays based on all disks that have arrived
5721  * until now. (those are in pending_raid_disks)
5722  *
5723  * the method: pick the first pending disk, collect all disks with
5724  * the same UUID, remove all from the pending list and put them into
5725  * the 'same_array' list. Then order this list based on superblock
5726  * update time (freshest comes first), kick out 'old' disks and
5727  * compare superblocks. If everything's fine then run it.
5728  *
5729  * If "unit" is allocated, then bump its reference count
5730  */
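/*
 * Worked example (illustrative device names): if pending_raid_disks holds
 * sda1 and sdb1 with one 0.90 UUID and sdc1 with another, the first pass
 * picks sda1 as rdev0, moves sda1+sdb1 onto 'candidates', binds them to a
 * freshly allocated mddev and runs it; sdc1 stays pending and is handled
 * by the next iteration of the loop below.
 */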
5731 static void autorun_devices(int part)
5732 {
5733         struct md_rdev *rdev0, *rdev, *tmp;
5734         struct mddev *mddev;
5735         char b[BDEVNAME_SIZE];
5736
5737         pr_info("md: autorun ...\n");
5738         while (!list_empty(&pending_raid_disks)) {
5739                 int unit;
5740                 dev_t dev;
5741                 LIST_HEAD(candidates);
5742                 rdev0 = list_entry(pending_raid_disks.next,
5743                                          struct md_rdev, same_set);
5744
5745                 pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev,b));
5746                 INIT_LIST_HEAD(&candidates);
5747                 rdev_for_each_list(rdev, tmp, &pending_raid_disks)
5748                         if (super_90_load(rdev, rdev0, 0) >= 0) {
5749                                 pr_debug("md:  adding %s ...\n",
5750                                          bdevname(rdev->bdev,b));
5751                                 list_move(&rdev->same_set, &candidates);
5752                         }
5753                 /*
5754                  * now we have a set of devices, with all of them having
5755                  * mostly sane superblocks. It's time to allocate the
5756                  * mddev.
5757                  */
5758                 if (part) {
5759                         dev = MKDEV(mdp_major,
5760                                     rdev0->preferred_minor << MdpMinorShift);
5761                         unit = MINOR(dev) >> MdpMinorShift;
5762                 } else {
5763                         dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
5764                         unit = MINOR(dev);
5765                 }
5766                 if (rdev0->preferred_minor != unit) {
5767                         pr_warn("md: unit number in %s is bad: %d\n",
5768                                 bdevname(rdev0->bdev, b), rdev0->preferred_minor);
5769                         break;
5770                 }
5771
5772                 md_probe(dev, NULL, NULL);
5773                 mddev = mddev_find(dev);
5774                 if (!mddev || !mddev->gendisk) {
5775                         if (mddev)
5776                                 mddev_put(mddev);
5777                         break;
5778                 }
5779                 if (mddev_lock(mddev))
5780                         pr_warn("md: %s locked, cannot run\n", mdname(mddev));
5781                 else if (mddev->raid_disks || mddev->major_version
5782                          || !list_empty(&mddev->disks)) {
5783                         pr_warn("md: %s already running, cannot run %s\n",
5784                                 mdname(mddev), bdevname(rdev0->bdev,b));
5785                         mddev_unlock(mddev);
5786                 } else {
5787                         pr_debug("md: created %s\n", mdname(mddev));
5788                         mddev->persistent = 1;
5789                         rdev_for_each_list(rdev, tmp, &candidates) {
5790                                 list_del_init(&rdev->same_set);
5791                                 if (bind_rdev_to_array(rdev, mddev))
5792                                         export_rdev(rdev);
5793                         }
5794                         autorun_array(mddev);
5795                         mddev_unlock(mddev);
5796                 }
5797                 /* on success, the candidates list will be empty; on
5798                  * error it won't be...
5799                  */
5800                 rdev_for_each_list(rdev, tmp, &candidates) {
5801                         list_del_init(&rdev->same_set);
5802                         export_rdev(rdev);
5803                 }
5804                 mddev_put(mddev);
5805         }
5806         pr_info("md: ... autorun DONE.\n");
5807 }
5808 #endif /* !MODULE */
5809
5810 static int get_version(void __user *arg)
5811 {
5812         mdu_version_t ver;
5813
5814         ver.major = MD_MAJOR_VERSION;
5815         ver.minor = MD_MINOR_VERSION;
5816         ver.patchlevel = MD_PATCHLEVEL_VERSION;
5817
5818         if (copy_to_user(arg, &ver, sizeof(ver)))
5819                 return -EFAULT;
5820
5821         return 0;
5822 }
5823
5824 static int get_array_info(struct mddev *mddev, void __user *arg)
5825 {
5826         mdu_array_info_t info;
5827         int nr,working,insync,failed,spare;
5828         struct md_rdev *rdev;
5829
5830         nr = working = insync = failed = spare = 0;
5831         rcu_read_lock();
5832         rdev_for_each_rcu(rdev, mddev) {
5833                 nr++;
5834                 if (test_bit(Faulty, &rdev->flags))
5835                         failed++;
5836                 else {
5837                         working++;
5838                         if (test_bit(In_sync, &rdev->flags))
5839                                 insync++;
5840                         else if (test_bit(Journal, &rdev->flags))
5841                                 /* TODO: add journal count to md_u.h */
5842                                 ;
5843                         else
5844                                 spare++;
5845                 }
5846         }
5847         rcu_read_unlock();
5848
5849         info.major_version = mddev->major_version;
5850         info.minor_version = mddev->minor_version;
5851         info.patch_version = MD_PATCHLEVEL_VERSION;
5852         info.ctime         = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
5853         info.level         = mddev->level;
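        /* info.size is reported in KiB (dev_sectors counts 512-byte sectors,
         * so halving it gives KiB).  The field is a plain int in
         * mdu_array_info_t, so component devices of 2 TiB or more cannot be
         * represented and are reported as -1 instead.
         */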
5854         info.size          = mddev->dev_sectors / 2;
5855         if (info.size != mddev->dev_sectors / 2) /* overflow */
5856                 info.size = -1;
5857         info.nr_disks      = nr;
5858         info.raid_disks    = mddev->raid_disks;
5859         info.md_minor      = mddev->md_minor;
5860         info.not_persistent= !mddev->persistent;
5861
5862         info.utime         = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
5863         info.state         = 0;
5864         if (mddev->in_sync)
5865                 info.state = (1<<MD_SB_CLEAN);
5866         if (mddev->bitmap && mddev->bitmap_info.offset)
5867                 info.state |= (1<<MD_SB_BITMAP_PRESENT);
5868         if (mddev_is_clustered(mddev))
5869                 info.state |= (1<<MD_SB_CLUSTERED);
5870         info.active_disks  = insync;
5871         info.working_disks = working;
5872         info.failed_disks  = failed;
5873         info.spare_disks   = spare;
5874
5875         info.layout        = mddev->layout;
5876         info.chunk_size    = mddev->chunk_sectors << 9;
5877
5878         if (copy_to_user(arg, &info, sizeof(info)))
5879                 return -EFAULT;
5880
5881         return 0;
5882 }
5883
5884 static int get_bitmap_file(struct mddev *mddev, void __user * arg)
5885 {
5886         mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
5887         char *ptr;
5888         int err;
5889
5890         file = kzalloc(sizeof(*file), GFP_NOIO);
5891         if (!file)
5892                 return -ENOMEM;
5893
5894         err = 0;
5895         spin_lock(&mddev->lock);
5896         /* bitmap enabled */
5897         if (mddev->bitmap_info.file) {
5898                 ptr = file_path(mddev->bitmap_info.file, file->pathname,
5899                                 sizeof(file->pathname));
5900                 if (IS_ERR(ptr))
5901                         err = PTR_ERR(ptr);
5902                 else
5903                         memmove(file->pathname, ptr,
5904                                 sizeof(file->pathname)-(ptr-file->pathname));
5905         }
5906         spin_unlock(&mddev->lock);
5907
5908         if (err == 0 &&
5909             copy_to_user(arg, file, sizeof(*file)))
5910                 err = -EFAULT;
5911
5912         kfree(file);
5913         return err;
5914 }
5915
5916 static int get_disk_info(struct mddev *mddev, void __user * arg)
5917 {
5918         mdu_disk_info_t info;
5919         struct md_rdev *rdev;
5920
5921         if (copy_from_user(&info, arg, sizeof(info)))
5922                 return -EFAULT;
5923
5924         rcu_read_lock();
5925         rdev = md_find_rdev_nr_rcu(mddev, info.number);
5926         if (rdev) {
5927                 info.major = MAJOR(rdev->bdev->bd_dev);
5928                 info.minor = MINOR(rdev->bdev->bd_dev);
5929                 info.raid_disk = rdev->raid_disk;
5930                 info.state = 0;
5931                 if (test_bit(Faulty, &rdev->flags))
5932                         info.state |= (1<<MD_DISK_FAULTY);
5933                 else if (test_bit(In_sync, &rdev->flags)) {
5934                         info.state |= (1<<MD_DISK_ACTIVE);
5935                         info.state |= (1<<MD_DISK_SYNC);
5936                 }
5937                 if (test_bit(Journal, &rdev->flags))
5938                         info.state |= (1<<MD_DISK_JOURNAL);
5939                 if (test_bit(WriteMostly, &rdev->flags))
5940                         info.state |= (1<<MD_DISK_WRITEMOSTLY);
5941         } else {
5942                 info.major = info.minor = 0;
5943                 info.raid_disk = -1;
5944                 info.state = (1<<MD_DISK_REMOVED);
5945         }
5946         rcu_read_unlock();
5947
5948         if (copy_to_user(arg, &info, sizeof(info)))
5949                 return -EFAULT;
5950
5951         return 0;
5952 }
5953
5954 static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
5955 {
5956         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
5957         struct md_rdev *rdev;
5958         dev_t dev = MKDEV(info->major,info->minor);
5959
5960         if (mddev_is_clustered(mddev) &&
5961                 !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
5962                 pr_warn("%s: Cannot add to clustered mddev.\n",
5963                         mdname(mddev));
5964                 return -EINVAL;
5965         }
5966
5967         if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
5968                 return -EOVERFLOW;
5969
5970         if (!mddev->raid_disks) {
5971                 int err;
5972                 /* expecting a device which has a superblock */
5973                 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
5974                 if (IS_ERR(rdev)) {
5975                         pr_warn("md: md_import_device returned %ld\n",
5976                                 PTR_ERR(rdev));
5977                         return PTR_ERR(rdev);
5978                 }
5979                 if (!list_empty(&mddev->disks)) {
5980                         struct md_rdev *rdev0
5981                                 = list_entry(mddev->disks.next,
5982                                              struct md_rdev, same_set);
5983                         err = super_types[mddev->major_version]
5984                                 .load_super(rdev, rdev0, mddev->minor_version);
5985                         if (err < 0) {
5986                                 pr_warn("md: %s has different UUID to %s\n",
5987                                         bdevname(rdev->bdev,b),
5988                                         bdevname(rdev0->bdev,b2));
5989                                 export_rdev(rdev);
5990                                 return -EINVAL;
5991                         }
5992                 }
5993                 err = bind_rdev_to_array(rdev, mddev);
5994                 if (err)
5995                         export_rdev(rdev);
5996                 return err;
5997         }
5998
5999         /*
6000          * add_new_disk can be used once the array is assembled
6001          * to add "hot spares".  They must already have a superblock
6002          * written
6003          */
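        /*
         * Illustrative sketch of the hot-spare path (hypothetical userspace
         * usage; field values depend on the device being added):
         *
         *	mdu_disk_info_t dinfo = { 0 };
         *	dinfo.major = 8;			// e.g. /dev/sdc1
         *	dinfo.minor = 33;
         *	ioctl(md_fd, ADD_NEW_DISK, &dinfo);
         *
         * which lands in the branch below once the array is running
         * (mddev->pers != NULL).
         */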
6004         if (mddev->pers) {
6005                 int err;
6006                 if (!mddev->pers->hot_add_disk) {
6007                         pr_warn("%s: personality does not support diskops!\n",
6008                                 mdname(mddev));
6009                         return -EINVAL;
6010                 }
6011                 if (mddev->persistent)
6012                         rdev = md_import_device(dev, mddev->major_version,
6013                                                 mddev->minor_version);
6014                 else
6015                         rdev = md_import_device(dev, -1, -1);
6016                 if (IS_ERR(rdev)) {
6017                         pr_warn("md: md_import_device returned %ld\n",
6018                                 PTR_ERR(rdev));
6019                         return PTR_ERR(rdev);
6020                 }
6021                 /* set saved_raid_disk if appropriate */
6022                 if (!mddev->persistent) {
6023                         if (info->state & (1<<MD_DISK_SYNC)  &&
6024                             info->raid_disk < mddev->raid_disks) {
6025                                 rdev->raid_disk = info->raid_disk;
6026                                 set_bit(In_sync, &rdev->flags);
6027                                 clear_bit(Bitmap_sync, &rdev->flags);
6028                         } else
6029                                 rdev->raid_disk = -1;
6030                         rdev->saved_raid_disk = rdev->raid_disk;
6031                 } else
6032                         super_types[mddev->major_version].
6033                                 validate_super(mddev, rdev);
6034                 if ((info->state & (1<<MD_DISK_SYNC)) &&
6035                      rdev->raid_disk != info->raid_disk) {
6036                         /* This was a hot-add request, but the superblock
6037                          * events don't match, so reject it.
6038                          */
6039                         export_rdev(rdev);
6040                         return -EINVAL;
6041                 }
6042
6043                 clear_bit(In_sync, &rdev->flags); /* just to be sure */
6044                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6045                         set_bit(WriteMostly, &rdev->flags);
6046                 else
6047                         clear_bit(WriteMostly, &rdev->flags);
6048
6049                 if (info->state & (1<<MD_DISK_JOURNAL)) {
6050                         struct md_rdev *rdev2;
6051                         bool has_journal = false;
6052
6053                         /* make sure there is no existing journal disk */
6054                         rdev_for_each(rdev2, mddev) {
6055                                 if (test_bit(Journal, &rdev2->flags)) {
6056                                         has_journal = true;
6057                                         break;
6058                                 }
6059                         }
6060                         if (has_journal) {
6061                                 export_rdev(rdev);
6062                                 return -EBUSY;
6063                         }
6064                         set_bit(Journal, &rdev->flags);
6065                 }
6066                 /*
6067                  * check whether the device shows up in other nodes
6068                  */
6069                 if (mddev_is_clustered(mddev)) {
6070                         if (info->state & (1 << MD_DISK_CANDIDATE))
6071                                 set_bit(Candidate, &rdev->flags);
6072                         else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
6073                                 /* --add initiated by this node */
6074                                 err = md_cluster_ops->add_new_disk(mddev, rdev);
6075                                 if (err) {
6076                                         export_rdev(rdev);
6077                                         return err;
6078                                 }
6079                         }
6080                 }
6081
6082                 rdev->raid_disk = -1;
6083                 err = bind_rdev_to_array(rdev, mddev);
6084
6085                 if (err)
6086                         export_rdev(rdev);
6087
6088                 if (mddev_is_clustered(mddev)) {
6089                         if (info->state & (1 << MD_DISK_CANDIDATE)) {
6090                                 if (!err) {
6091                                         err = md_cluster_ops->new_disk_ack(mddev,
6092                                                 err == 0);
6093                                         if (err)
6094                                                 md_kick_rdev_from_array(rdev);
6095                                 }
6096                         } else {
6097                                 if (err)
6098                                         md_cluster_ops->add_new_disk_cancel(mddev);
6099                                 else
6100                                         err = add_bound_rdev(rdev);
6101                         }
6102
6103                 } else if (!err)
6104                         err = add_bound_rdev(rdev);
6105
6106                 return err;
6107         }
6108
6109         /* otherwise, add_new_disk is only allowed
6110          * for major_version==0 superblocks
6111          */
6112         if (mddev->major_version != 0) {
6113                 pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev));
6114                 return -EINVAL;
6115         }
6116
6117         if (!(info->state & (1<<MD_DISK_FAULTY))) {
6118                 int err;
6119                 rdev = md_import_device(dev, -1, 0);
6120                 if (IS_ERR(rdev)) {
6121                         pr_warn("md: error, md_import_device() returned %ld\n",
6122                                 PTR_ERR(rdev));
6123                         return PTR_ERR(rdev);
6124                 }
6125                 rdev->desc_nr = info->number;
6126                 if (info->raid_disk < mddev->raid_disks)
6127                         rdev->raid_disk = info->raid_disk;
6128                 else
6129                         rdev->raid_disk = -1;
6130
6131                 if (rdev->raid_disk < mddev->raid_disks)
6132                         if (info->state & (1<<MD_DISK_SYNC))
6133                                 set_bit(In_sync, &rdev->flags);
6134
6135                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6136                         set_bit(WriteMostly, &rdev->flags);
6137
6138                 if (!mddev->persistent) {
6139                         pr_debug("md: nonpersistent superblock ...\n");
6140                         rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
6141                 } else
6142                         rdev->sb_start = calc_dev_sboffset(rdev);
6143                 rdev->sectors = rdev->sb_start;
6144
6145                 err = bind_rdev_to_array(rdev, mddev);
6146                 if (err) {
6147                         export_rdev(rdev);
6148                         return err;
6149                 }
6150         }
6151
6152         return 0;
6153 }
6154
6155 static int hot_remove_disk(struct mddev *mddev, dev_t dev)
6156 {
6157         char b[BDEVNAME_SIZE];
6158         struct md_rdev *rdev;
6159
6160         rdev = find_rdev(mddev, dev);
6161         if (!rdev)
6162                 return -ENXIO;
6163
6164         if (rdev->raid_disk < 0)
6165                 goto kick_rdev;
6166
6167         clear_bit(Blocked, &rdev->flags);
6168         remove_and_add_spares(mddev, rdev);
6169
6170         if (rdev->raid_disk >= 0)
6171                 goto busy;
6172
6173 kick_rdev:
6174         if (mddev_is_clustered(mddev))
6175                 md_cluster_ops->remove_disk(mddev, rdev);
6176
6177         md_kick_rdev_from_array(rdev);
6178         md_update_sb(mddev, 1);
6179         md_new_event(mddev);
6180
6181         return 0;
6182 busy:
6183         pr_debug("md: cannot remove active disk %s from %s ...\n",
6184                  bdevname(rdev->bdev,b), mdname(mddev));
6185         return -EBUSY;
6186 }
6187
6188 static int hot_add_disk(struct mddev *mddev, dev_t dev)
6189 {
6190         char b[BDEVNAME_SIZE];
6191         int err;
6192         struct md_rdev *rdev;
6193
6194         if (!mddev->pers)
6195                 return -ENODEV;
6196
6197         if (mddev->major_version != 0) {
6198                 pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n",
6199                         mdname(mddev));
6200                 return -EINVAL;
6201         }
6202         if (!mddev->pers->hot_add_disk) {
6203                 pr_warn("%s: personality does not support diskops!\n",
6204                         mdname(mddev));
6205                 return -EINVAL;
6206         }
6207
6208         rdev = md_import_device(dev, -1, 0);
6209         if (IS_ERR(rdev)) {
6210                 pr_warn("md: error, md_import_device() returned %ld\n",
6211                         PTR_ERR(rdev));
6212                 return -EINVAL;
6213         }
6214
6215         if (mddev->persistent)
6216                 rdev->sb_start = calc_dev_sboffset(rdev);
6217         else
6218                 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
6219
6220         rdev->sectors = rdev->sb_start;
6221
6222         if (test_bit(Faulty, &rdev->flags)) {
6223                 pr_warn("md: can not hot-add faulty %s disk to %s!\n",
6224                         bdevname(rdev->bdev,b), mdname(mddev));
6225                 err = -EINVAL;
6226                 goto abort_export;
6227         }
6228
6229         clear_bit(In_sync, &rdev->flags);
6230         rdev->desc_nr = -1;
6231         rdev->saved_raid_disk = -1;
6232         err = bind_rdev_to_array(rdev, mddev);
6233         if (err)
6234                 goto abort_export;
6235
6236         /*
6237          * The rest had better be atomic; we can have disk failures
6238          * noticed in interrupt contexts ...
6239          */
6240
6241         rdev->raid_disk = -1;
6242
6243         md_update_sb(mddev, 1);
6244         /*
6245          * Kick recovery, maybe this spare has to be added to the
6246          * array immediately.
6247          */
6248         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6249         md_wakeup_thread(mddev->thread);
6250         md_new_event(mddev);
6251         return 0;
6252
6253 abort_export:
6254         export_rdev(rdev);
6255         return err;
6256 }
6257
6258 static int set_bitmap_file(struct mddev *mddev, int fd)
6259 {
6260         int err = 0;
6261
6262         if (mddev->pers) {
6263                 if (!mddev->pers->quiesce || !mddev->thread)
6264                         return -EBUSY;
6265                 if (mddev->recovery || mddev->sync_thread)
6266                         return -EBUSY;
6267                 /* we should be able to change the bitmap.. */
6268         }
6269
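        /* Illustrative usage (hypothetical): userspace attaches an external
         * bitmap by passing an open fd of a regular, writable file, e.g.
         * ioctl(md_fd, SET_BITMAP_FILE, fd), and detaches it again with
         * ioctl(md_fd, SET_BITMAP_FILE, -1).
         */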
6270         if (fd >= 0) {
6271                 struct inode *inode;
6272                 struct file *f;
6273
6274                 if (mddev->bitmap || mddev->bitmap_info.file)
6275                         return -EEXIST; /* cannot add when bitmap is present */
6276                 f = fget(fd);
6277
6278                 if (f == NULL) {
6279                         pr_warn("%s: error: failed to get bitmap file\n",
6280                                 mdname(mddev));
6281                         return -EBADF;
6282                 }
6283
6284                 inode = f->f_mapping->host;
6285                 if (!S_ISREG(inode->i_mode)) {
6286                         pr_warn("%s: error: bitmap file must be a regular file\n",
6287                                 mdname(mddev));
6288                         err = -EBADF;
6289                 } else if (!(f->f_mode & FMODE_WRITE)) {
6290                         pr_warn("%s: error: bitmap file must be open for write\n",
6291                                 mdname(mddev));
6292                         err = -EBADF;
6293                 } else if (atomic_read(&inode->i_writecount) != 1) {
6294                         pr_warn("%s: error: bitmap file is already in use\n",
6295                                 mdname(mddev));
6296                         err = -EBUSY;
6297                 }
6298                 if (err) {
6299                         fput(f);
6300                         return err;
6301                 }
6302                 mddev->bitmap_info.file = f;
6303                 mddev->bitmap_info.offset = 0; /* file overrides offset */
6304         } else if (mddev->bitmap == NULL)
6305                 return -ENOENT; /* cannot remove what isn't there */
6306         err = 0;
6307         if (mddev->pers) {
6308                 mddev->pers->quiesce(mddev, 1);
6309                 if (fd >= 0) {
6310                         struct bitmap *bitmap;
6311
6312                         bitmap = bitmap_create(mddev, -1);
6313                         if (!IS_ERR(bitmap)) {
6314                                 mddev->bitmap = bitmap;
6315                                 err = bitmap_load(mddev);
6316                         } else
6317                                 err = PTR_ERR(bitmap);
6318                 }
6319                 if (fd < 0 || err) {
6320                         bitmap_destroy(mddev);
6321                         fd = -1; /* make sure to put the file */
6322                 }
6323                 mddev->pers->quiesce(mddev, 0);
6324         }
6325         if (fd < 0) {
6326                 struct file *f = mddev->bitmap_info.file;
6327                 if (f) {
6328                         spin_lock(&mddev->lock);
6329                         mddev->bitmap_info.file = NULL;
6330                         spin_unlock(&mddev->lock);
6331                         fput(f);
6332                 }
6333         }
6334
6335         return err;
6336 }
6337
6338 /*
6339  * set_array_info is used in two different ways.
6340  * The original usage is when creating a new array.
6341  * In this usage, raid_disks is > 0 and it together with
6342  *  level, size, not_persistent, layout, chunksize determine the
6343  *  shape of the array.
6344  *  This will always create an array with a type-0.90.0 superblock.
6345  * The newer usage is when assembling an array.
6346  *  In this case raid_disks will be 0, and the major_version field is
6347  *  used to determine which style super-blocks are to be found on the devices.
6348  *  The minor and patch _version numbers are also kept in case the
6349  *  super_block handler wishes to interpret them.
6350  */
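/*
 * Illustrative sketch (hypothetical userspace usage) of the two modes; a
 * caller uses one or the other, not both:
 *
 *	mdu_array_info_t info = { 0 };
 *
 *	// assemble: raid_disks == 0, only selects the superblock format
 *	info.major_version = 1;
 *	info.minor_version = 2;
 *	ioctl(md_fd, SET_ARRAY_INFO, &info);
 *
 *	// create: raid_disks > 0 shapes a fresh 0.90.0-superblock array
 *	info.raid_disks = 2;
 *	info.level = 1;
 *	info.size = 1048576;		// per-device size in KiB
 *	ioctl(md_fd, SET_ARRAY_INFO, &info);
 */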
6351 static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
6352 {
6353
6354         if (info->raid_disks == 0) {
6355                 /* just setting version number for superblock loading */
6356                 if (info->major_version < 0 ||
6357                     info->major_version >= ARRAY_SIZE(super_types) ||
6358                     super_types[info->major_version].name == NULL) {
6359                         /* maybe try to auto-load a module? */
6360                         pr_warn("md: superblock version %d not known\n",
6361                                 info->major_version);
6362                         return -EINVAL;
6363                 }
6364                 mddev->major_version = info->major_version;
6365                 mddev->minor_version = info->minor_version;
6366                 mddev->patch_version = info->patch_version;
6367                 mddev->persistent = !info->not_persistent;
6368                 /* ensure mddev_put doesn't delete this now that there
6369                  * is some minimal configuration.
6370                  */
6371                 mddev->ctime         = ktime_get_real_seconds();
6372                 return 0;
6373         }
6374         mddev->major_version = MD_MAJOR_VERSION;
6375         mddev->minor_version = MD_MINOR_VERSION;
6376         mddev->patch_version = MD_PATCHLEVEL_VERSION;
6377         mddev->ctime         = ktime_get_real_seconds();
6378
6379         mddev->level         = info->level;
6380         mddev->clevel[0]     = 0;
6381         mddev->dev_sectors   = 2 * (sector_t)info->size;
6382         mddev->raid_disks    = info->raid_disks;
6383         /* don't set md_minor, it is determined by which /dev/md* was
6384          * opened
6385          */
6386         if (info->state & (1<<MD_SB_CLEAN))
6387                 mddev->recovery_cp = MaxSector;
6388         else
6389                 mddev->recovery_cp = 0;
6390         mddev->persistent    = !info->not_persistent;
6391         mddev->external      = 0;
6392
6393         mddev->layout        = info->layout;
6394         mddev->chunk_sectors = info->chunk_size >> 9;
6395
6396         mddev->max_disks     = MD_SB_DISKS;
6397
6398         if (mddev->persistent)
6399                 mddev->flags         = 0;
6400         set_bit(MD_CHANGE_DEVS, &mddev->flags);
6401
6402         mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
6403         mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
6404         mddev->bitmap_info.offset = 0;
6405
6406         mddev->reshape_position = MaxSector;
6407
6408         /*
6409          * Generate a 128 bit UUID
6410          */
6411         get_random_bytes(mddev->uuid, 16);
6412
6413         mddev->new_level = mddev->level;
6414         mddev->new_chunk_sectors = mddev->chunk_sectors;
6415         mddev->new_layout = mddev->layout;
6416         mddev->delta_disks = 0;
6417         mddev->reshape_backwards = 0;
6418
6419         return 0;
6420 }
6421
6422 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
6423 {
6424         WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);
6425
6426         if (mddev->external_size)
6427                 return;
6428
6429         mddev->array_sectors = array_sectors;
6430 }
6431 EXPORT_SYMBOL(md_set_array_sectors);
6432
6433 static int update_size(struct mddev *mddev, sector_t num_sectors)
6434 {
6435         struct md_rdev *rdev;
6436         int rv;
6437         int fit = (num_sectors == 0);
6438
6439         /* cluster raid doesn't support update size */
6440         if (mddev_is_clustered(mddev))
6441                 return -EINVAL;
6442
6443         if (mddev->pers->resize == NULL)
6444                 return -EINVAL;
6445         /* The "num_sectors" is the number of sectors of each device that
6446          * is used.  This can only make sense for arrays with redundancy.
6447          * linear and raid0 always use whatever space is available. We can only
6448          * consider changing this number if no resync or reconstruction is
6449          * happening, and if the new size is acceptable. It must fit before the
6450          * sb_start or, if that is < data_offset, it must fit before the size
6451          * of each device.  If num_sectors is zero, we find the largest size
6452          * that fits.
6453          */
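        /* Worked example (illustrative numbers): with members offering
         * 1953125000 and 1953124000 usable sectors, num_sectors == 0 settles
         * on 1953124000 (the smallest); an explicit request larger than that
         * fails with -ENOSPC in the loop below.
         */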
6454         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
6455             mddev->sync_thread)
6456                 return -EBUSY;
6457         if (mddev->ro)
6458                 return -EROFS;
6459
6460         rdev_for_each(rdev, mddev) {
6461                 sector_t avail = rdev->sectors;
6462
6463                 if (fit && (num_sectors == 0 || num_sectors > avail))
6464                         num_sectors = avail;
6465                 if (avail < num_sectors)
6466                         return -ENOSPC;
6467         }
6468         rv = mddev->pers->resize(mddev, num_sectors);
6469         if (!rv)
6470                 revalidate_disk(mddev->gendisk);
6471         return rv;
6472 }
6473
6474 static int update_raid_disks(struct mddev *mddev, int raid_disks)
6475 {
6476         int rv;
6477         struct md_rdev *rdev;
6478         /* change the number of raid disks */
6479         if (mddev->pers->check_reshape == NULL)
6480                 return -EINVAL;
6481         if (mddev->ro)
6482                 return -EROFS;
6483         if (raid_disks <= 0 ||
6484             (mddev->max_disks && raid_disks >= mddev->max_disks))
6485                 return -EINVAL;
6486         if (mddev->sync_thread ||
6487             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
6488             mddev->reshape_position != MaxSector)
6489                 return -EBUSY;
6490
6491         rdev_for_each(rdev, mddev) {
6492                 if (mddev->raid_disks < raid_disks &&
6493                     rdev->data_offset < rdev->new_data_offset)
6494                         return -EINVAL;
6495                 if (mddev->raid_disks > raid_disks &&
6496                     rdev->data_offset > rdev->new_data_offset)
6497                         return -EINVAL;
6498         }
6499
6500         mddev->delta_disks = raid_disks - mddev->raid_disks;
6501         if (mddev->delta_disks < 0)
6502                 mddev->reshape_backwards = 1;
6503         else if (mddev->delta_disks > 0)
6504                 mddev->reshape_backwards = 0;
6505
6506         rv = mddev->pers->check_reshape(mddev);
6507         if (rv < 0) {
6508                 mddev->delta_disks = 0;
6509                 mddev->reshape_backwards = 0;
6510         }
6511         return rv;
6512 }
6513
6514 /*
6515  * update_array_info is used to change the configuration of an
6516  * on-line array.
6517  * The version, ctime, level, size, raid_disks, not_persistent, layout, chunk_size
6518  * fields in the info are checked against the array.
6519  * Any differences that cannot be handled will cause an error.
6520  * Normally, only one change can be managed at a time.
6521  */
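/*
 * Illustrative consequence of the "one change at a time" rule: a
 * SET_ARRAY_INFO update that asks for, say, both a new raid_disks value and
 * a new layout is rejected with -EINVAL below; userspace has to issue two
 * separate calls.
 */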
6522 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
6523 {
6524         int rv = 0;
6525         int cnt = 0;
6526         int state = 0;
6527
6528         /* calculate expected state, ignoring low bits */
6529         if (mddev->bitmap && mddev->bitmap_info.offset)
6530                 state |= (1 << MD_SB_BITMAP_PRESENT);
6531
6532         if (mddev->major_version != info->major_version ||
6533             mddev->minor_version != info->minor_version ||
6534 /*          mddev->patch_version != info->patch_version || */
6535             mddev->ctime         != info->ctime         ||
6536             mddev->level         != info->level         ||
6537 /*          mddev->layout        != info->layout        || */
6538             mddev->persistent    != !info->not_persistent ||
6539             mddev->chunk_sectors != info->chunk_size >> 9 ||
6540             /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
6541             ((state^info->state) & 0xfffffe00)
6542                 )
6543                 return -EINVAL;
6544         /* Check there is only one change */
6545         if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
6546                 cnt++;
6547         if (mddev->raid_disks != info->raid_disks)
6548                 cnt++;
6549         if (mddev->layout != info->layout)
6550                 cnt++;
6551         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
6552                 cnt++;
6553         if (cnt == 0)
6554                 return 0;
6555         if (cnt > 1)
6556                 return -EINVAL;
6557
6558         if (mddev->layout != info->layout) {
6559                 /* Change layout
6560                  * we don't need to do anything at the md level, the
6561                  * personality will take care of it all.
6562                  */
6563                 if (mddev->pers->check_reshape == NULL)
6564                         return -EINVAL;
6565                 else {
6566                         mddev->new_layout = info->layout;
6567                         rv = mddev->pers->check_reshape(mddev);
6568                         if (rv)
6569                                 mddev->new_layout = mddev->layout;
6570                         return rv;
6571                 }
6572         }
6573         if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
6574                 rv = update_size(mddev, (sector_t)info->size * 2);
6575
6576         if (mddev->raid_disks    != info->raid_disks)
6577                 rv = update_raid_disks(mddev, info->raid_disks);
6578
6579         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
6580                 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
6581                         rv = -EINVAL;
6582                         goto err;
6583                 }
6584                 if (mddev->recovery || mddev->sync_thread) {
6585                         rv = -EBUSY;
6586                         goto err;
6587                 }
6588                 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
6589                         struct bitmap *bitmap;
6590                         /* add the bitmap */
6591                         if (mddev->bitmap) {
6592                                 rv = -EEXIST;
6593                                 goto err;
6594                         }
6595                         if (mddev->bitmap_info.default_offset == 0) {
6596                                 rv = -EINVAL;
6597                                 goto err;
6598                         }
6599                         mddev->bitmap_info.offset =
6600                                 mddev->bitmap_info.default_offset;
6601                         mddev->bitmap_info.space =
6602                                 mddev->bitmap_info.default_space;
6603                         mddev->pers->quiesce(mddev, 1);
6604                         bitmap = bitmap_create(mddev, -1);
6605                         if (!IS_ERR(bitmap)) {
6606                                 mddev->bitmap = bitmap;
6607                                 rv = bitmap_load(mddev);
6608                         } else
6609                                 rv = PTR_ERR(bitmap);
6610                         if (rv)
6611                                 bitmap_destroy(mddev);
6612                         mddev->pers->quiesce(mddev, 0);
6613                 } else {
6614                         /* remove the bitmap */
6615                         if (!mddev->bitmap) {
6616                                 rv = -ENOENT;
6617                                 goto err;
6618                         }
6619                         if (mddev->bitmap->storage.file) {
6620                                 rv = -EINVAL;
6621                                 goto err;
6622                         }
6623                         if (mddev->bitmap_info.nodes) {
6624                                 /* take the PW lock on all the bitmaps */
6625                                 if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
6626                                         pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
6627                                         rv = -EPERM;
6628                                         md_cluster_ops->unlock_all_bitmaps(mddev);
6629                                         goto err;
6630                                 }
6631
6632                                 mddev->bitmap_info.nodes = 0;
6633                                 md_cluster_ops->leave(mddev);
6634                         }
6635                         mddev->pers->quiesce(mddev, 1);
6636                         bitmap_destroy(mddev);
6637                         mddev->pers->quiesce(mddev, 0);
6638                         mddev->bitmap_info.offset = 0;
6639                 }
6640         }
6641         md_update_sb(mddev, 1);
6642         return rv;
6643 err:
6644         return rv;
6645 }
6646
6647 static int set_disk_faulty(struct mddev *mddev, dev_t dev)
6648 {
6649         struct md_rdev *rdev;
6650         int err = 0;
6651
6652         if (mddev->pers == NULL)
6653                 return -ENODEV;
6654
6655         rcu_read_lock();
6656         rdev = find_rdev_rcu(mddev, dev);
6657         if (!rdev)
6658                 err =  -ENODEV;
6659         else {
6660                 md_error(mddev, rdev);
6661                 if (!test_bit(Faulty, &rdev->flags))
6662                         err = -EBUSY;
6663         }
6664         rcu_read_unlock();
6665         return err;
6666 }
6667
6668 /*
6669  * We have a problem here: there is no easy way to give a CHS
6670  * virtual geometry. We currently pretend that we have 2 heads,
6671  * 4 sectors (with a BIG number of cylinders...). This drives
6672  * dosfs just mad... ;-)
6673  */
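/*
 * Worked example (illustrative): a 1 TiB array is 2147483648 sectors, so
 * with 2 heads x 4 sectors the reported geometry is 2147483648 / 8 =
 * 268435456 cylinders.
 */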
6674 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
6675 {
6676         struct mddev *mddev = bdev->bd_disk->private_data;
6677
6678         geo->heads = 2;
6679         geo->sectors = 4;
6680         geo->cylinders = mddev->array_sectors / 8;
6681         return 0;
6682 }
6683
6684 static inline bool md_ioctl_valid(unsigned int cmd)
6685 {
6686         switch (cmd) {
6687         case ADD_NEW_DISK:
6688         case BLKROSET:
6689         case GET_ARRAY_INFO:
6690         case GET_BITMAP_FILE:
6691         case GET_DISK_INFO:
6692         case HOT_ADD_DISK:
6693         case HOT_REMOVE_DISK:
6694         case RAID_AUTORUN:
6695         case RAID_VERSION:
6696         case RESTART_ARRAY_RW:
6697         case RUN_ARRAY:
6698         case SET_ARRAY_INFO:
6699         case SET_BITMAP_FILE:
6700         case SET_DISK_FAULTY:
6701         case STOP_ARRAY:
6702         case STOP_ARRAY_RO:
6703         case CLUSTERED_DISK_NACK:
6704                 return true;
6705         default:
6706                 return false;
6707         }
6708 }
6709
6710 static int md_ioctl(struct block_device *bdev, fmode_t mode,
6711                         unsigned int cmd, unsigned long arg)
6712 {
6713         int err = 0;
6714         void __user *argp = (void __user *)arg;
6715         struct mddev *mddev = NULL;
6716         int ro;
6717
6718         if (!md_ioctl_valid(cmd))
6719                 return -ENOTTY;
6720
6721         switch (cmd) {
6722         case RAID_VERSION:
6723         case GET_ARRAY_INFO:
6724         case GET_DISK_INFO:
6725                 break;
6726         default:
6727                 if (!capable(CAP_SYS_ADMIN))
6728                         return -EACCES;
6729         }
6730
6731         /*
6732          * Commands dealing with the RAID driver but not any
6733          * particular array:
6734          */
6735         switch (cmd) {
6736         case RAID_VERSION:
6737                 err = get_version(argp);
6738                 goto out;
6739
6740 #ifndef MODULE
6741         case RAID_AUTORUN:
6742                 err = 0;
6743                 autostart_arrays(arg);
6744                 goto out;
6745 #endif
6746         default:;
6747         }
6748
6749         /*
6750          * Commands creating/starting a new array:
6751          */
6752
6753         mddev = bdev->bd_disk->private_data;
6754
6755         if (!mddev) {
6756                 BUG();
6757                 goto out;
6758         }
6759
6760         /* Some actions do not require the mutex */
6761         switch (cmd) {
6762         case GET_ARRAY_INFO:
6763                 if (!mddev->raid_disks && !mddev->external)
6764                         err = -ENODEV;
6765                 else
6766                         err = get_array_info(mddev, argp);
6767                 goto out;
6768
6769         case GET_DISK_INFO:
6770                 if (!mddev->raid_disks && !mddev->external)
6771                         err = -ENODEV;
6772                 else
6773                         err = get_disk_info(mddev, argp);
6774                 goto out;
6775
6776         case SET_DISK_FAULTY:
6777                 err = set_disk_faulty(mddev, new_decode_dev(arg));
6778                 goto out;
6779
6780         case GET_BITMAP_FILE:
6781                 err = get_bitmap_file(mddev, argp);
6782                 goto out;
6783
6784         }
6785
6786         if (cmd == ADD_NEW_DISK)
6787                 /* need to ensure md_delayed_delete() has completed */
6788                 flush_workqueue(md_misc_wq);
6789
6790         if (cmd == HOT_REMOVE_DISK)
6791                 /* need to ensure recovery thread has run */
6792                 wait_event_interruptible_timeout(mddev->sb_wait,
6793                                                  !test_bit(MD_RECOVERY_NEEDED,
6794                                                            &mddev->recovery),
6795                                                  msecs_to_jiffies(5000));
6796         if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
6797                 /* Need to flush page cache, and ensure no-one else opens
6798                  * and writes
6799                  */
6800                 mutex_lock(&mddev->open_mutex);
6801                 if (mddev->pers && atomic_read(&mddev->openers) > 1) {
6802                         mutex_unlock(&mddev->open_mutex);
6803                         err = -EBUSY;
6804                         goto out;
6805                 }
6806                 set_bit(MD_CLOSING, &mddev->flags);
6807                 mutex_unlock(&mddev->open_mutex);
6808                 sync_blockdev(bdev);
6809         }
6810         err = mddev_lock(mddev);
6811         if (err) {
6812                 pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n",
6813                          err, cmd);
6814                 goto out;
6815         }
6816
6817         if (cmd == SET_ARRAY_INFO) {
6818                 mdu_array_info_t info;
6819                 if (!arg)
6820                         memset(&info, 0, sizeof(info));
6821                 else if (copy_from_user(&info, argp, sizeof(info))) {
6822                         err = -EFAULT;
6823                         goto unlock;
6824                 }
6825                 if (mddev->pers) {
6826                         err = update_array_info(mddev, &info);
6827                         if (err) {
6828                                 pr_warn("md: couldn't update array info. %d\n", err);
6829                                 goto unlock;
6830                         }
6831                         goto unlock;
6832                 }
6833                 if (!list_empty(&mddev->disks)) {
6834                         pr_warn("md: array %s already has disks!\n", mdname(mddev));
6835                         err = -EBUSY;
6836                         goto unlock;
6837                 }
6838                 if (mddev->raid_disks) {
6839                         pr_warn("md: array %s already initialised!\n", mdname(mddev));
6840                         err = -EBUSY;
6841                         goto unlock;
6842                 }
6843                 err = set_array_info(mddev, &info);
6844                 if (err) {
6845                         pr_warn("md: couldn't set array info. %d\n", err);
6846                         goto unlock;
6847                 }
6848                 goto unlock;
6849         }
6850
6851         /*
6852          * Commands querying/configuring an existing array:
6853          */
6854         /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
6855          * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
6856         if ((!mddev->raid_disks && !mddev->external)
6857             && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
6858             && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
6859             && cmd != GET_BITMAP_FILE) {
6860                 err = -ENODEV;
6861                 goto unlock;
6862         }
6863
6864         /*
6865          * Commands even a read-only array can execute:
6866          */
6867         switch (cmd) {
6868         case RESTART_ARRAY_RW:
6869                 err = restart_array(mddev);
6870                 goto unlock;
6871
6872         case STOP_ARRAY:
6873                 err = do_md_stop(mddev, 0, bdev);
6874                 goto unlock;
6875
6876         case STOP_ARRAY_RO:
6877                 err = md_set_readonly(mddev, bdev);
6878                 goto unlock;
6879
6880         case HOT_REMOVE_DISK:
6881                 err = hot_remove_disk(mddev, new_decode_dev(arg));
6882                 goto unlock;
6883
6884         case ADD_NEW_DISK:
6885                 /* We can support ADD_NEW_DISK on read-only arrays
6886                  * only if we are re-adding a preexisting device.
6887                  * So require mddev->pers and MD_DISK_SYNC.
6888                  */
6889                 if (mddev->pers) {
6890                         mdu_disk_info_t info;
6891                         if (copy_from_user(&info, argp, sizeof(info)))
6892                                 err = -EFAULT;
6893                         else if (!(info.state & (1<<MD_DISK_SYNC)))
6894                                 /* Need to clear read-only for this */
6895                                 break;
6896                         else
6897                                 err = add_new_disk(mddev, &info);
6898                         goto unlock;
6899                 }
6900                 break;
6901
6902         case BLKROSET:
6903                 if (get_user(ro, (int __user *)(arg))) {
6904                         err = -EFAULT;
6905                         goto unlock;
6906                 }
6907                 err = -EINVAL;
6908
6909                 /* if the bdev is going readonly the value of mddev->ro
6910                  * does not matter, no writes are coming
6911                  */
6912                 if (ro)
6913                         goto unlock;
6914
6915                 /* are we already prepared for writes? */
6916                 if (mddev->ro != 1)
6917                         goto unlock;
6918
6919                 /* transitioning to readauto need only happen for
6920                  * arrays that call md_write_start
6921                  */
6922                 if (mddev->pers) {
6923                         err = restart_array(mddev);
6924                         if (err == 0) {
6925                                 mddev->ro = 2;
6926                                 set_disk_ro(mddev->gendisk, 0);
6927                         }
6928                 }
6929                 goto unlock;
6930         }
6931
6932         /*
6933          * The remaining ioctls are changing the state of the
6934          * superblock, so we do not allow them on read-only arrays.
6935          */
6936         if (mddev->ro && mddev->pers) {
6937                 if (mddev->ro == 2) {
6938                         mddev->ro = 0;
6939                         sysfs_notify_dirent_safe(mddev->sysfs_state);
6940                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6941                         /* mddev_unlock will wake thread */
6942                         /* If a device failed while we were read-only, we
6943                          * need to make sure the metadata is updated now.
6944                          */
6945                         if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
6946                                 mddev_unlock(mddev);
6947                                 wait_event(mddev->sb_wait,
6948                                            !test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
6949                                            !test_bit(MD_CHANGE_PENDING, &mddev->flags));
6950                                 mddev_lock_nointr(mddev);
6951                         }
6952                 } else {
6953                         err = -EROFS;
6954                         goto unlock;
6955                 }
6956         }
6957
6958         switch (cmd) {
6959         case ADD_NEW_DISK:
6960         {
6961                 mdu_disk_info_t info;
6962                 if (copy_from_user(&info, argp, sizeof(info)))
6963                         err = -EFAULT;
6964                 else
6965                         err = add_new_disk(mddev, &info);
6966                 goto unlock;
6967         }
6968
6969         case CLUSTERED_DISK_NACK:
6970                 if (mddev_is_clustered(mddev))
6971                         md_cluster_ops->new_disk_ack(mddev, false);
6972                 else
6973                         err = -EINVAL;
6974                 goto unlock;
6975
6976         case HOT_ADD_DISK:
6977                 err = hot_add_disk(mddev, new_decode_dev(arg));
6978                 goto unlock;
6979
6980         case RUN_ARRAY:
6981                 err = do_md_run(mddev);
6982                 goto unlock;
6983
6984         case SET_BITMAP_FILE:
6985                 err = set_bitmap_file(mddev, (int)arg);
6986                 goto unlock;
6987
6988         default:
6989                 err = -EINVAL;
6990                 goto unlock;
6991         }
6992
6993 unlock:
6994         if (mddev->hold_active == UNTIL_IOCTL &&
6995             err != -EINVAL)
6996                 mddev->hold_active = 0;
6997         mddev_unlock(mddev);
6998 out:
6999         return err;
7000 }
7001 #ifdef CONFIG_COMPAT
7002 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
7003                     unsigned int cmd, unsigned long arg)
7004 {
7005         switch (cmd) {
7006         case HOT_REMOVE_DISK:
7007         case HOT_ADD_DISK:
7008         case SET_DISK_FAULTY:
7009         case SET_BITMAP_FILE:
7010                 /* These take in integer arg, do not convert */
7011                 break;
7012         default:
7013                 arg = (unsigned long)compat_ptr(arg);
7014                 break;
7015         }
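        /* Illustrative distinction: SET_BITMAP_FILE carries the fd itself in
         * 'arg', so compat_ptr() must not be applied to it, whereas
         * ADD_NEW_DISK passes a pointer to an mdu_disk_info_t and takes the
         * default compat_ptr() conversion above.
         */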
7016
7017         return md_ioctl(bdev, mode, cmd, arg);
7018 }
7019 #endif /* CONFIG_COMPAT */
7020
7021 static int md_open(struct block_device *bdev, fmode_t mode)
7022 {
7023         /*
7024          * Succeed if we can lock the mddev, which confirms that
7025          * it isn't being stopped right now.
7026          */
7027         struct mddev *mddev = mddev_find(bdev->bd_dev);
7028         int err;
7029
7030         if (!mddev)
7031                 return -ENODEV;
7032
7033         if (mddev->gendisk != bdev->bd_disk) {
7034                 /* we are racing with mddev_put which is discarding this
7035                  * bd_disk.
7036                  */
7037                 mddev_put(mddev);
7038                 /* Wait until bdev->bd_disk is definitely gone */
7039                 flush_workqueue(md_misc_wq);
7040                 /* Then retry the open from the top */
7041                 return -ERESTARTSYS;
7042         }
7043         BUG_ON(mddev != bdev->bd_disk->private_data);
7044
7045         if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
7046                 goto out;
7047
7048         if (test_bit(MD_CLOSING, &mddev->flags)) {
7049                 mutex_unlock(&mddev->open_mutex);
7050                 return -ENODEV;
7051         }
7052
7053         err = 0;
7054         atomic_inc(&mddev->openers);
7055         mutex_unlock(&mddev->open_mutex);
7056
7057         check_disk_change(bdev);
7058  out:
7059         return err;
7060 }
7061
7062 static void md_release(struct gendisk *disk, fmode_t mode)
7063 {
7064         struct mddev *mddev = disk->private_data;
7065
7066         BUG_ON(!mddev);
7067         atomic_dec(&mddev->openers);
7068         mddev_put(mddev);
7069 }
7070
7071 static int md_media_changed(struct gendisk *disk)
7072 {
7073         struct mddev *mddev = disk->private_data;
7074
7075         return mddev->changed;
7076 }
7077
7078 static int md_revalidate(struct gendisk *disk)
7079 {
7080         struct mddev *mddev = disk->private_data;
7081
7082         mddev->changed = 0;
7083         return 0;
7084 }
7085 static const struct block_device_operations md_fops =
7086 {
7087         .owner          = THIS_MODULE,
7088         .open           = md_open,
7089         .release        = md_release,
7090         .ioctl          = md_ioctl,
7091 #ifdef CONFIG_COMPAT
7092         .compat_ioctl   = md_compat_ioctl,
7093 #endif
7094         .getgeo         = md_getgeo,
7095         .media_changed  = md_media_changed,
7096         .revalidate_disk= md_revalidate,
7097 };
7098
7099 static int md_thread(void *arg)
7100 {
7101         struct md_thread *thread = arg;
7102
7103         /*
7104          * md_thread is a 'system-thread', its priority should be very
7105          * high. We avoid resource deadlocks individually in each
7106          * raid personality. (RAID5 does preallocation) We also use RR and
7107          * the very same RT priority as kswapd, thus we will never get
7108          * into a priority inversion deadlock.
7109          *
7110          * we definitely have to have equal or higher priority than
7111          * bdflush, otherwise bdflush will deadlock if there are too
7112          * many dirty RAID5 blocks.
7113          */
7114
7115         allow_signal(SIGKILL);
7116         while (!kthread_should_stop()) {
7117
7118                 /* We need to wait INTERRUPTIBLE so that
7119                  * we don't add to the load-average.
7120                  * That means we need to be sure no signals are
7121                  * pending
7122                  */
7123                 if (signal_pending(current))
7124                         flush_signals(current);
7125
7126                 wait_event_interruptible_timeout
7127                         (thread->wqueue,
7128                          test_bit(THREAD_WAKEUP, &thread->flags)
7129                          || kthread_should_stop(),
7130                          thread->timeout);
7131
7132                 clear_bit(THREAD_WAKEUP, &thread->flags);
7133                 if (!kthread_should_stop())
7134                         thread->run(thread);
7135         }
7136
7137         return 0;
7138 }
7139
7140 void md_wakeup_thread(struct md_thread *thread)
7141 {
7142         if (thread) {
7143                 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
7144                 set_bit(THREAD_WAKEUP, &thread->flags);
7145                 wake_up(&thread->wqueue);
7146         }
7147 }
7148 EXPORT_SYMBOL(md_wakeup_thread);
7149
7150 struct md_thread *md_register_thread(void (*run) (struct md_thread *),
7151                 struct mddev *mddev, const char *name)
7152 {
7153         struct md_thread *thread;
7154
7155         thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
7156         if (!thread)
7157                 return NULL;
7158
7159         init_waitqueue_head(&thread->wqueue);
7160
7161         thread->run = run;
7162         thread->mddev = mddev;
7163         thread->timeout = MAX_SCHEDULE_TIMEOUT;
7164         thread->tsk = kthread_run(md_thread, thread,
7165                                   "%s_%s",
7166                                   mdname(thread->mddev),
7167                                   name);
7168         if (IS_ERR(thread->tsk)) {
7169                 kfree(thread);
7170                 return NULL;
7171         }
7172         return thread;
7173 }
7174 EXPORT_SYMBOL(md_register_thread);
7175
7176 void md_unregister_thread(struct md_thread **threadp)
7177 {
7178         struct md_thread *thread = *threadp;
7179         if (!thread)
7180                 return;
7181         pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
7182         /* Locking ensures that mddev_unlock does not wake_up a
7183          * non-existent thread
7184          */
7185         spin_lock(&pers_lock);
7186         *threadp = NULL;
7187         spin_unlock(&pers_lock);
7188
7189         kthread_stop(thread->tsk);
7190         kfree(thread);
7191 }
7192 EXPORT_SYMBOL(md_unregister_thread);
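
/*
 * Illustrative usage sketch (hypothetical personality code, not taken from
 * this file): a personality typically registers its main thread while the
 * array is being started and tears it down again on stop, e.g.
 *
 *	mddev->thread = md_register_thread(raidX_daemon, mddev, "raidX");
 *	if (!mddev->thread)
 *		return -ENOMEM;
 *	...
 *	md_unregister_thread(&mddev->thread);	(NULLs the pointer, stops the task)
 */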
7193
7194 void md_error(struct mddev *mddev, struct md_rdev *rdev)
7195 {
7196         if (!rdev || test_bit(Faulty, &rdev->flags))
7197                 return;
7198
7199         if (!mddev->pers || !mddev->pers->error_handler)
7200                 return;
7201         mddev->pers->error_handler(mddev, rdev);
7202         if (mddev->degraded)
7203                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
7204         sysfs_notify_dirent_safe(rdev->sysfs_state);
7205         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7206         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7207         md_wakeup_thread(mddev->thread);
7208         if (mddev->event_work.func)
7209                 queue_work(md_misc_wq, &mddev->event_work);
7210         md_new_event(mddev);
7211 }
7212 EXPORT_SYMBOL(md_error);
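
/*
 * Illustrative call site (hypothetical, for clarity only): a personality that
 * hits a fatal error on a member device reports it with
 *
 *	md_error(rdev->mddev, rdev);
 *
 * which runs ->error_handler(), marks the recovery interrupted/needed and
 * wakes the per-array thread, as implemented above.
 */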
7213
7214 /* seq_file implementation /proc/mdstat */
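
/*
 * The iterator below uses two sentinel "records": (void *)1 stands for the
 * header line (the list of personalities) and (void *)2 for the trailing
 * "unused devices" line.  Everything in between is a struct mddev taken from
 * all_mddevs, with a reference held while it is the current record.
 */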
7215
7216 static void status_unused(struct seq_file *seq)
7217 {
7218         int i = 0;
7219         struct md_rdev *rdev;
7220
7221         seq_printf(seq, "unused devices: ");
7222
7223         list_for_each_entry(rdev, &pending_raid_disks, same_set) {
7224                 char b[BDEVNAME_SIZE];
7225                 i++;
7226                 seq_printf(seq, "%s ",
7227                               bdevname(rdev->bdev,b));
7228         }
7229         if (!i)
7230                 seq_printf(seq, "<none>");
7231
7232         seq_printf(seq, "\n");
7233 }
7234
7235 static int status_resync(struct seq_file *seq, struct mddev *mddev)
7236 {
7237         sector_t max_sectors, resync, res;
7238         unsigned long dt, db;
7239         sector_t rt;
7240         int scale;
7241         unsigned int per_milli;
7242
7243         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
7244             test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7245                 max_sectors = mddev->resync_max_sectors;
7246         else
7247                 max_sectors = mddev->dev_sectors;
7248
7249         resync = mddev->curr_resync;
7250         if (resync <= 3) {
7251                 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
7252                         /* Still cleaning up */
7253                         resync = max_sectors;
7254         } else
7255                 resync -= atomic_read(&mddev->recovery_active);
7256
7257         if (resync == 0) {
7258                 if (mddev->recovery_cp < MaxSector) {
7259                         seq_printf(seq, "\tresync=PENDING");
7260                         return 1;
7261                 }
7262                 return 0;
7263         }
7264         if (resync < 3) {
7265                 seq_printf(seq, "\tresync=DELAYED");
7266                 return 1;
7267         }
7268
7269         WARN_ON(max_sectors == 0);
7270         /* Pick 'scale' such that (resync>>scale)*1000 will fit
7271          * in a sector_t, and (max_sectors>>scale) will fit in a
7272          * u32, as those are the requirements for sector_div.
7273          * Thus 'scale' must be at least 10
7274          */
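        /*
         * Worked example with made-up numbers: for a 16TiB array,
         * max_sectors = 2^35, so max_sectors/2 = 2^34 never exceeds
         * 1ULL<<42 and 'scale' stays at 10.  Half-way through,
         * resync = 2^34 and res = ((2^34>>10)*1000) / ((2^35>>10)+1),
         * which is roughly 500 per-mille, i.e. 50.0%.
         */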
7275         scale = 10;
7276         if (sizeof(sector_t) > sizeof(unsigned long)) {
7277                 while ( max_sectors/2 > (1ULL<<(scale+32)))
7278                         scale++;
7279         }
7280         res = (resync>>scale)*1000;
7281         sector_div(res, (u32)((max_sectors>>scale)+1));
7282
7283         per_milli = res;
7284         {
7285                 int i, x = per_milli/50, y = 20-x;
7286                 seq_printf(seq, "[");
7287                 for (i = 0; i < x; i++)
7288                         seq_printf(seq, "=");
7289                 seq_printf(seq, ">");
7290                 for (i = 0; i < y; i++)
7291                         seq_printf(seq, ".");
7292                 seq_printf(seq, "] ");
7293         }
7294         seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
7295                    (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
7296                     "reshape" :
7297                     (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
7298                      "check" :
7299                      (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
7300                       "resync" : "recovery"))),
7301                    per_milli/10, per_milli % 10,
7302                    (unsigned long long) resync/2,
7303                    (unsigned long long) max_sectors/2);
7304
7305         /*
7306          * dt: time from mark until now
7307          * db: blocks written from mark until now
7308          * rt: remaining time
7309          *
7310          * rt is a sector_t, so could be 32bit or 64bit.
7311          * So we divide before multiply in case it is 32bit and close
7312          * to the limit.
7313          * We scale the divisor (db) by 32 to avoid losing precision
7314          * near the end of resync when the number of remaining sectors
7315          * is close to 'db'.
7316          * We then divide rt by 32 after multiplying by db to compensate.
7317          * The '+1' avoids division by zero if db is very small.
7318          */
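        /*
         * Rough numeric example (illustrative values only): with
         * 1,000,000 sectors left, db = 2048 sectors in dt = 1 second,
         * rt = (1,000,000 / (2048/32 + 1)) * 1 >> 5 = 480, close to the
         * 488 seconds the remaining work would really take at
         * 2048 sectors/sec; the small difference is rounding.
         */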
7319         dt = ((jiffies - mddev->resync_mark) / HZ);
7320         if (!dt) dt++;
7321         db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
7322                 - mddev->resync_mark_cnt;
7323
7324         rt = max_sectors - resync;    /* number of remaining sectors */
7325         sector_div(rt, db/32+1);
7326         rt *= dt;
7327         rt >>= 5;
7328
7329         seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
7330                    ((unsigned long)rt % 60)/6);
7331
7332         seq_printf(seq, " speed=%ldK/sec", db/2/dt);
7333         return 1;
7334 }
7335
7336 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
7337 {
7338         struct list_head *tmp;
7339         loff_t l = *pos;
7340         struct mddev *mddev;
7341
7342         if (l >= 0x10000)
7343                 return NULL;
7344         if (!l--)
7345                 /* header */
7346                 return (void*)1;
7347
7348         spin_lock(&all_mddevs_lock);
7349         list_for_each(tmp,&all_mddevs)
7350                 if (!l--) {
7351                         mddev = list_entry(tmp, struct mddev, all_mddevs);
7352                         mddev_get(mddev);
7353                         spin_unlock(&all_mddevs_lock);
7354                         return mddev;
7355                 }
7356         spin_unlock(&all_mddevs_lock);
7357         if (!l--)
7358                 return (void*)2;/* tail */
7359         return NULL;
7360 }
7361
7362 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
7363 {
7364         struct list_head *tmp;
7365         struct mddev *next_mddev, *mddev = v;
7366
7367         ++*pos;
7368         if (v == (void*)2)
7369                 return NULL;
7370
7371         spin_lock(&all_mddevs_lock);
7372         if (v == (void*)1)
7373                 tmp = all_mddevs.next;
7374         else
7375                 tmp = mddev->all_mddevs.next;
7376         if (tmp != &all_mddevs)
7377                 next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
7378         else {
7379                 next_mddev = (void*)2;
7380                 *pos = 0x10000;
7381         }
7382         spin_unlock(&all_mddevs_lock);
7383
7384         if (v != (void*)1)
7385                 mddev_put(mddev);
7386         return next_mddev;
7387
7388 }
7389
7390 static void md_seq_stop(struct seq_file *seq, void *v)
7391 {
7392         struct mddev *mddev = v;
7393
7394         if (mddev && v != (void*)1 && v != (void*)2)
7395                 mddev_put(mddev);
7396 }
7397
7398 static int md_seq_show(struct seq_file *seq, void *v)
7399 {
7400         struct mddev *mddev = v;
7401         sector_t sectors;
7402         struct md_rdev *rdev;
7403
7404         if (v == (void*)1) {
7405                 struct md_personality *pers;
7406                 seq_printf(seq, "Personalities : ");
7407                 spin_lock(&pers_lock);
7408                 list_for_each_entry(pers, &pers_list, list)
7409                         seq_printf(seq, "[%s] ", pers->name);
7410
7411                 spin_unlock(&pers_lock);
7412                 seq_printf(seq, "\n");
7413                 seq->poll_event = atomic_read(&md_event_count);
7414                 return 0;
7415         }
7416         if (v == (void*)2) {
7417                 status_unused(seq);
7418                 return 0;
7419         }
7420
7421         spin_lock(&mddev->lock);
7422         if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
7423                 seq_printf(seq, "%s : %sactive", mdname(mddev),
7424                                                 mddev->pers ? "" : "in");
7425                 if (mddev->pers) {
7426                         if (mddev->ro==1)
7427                                 seq_printf(seq, " (read-only)");
7428                         if (mddev->ro==2)
7429                                 seq_printf(seq, " (auto-read-only)");
7430                         seq_printf(seq, " %s", mddev->pers->name);
7431                 }
7432
7433                 sectors = 0;
7434                 rcu_read_lock();
7435                 rdev_for_each_rcu(rdev, mddev) {
7436                         char b[BDEVNAME_SIZE];
7437                         seq_printf(seq, " %s[%d]",
7438                                 bdevname(rdev->bdev,b), rdev->desc_nr);
7439                         if (test_bit(WriteMostly, &rdev->flags))
7440                                 seq_printf(seq, "(W)");
7441                         if (test_bit(Journal, &rdev->flags))
7442                                 seq_printf(seq, "(J)");
7443                         if (test_bit(Faulty, &rdev->flags)) {
7444                                 seq_printf(seq, "(F)");
7445                                 continue;
7446                         }
7447                         if (rdev->raid_disk < 0)
7448                                 seq_printf(seq, "(S)"); /* spare */
7449                         if (test_bit(Replacement, &rdev->flags))
7450                                 seq_printf(seq, "(R)");
7451                         sectors += rdev->sectors;
7452                 }
7453                 rcu_read_unlock();
7454
7455                 if (!list_empty(&mddev->disks)) {
7456                         if (mddev->pers)
7457                                 seq_printf(seq, "\n      %llu blocks",
7458                                            (unsigned long long)
7459                                            mddev->array_sectors / 2);
7460                         else
7461                                 seq_printf(seq, "\n      %llu blocks",
7462                                            (unsigned long long)sectors / 2);
7463                 }
7464                 if (mddev->persistent) {
7465                         if (mddev->major_version != 0 ||
7466                             mddev->minor_version != 90) {
7467                                 seq_printf(seq," super %d.%d",
7468                                            mddev->major_version,
7469                                            mddev->minor_version);
7470                         }
7471                 } else if (mddev->external)
7472                         seq_printf(seq, " super external:%s",
7473                                    mddev->metadata_type);
7474                 else
7475                         seq_printf(seq, " super non-persistent");
7476
7477                 if (mddev->pers) {
7478                         mddev->pers->status(seq, mddev);
7479                         seq_printf(seq, "\n      ");
7480                         if (mddev->pers->sync_request) {
7481                                 if (status_resync(seq, mddev))
7482                                         seq_printf(seq, "\n      ");
7483                         }
7484                 } else
7485                         seq_printf(seq, "\n       ");
7486
7487                 bitmap_status(seq, mddev->bitmap);
7488
7489                 seq_printf(seq, "\n");
7490         }
7491         spin_unlock(&mddev->lock);
7492
7493         return 0;
7494 }
7495
7496 static const struct seq_operations md_seq_ops = {
7497         .start  = md_seq_start,
7498         .next   = md_seq_next,
7499         .stop   = md_seq_stop,
7500         .show   = md_seq_show,
7501 };
7502
7503 static int md_seq_open(struct inode *inode, struct file *file)
7504 {
7505         struct seq_file *seq;
7506         int error;
7507
7508         error = seq_open(file, &md_seq_ops);
7509         if (error)
7510                 return error;
7511
7512         seq = file->private_data;
7513         seq->poll_event = atomic_read(&md_event_count);
7514         return error;
7515 }
7516
7517 static int md_unloading;
7518 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
7519 {
7520         struct seq_file *seq = filp->private_data;
7521         int mask;
7522
7523         if (md_unloading)
7524                 return POLLIN|POLLRDNORM|POLLERR|POLLPRI;
7525         poll_wait(filp, &md_event_waiters, wait);
7526
7527         /* always allow read */
7528         mask = POLLIN | POLLRDNORM;
7529
7530         if (seq->poll_event != atomic_read(&md_event_count))
7531                 mask |= POLLERR | POLLPRI;
7532         return mask;
7533 }
7534
7535 static const struct file_operations md_seq_fops = {
7536         .owner          = THIS_MODULE,
7537         .open           = md_seq_open,
7538         .read           = seq_read,
7539         .llseek         = seq_lseek,
7540         .release        = seq_release_private,
7541         .poll           = mdstat_poll,
7542 };
7543
7544 int register_md_personality(struct md_personality *p)
7545 {
7546         pr_debug("md: %s personality registered for level %d\n",
7547                  p->name, p->level);
7548         spin_lock(&pers_lock);
7549         list_add_tail(&p->list, &pers_list);
7550         spin_unlock(&pers_lock);
7551         return 0;
7552 }
7553 EXPORT_SYMBOL(register_md_personality);
7554
7555 int unregister_md_personality(struct md_personality *p)
7556 {
7557         pr_debug("md: %s personality unregistered\n", p->name);
7558         spin_lock(&pers_lock);
7559         list_del_init(&p->list);
7560         spin_unlock(&pers_lock);
7561         return 0;
7562 }
7563 EXPORT_SYMBOL(unregister_md_personality);
7564
7565 int register_md_cluster_operations(struct md_cluster_operations *ops,
7566                                    struct module *module)
7567 {
7568         int ret = 0;
7569         spin_lock(&pers_lock);
7570         if (md_cluster_ops != NULL)
7571                 ret = -EALREADY;
7572         else {
7573                 md_cluster_ops = ops;
7574                 md_cluster_mod = module;
7575         }
7576         spin_unlock(&pers_lock);
7577         return ret;
7578 }
7579 EXPORT_SYMBOL(register_md_cluster_operations);
7580
7581 int unregister_md_cluster_operations(void)
7582 {
7583         spin_lock(&pers_lock);
7584         md_cluster_ops = NULL;
7585         spin_unlock(&pers_lock);
7586         return 0;
7587 }
7588 EXPORT_SYMBOL(unregister_md_cluster_operations);
7589
7590 int md_setup_cluster(struct mddev *mddev, int nodes)
7591 {
7592         if (!md_cluster_ops)
7593                 request_module("md-cluster");
7594         spin_lock(&pers_lock);
7595         /* ensure module won't be unloaded */
7596         if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
7597                 pr_warn("can't find md-cluster module or get its reference.\n");
7598                 spin_unlock(&pers_lock);
7599                 return -ENOENT;
7600         }
7601         spin_unlock(&pers_lock);
7602
7603         return md_cluster_ops->join(mddev, nodes);
7604 }
7605
7606 void md_cluster_stop(struct mddev *mddev)
7607 {
7608         if (!md_cluster_ops)
7609                 return;
7610         md_cluster_ops->leave(mddev);
7611         module_put(md_cluster_mod);
7612 }
7613
7614 static int is_mddev_idle(struct mddev *mddev, int init)
7615 {
7616         struct md_rdev *rdev;
7617         int idle;
7618         int curr_events;
7619
7620         idle = 1;
7621         rcu_read_lock();
7622         rdev_for_each_rcu(rdev, mddev) {
7623                 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
7624                 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
7625                               (int)part_stat_read(&disk->part0, sectors[1]) -
7626                               atomic_read(&disk->sync_io);
7627                 /* sync IO will cause sync_io to increase before the disk_stats
7628                  * as sync_io is counted when a request starts, and
7629                  * disk_stats is counted when it completes.
7630                  * So resync activity will cause curr_events to be smaller than
7631                  * when there was no such activity.
7632                  * non-sync IO will cause disk_stats to increase without
7633                  * increasing sync_io so curr_events will (eventually)
7634                  * be larger than it was before.  Once it becomes
7635                  * substantially larger, the test below will cause
7636                  * the array to appear non-idle, and resync will slow
7637                  * down.
7638                  * If there is a lot of outstanding resync activity when
7639                  * we set last_events to curr_events, then all that activity
7640                  * completing might cause the array to appear non-idle
7641                  * and resync will be slowed down even though there might
7642                  * not have been non-resync activity.  This will only
7643                  * happen once though.  'last_events' will soon reflect
7644                  * the state where there is little or no outstanding
7645                  * resync requests, and further resync activity will
7646                  * always make curr_events less than last_events.
7647                  *
7648                  */
7649                 if (init || curr_events - rdev->last_events > 64) {
7650                         rdev->last_events = curr_events;
7651                         idle = 0;
7652                 }
7653         }
7654         rcu_read_unlock();
7655         return idle;
7656 }
7657
7658 void md_done_sync(struct mddev *mddev, int blocks, int ok)
7659 {
7660         /* another "blocks" 512-byte blocks have been synced */
7661         atomic_sub(blocks, &mddev->recovery_active);
7662         wake_up(&mddev->recovery_wait);
7663         if (!ok) {
7664                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7665                 set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
7666                 md_wakeup_thread(mddev->thread);
7667                 // stop recovery, signal do_sync ....
7668         }
7669 }
7670 EXPORT_SYMBOL(md_done_sync);
7671
7672 /* md_write_start(mddev, bi)
7673  * If we need to update some array metadata (e.g. 'active' flag
7674  * in superblock) before writing, schedule a superblock update
7675  * and wait for it to complete.
7676  */
7677 void md_write_start(struct mddev *mddev, struct bio *bi)
7678 {
7679         int did_change = 0;
7680         if (bio_data_dir(bi) != WRITE)
7681                 return;
7682
7683         BUG_ON(mddev->ro == 1);
7684         if (mddev->ro == 2) {
7685                 /* need to switch to read/write */
7686                 mddev->ro = 0;
7687                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7688                 md_wakeup_thread(mddev->thread);
7689                 md_wakeup_thread(mddev->sync_thread);
7690                 did_change = 1;
7691         }
7692         atomic_inc(&mddev->writes_pending);
7693         if (mddev->safemode == 1)
7694                 mddev->safemode = 0;
7695         if (mddev->in_sync) {
7696                 spin_lock(&mddev->lock);
7697                 if (mddev->in_sync) {
7698                         mddev->in_sync = 0;
7699                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7700                         set_bit(MD_CHANGE_PENDING, &mddev->flags);
7701                         md_wakeup_thread(mddev->thread);
7702                         did_change = 1;
7703                 }
7704                 spin_unlock(&mddev->lock);
7705         }
7706         if (did_change)
7707                 sysfs_notify_dirent_safe(mddev->sysfs_state);
7708         wait_event(mddev->sb_wait,
7709                    !test_bit(MD_CHANGE_PENDING, &mddev->flags));
7710 }
7711 EXPORT_SYMBOL(md_write_start);
7712
7713 void md_write_end(struct mddev *mddev)
7714 {
7715         if (atomic_dec_and_test(&mddev->writes_pending)) {
7716                 if (mddev->safemode == 2)
7717                         md_wakeup_thread(mddev->thread);
7718                 else if (mddev->safemode_delay)
7719                         mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
7720         }
7721 }
7722 EXPORT_SYMBOL(md_write_end);
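
/*
 * Illustrative pairing (hypothetical make_request-style code, not from this
 * file): every write a personality accepts is bracketed by these two calls so
 * that the 'active' superblock state and writes_pending stay consistent:
 *
 *	md_write_start(mddev, bio);	(may block while the superblock is updated)
 *	...submit the write to the member devices...
 *	md_write_end(mddev);		(when the write has completed)
 */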
7723
7724 /* md_allow_write(mddev)
7725  * Calling this ensures that the array is marked 'active' so that writes
7726  * may proceed without blocking.  It is important to call this before
7727  * attempting a GFP_KERNEL allocation while holding the mddev lock.
7728  * Must be called with mddev_lock held.
7729  *
7730  * In the ->external case MD_CHANGE_PENDING can not be cleared until mddev->lock
7731  * is dropped, so return -EAGAIN after notifying userspace.
7732  */
7733 int md_allow_write(struct mddev *mddev)
7734 {
7735         if (!mddev->pers)
7736                 return 0;
7737         if (mddev->ro)
7738                 return 0;
7739         if (!mddev->pers->sync_request)
7740                 return 0;
7741
7742         spin_lock(&mddev->lock);
7743         if (mddev->in_sync) {
7744                 mddev->in_sync = 0;
7745                 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7746                 set_bit(MD_CHANGE_PENDING, &mddev->flags);
7747                 if (mddev->safemode_delay &&
7748                     mddev->safemode == 0)
7749                         mddev->safemode = 1;
7750                 spin_unlock(&mddev->lock);
7751                 md_update_sb(mddev, 0);
7752                 sysfs_notify_dirent_safe(mddev->sysfs_state);
7753         } else
7754                 spin_unlock(&mddev->lock);
7755
7756         if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
7757                 return -EAGAIN;
7758         else
7759                 return 0;
7760 }
7761 EXPORT_SYMBOL_GPL(md_allow_write);
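
/*
 * Illustrative caller pattern (hypothetical, derived only from the contract
 * described above): before a GFP_KERNEL allocation made while holding the
 * mddev lock, a caller might do
 *
 *	err = md_allow_write(mddev);
 *	if (err && err != -EAGAIN)
 *		return err;
 *	buf = kzalloc(sz, GFP_KERNEL);	(safe: array is now marked 'active')
 *
 * where -EAGAIN only means the metadata update is still pending.
 */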
7762
7763 #define SYNC_MARKS      10
7764 #define SYNC_MARK_STEP  (3*HZ)
7765 #define UPDATE_FREQUENCY (5*60*HZ)
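
/*
 * Resync speed is estimated over a sliding window: a timestamp/sector-count
 * mark is stepped every SYNC_MARK_STEP jiffies and SYNC_MARKS marks are kept,
 * while curr_resync_completed is refreshed (and a superblock update flagged)
 * roughly every UPDATE_FREQUENCY jiffies in the main resync loop below.
 */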
7766 void md_do_sync(struct md_thread *thread)
7767 {
7768         struct mddev *mddev = thread->mddev;
7769         struct mddev *mddev2;
7770         unsigned int currspeed = 0,
7771                  window;
7772         sector_t max_sectors,j, io_sectors, recovery_done;
7773         unsigned long mark[SYNC_MARKS];
7774         unsigned long update_time;
7775         sector_t mark_cnt[SYNC_MARKS];
7776         int last_mark,m;
7777         struct list_head *tmp;
7778         sector_t last_check;
7779         int skipped = 0;
7780         struct md_rdev *rdev;
7781         char *desc, *action = NULL;
7782         struct blk_plug plug;
7783         int ret;
7784
7785         /* just in case the thread restarts... */
7786         if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
7787                 return;
7788         if (mddev->ro) {/* never try to sync a read-only array */
7789                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7790                 return;
7791         }
7792
7793         if (mddev_is_clustered(mddev)) {
7794                 ret = md_cluster_ops->resync_start(mddev);
7795                 if (ret)
7796                         goto skip;
7797
7798                 set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags);
7799                 if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
7800                         test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
7801                         test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
7802                      && ((unsigned long long)mddev->curr_resync_completed
7803                          < (unsigned long long)mddev->resync_max_sectors))
7804                         goto skip;
7805         }
7806
7807         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
7808                 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
7809                         desc = "data-check";
7810                         action = "check";
7811                 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
7812                         desc = "requested-resync";
7813                         action = "repair";
7814                 } else
7815                         desc = "resync";
7816         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7817                 desc = "reshape";
7818         else
7819                 desc = "recovery";
7820
7821         mddev->last_sync_action = action ?: desc;
7822
7823         /* we overload curr_resync somewhat here.
7824          * 0 == not engaged in resync at all
7825          * 2 == checking that there is no conflict with another sync
7826          * 1 == like 2, but have yielded to allow conflicting resync to
7827          *              commence
7828          * other == active in resync - this many blocks
7829          *
7830          * Before starting a resync we must have set curr_resync to
7831          * 2, and then checked that every "conflicting" array has curr_resync
7832          * less than ours.  When we find one that is the same or higher
7833          * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
7834          * to 1 if we choose to yield (based arbitrarily on the address of the mddev structure).
7835          * This will mean we have to start checking from the beginning again.
7836          *
7837          */
7838
7839         do {
7840                 int mddev2_minor = -1;
7841                 mddev->curr_resync = 2;
7842
7843         try_again:
7844                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
7845                         goto skip;
7846                 for_each_mddev(mddev2, tmp) {
7847                         if (mddev2 == mddev)
7848                                 continue;
7849                         if (!mddev->parallel_resync
7850                         &&  mddev2->curr_resync
7851                         &&  match_mddev_units(mddev, mddev2)) {
7852                                 DEFINE_WAIT(wq);
7853                                 if (mddev < mddev2 && mddev->curr_resync == 2) {
7854                                         /* arbitrarily yield */
7855                                         mddev->curr_resync = 1;
7856                                         wake_up(&resync_wait);
7857                                 }
7858                                 if (mddev > mddev2 && mddev->curr_resync == 1)
7859                                         /* no need to wait here, we can wait the next
7860                                          * time 'round when curr_resync == 2
7861                                          */
7862                                         continue;
7863                                 /* We need to wait 'interruptible' so as not to
7864                                  * contribute to the load average, and not to
7865                                  * be caught by 'softlockup'
7866                                  */
7867                                 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
7868                                 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
7869                                     mddev2->curr_resync >= mddev->curr_resync) {
7870                                         if (mddev2_minor != mddev2->md_minor) {
7871                                                 mddev2_minor = mddev2->md_minor;
7872                                                 pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n",
7873                                                         desc, mdname(mddev),
7874                                                         mdname(mddev2));
7875                                         }
7876                                         mddev_put(mddev2);
7877                                         if (signal_pending(current))
7878                                                 flush_signals(current);
7879                                         schedule();
7880                                         finish_wait(&resync_wait, &wq);
7881                                         goto try_again;
7882                                 }
7883                                 finish_wait(&resync_wait, &wq);
7884                         }
7885                 }
7886         } while (mddev->curr_resync < 2);
7887
7888         j = 0;
7889         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
7890                 /* resync follows the size requested by the personality,
7891                  * which defaults to physical size, but can be virtual size
7892                  */
7893                 max_sectors = mddev->resync_max_sectors;
7894                 atomic64_set(&mddev->resync_mismatches, 0);
7895                 /* we don't use the checkpoint if there's a bitmap */
7896                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
7897                         j = mddev->resync_min;
7898                 else if (!mddev->bitmap)
7899                         j = mddev->recovery_cp;
7900
7901         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7902                 max_sectors = mddev->resync_max_sectors;
7903         else {
7904                 /* recovery follows the physical size of devices */
7905                 max_sectors = mddev->dev_sectors;
7906                 j = MaxSector;
7907                 rcu_read_lock();
7908                 rdev_for_each_rcu(rdev, mddev)
7909                         if (rdev->raid_disk >= 0 &&
7910                             !test_bit(Journal, &rdev->flags) &&
7911                             !test_bit(Faulty, &rdev->flags) &&
7912                             !test_bit(In_sync, &rdev->flags) &&
7913                             rdev->recovery_offset < j)
7914                                 j = rdev->recovery_offset;
7915                 rcu_read_unlock();
7916
7917                 /* If there is a bitmap, we need to make sure all
7918                  * writes that started before we added a spare
7919                  * complete before we start doing a recovery.
7920                  * Otherwise the write might complete and (via
7921                  * bitmap_endwrite) set a bit in the bitmap after the
7922                  * recovery has checked that bit and skipped that
7923                  * region.
7924                  */
7925                 if (mddev->bitmap) {
7926                         mddev->pers->quiesce(mddev, 1);
7927                         mddev->pers->quiesce(mddev, 0);
7928                 }
7929         }
7930
7931         pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
7932         pr_debug("md: minimum _guaranteed_  speed: %d KB/sec/disk.\n", speed_min(mddev));
7933         pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n",
7934                  speed_max(mddev), desc);
7935
7936         is_mddev_idle(mddev, 1); /* this initializes IO event counters */
7937
7938         io_sectors = 0;
7939         for (m = 0; m < SYNC_MARKS; m++) {
7940                 mark[m] = jiffies;
7941                 mark_cnt[m] = io_sectors;
7942         }
7943         last_mark = 0;
7944         mddev->resync_mark = mark[last_mark];
7945         mddev->resync_mark_cnt = mark_cnt[last_mark];
7946
7947         /*
7948          * Tune reconstruction:
7949          */
7950         window = 32*(PAGE_SIZE/512);
7951         pr_debug("md: using %dk window, over a total of %lluk.\n",
7952                  window/2, (unsigned long long)max_sectors/2);
7953
7954         atomic_set(&mddev->recovery_active, 0);
7955         last_check = 0;
7956
7957         if (j>2) {
7958                 pr_debug("md: resuming %s of %s from checkpoint.\n",
7959                          desc, mdname(mddev));
7960                 mddev->curr_resync = j;
7961         } else
7962                 mddev->curr_resync = 3; /* no longer delayed */
7963         mddev->curr_resync_completed = j;
7964         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
7965         md_new_event(mddev);
7966         update_time = jiffies;
7967
7968         blk_start_plug(&plug);
7969         while (j < max_sectors) {
7970                 sector_t sectors;
7971
7972                 skipped = 0;
7973
7974                 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
7975                     ((mddev->curr_resync > mddev->curr_resync_completed &&
7976                       (mddev->curr_resync - mddev->curr_resync_completed)
7977                       > (max_sectors >> 4)) ||
7978                      time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
7979                      (j - mddev->curr_resync_completed)*2
7980                      >= mddev->resync_max - mddev->curr_resync_completed ||
7981                      mddev->curr_resync_completed > mddev->resync_max
7982                             )) {
7983                         /* time to update curr_resync_completed */
7984                         wait_event(mddev->recovery_wait,
7985                                    atomic_read(&mddev->recovery_active) == 0);
7986                         mddev->curr_resync_completed = j;
7987                         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
7988                             j > mddev->recovery_cp)
7989                                 mddev->recovery_cp = j;
7990                         update_time = jiffies;
7991                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7992                         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
7993                 }
7994
7995                 while (j >= mddev->resync_max &&
7996                        !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
7997                         /* As this condition is controlled by user-space,
7998                          * we can block indefinitely, so use '_interruptible'
7999                          * to avoid triggering warnings.
8000                          */
8001                         flush_signals(current); /* just in case */
8002                         wait_event_interruptible(mddev->recovery_wait,
8003                                                  mddev->resync_max > j
8004                                                  || test_bit(MD_RECOVERY_INTR,
8005                                                              &mddev->recovery));
8006                 }
8007
8008                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8009                         break;
8010
8011                 sectors = mddev->pers->sync_request(mddev, j, &skipped);
8012                 if (sectors == 0) {
8013                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8014                         break;
8015                 }
8016
8017                 if (!skipped) { /* actual IO requested */
8018                         io_sectors += sectors;
8019                         atomic_add(sectors, &mddev->recovery_active);
8020                 }
8021
8022                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8023                         break;
8024
8025                 j += sectors;
8026                 if (j > max_sectors)
8027                         /* when skipping, extra large numbers can be returned. */
8028                         j = max_sectors;
8029                 if (j > 2)
8030                         mddev->curr_resync = j;
8031                 mddev->curr_mark_cnt = io_sectors;
8032                 if (last_check == 0)
8033                         /* this is the earliest that rebuild will be
8034                          * visible in /proc/mdstat
8035                          */
8036                         md_new_event(mddev);
8037
8038                 if (last_check + window > io_sectors || j == max_sectors)
8039                         continue;
8040
8041                 last_check = io_sectors;
8042         repeat:
8043                 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
8044                         /* step marks */
8045                         int next = (last_mark+1) % SYNC_MARKS;
8046
8047                         mddev->resync_mark = mark[next];
8048                         mddev->resync_mark_cnt = mark_cnt[next];
8049                         mark[next] = jiffies;
8050                         mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
8051                         last_mark = next;
8052                 }
8053
8054                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8055                         break;
8056
8057                 /*
8058                  * this loop exits only when we are slower than
8059                  * the 'hard' speed limit, or the system was IO-idle for
8060                  * a jiffy.
8061                  * the system might be non-idle CPU-wise, but we only care
8062                  * about not overloading the IO subsystem. (things like an
8063                  * e2fsck being done on the RAID array should execute fast)
8064                  */
8065                 cond_resched();
8066
8067                 recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
8068                 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
8069                         /((jiffies-mddev->resync_mark)/HZ +1) +1;
8070
8071                 if (currspeed > speed_min(mddev)) {
8072                         if (currspeed > speed_max(mddev)) {
8073                                 msleep(500);
8074                                 goto repeat;
8075                         }
8076                         if (!is_mddev_idle(mddev, 0)) {
8077                                 /*
8078                                  * Give other IO more of a chance.
8079                                  * The faster the devices, the less we wait.
8080                                  */
8081                                 wait_event(mddev->recovery_wait,
8082                                            !atomic_read(&mddev->recovery_active));
8083                         }
8084                 }
8085         }
8086         pr_info("md: %s: %s %s.\n", mdname(mddev), desc,
8087                 test_bit(MD_RECOVERY_INTR, &mddev->recovery)
8088                 ? "interrupted" : "done");
8089         /*
8090          * this also signals 'finished resyncing' to md_stop
8091          */
8092         blk_finish_plug(&plug);
8093         wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
8094
8095         if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8096             !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8097             mddev->curr_resync > 3) {
8098                 mddev->curr_resync_completed = mddev->curr_resync;
8099                 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
8100         }
8101         mddev->pers->sync_request(mddev, max_sectors, &skipped);
8102
8103         if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
8104             mddev->curr_resync > 3) {
8105                 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8106                         if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8107                                 if (mddev->curr_resync >= mddev->recovery_cp) {
8108                                         pr_debug("md: checkpointing %s of %s.\n",
8109                                                  desc, mdname(mddev));
8110                                         if (test_bit(MD_RECOVERY_ERROR,
8111                                                 &mddev->recovery))
8112                                                 mddev->recovery_cp =
8113                                                         mddev->curr_resync_completed;
8114                                         else
8115                                                 mddev->recovery_cp =
8116                                                         mddev->curr_resync;
8117                                 }
8118                         } else
8119                                 mddev->recovery_cp = MaxSector;
8120                 } else {
8121                         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8122                                 mddev->curr_resync = MaxSector;
8123                         rcu_read_lock();
8124                         rdev_for_each_rcu(rdev, mddev)
8125                                 if (rdev->raid_disk >= 0 &&
8126                                     mddev->delta_disks >= 0 &&
8127                                     !test_bit(Journal, &rdev->flags) &&
8128                                     !test_bit(Faulty, &rdev->flags) &&
8129                                     !test_bit(In_sync, &rdev->flags) &&
8130                                     rdev->recovery_offset < mddev->curr_resync)
8131                                         rdev->recovery_offset = mddev->curr_resync;
8132                         rcu_read_unlock();
8133                 }
8134         }
8135  skip:
8136         /* set CHANGE_PENDING here since another update may still be needed,
8137          * so that other nodes are informed. It should be harmless for a
8138          * normal (non-clustered) raid array. */
8139         set_mask_bits(&mddev->flags, 0,
8140                       BIT(MD_CHANGE_PENDING) | BIT(MD_CHANGE_DEVS));
8141
8142         spin_lock(&mddev->lock);
8143         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8144                 /* We completed so min/max setting can be forgotten if used. */
8145                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8146                         mddev->resync_min = 0;
8147                 mddev->resync_max = MaxSector;
8148         } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8149                 mddev->resync_min = mddev->curr_resync_completed;
8150         set_bit(MD_RECOVERY_DONE, &mddev->recovery);
8151         mddev->curr_resync = 0;
8152         spin_unlock(&mddev->lock);
8153
8154         wake_up(&resync_wait);
8155         md_wakeup_thread(mddev->thread);
8156         return;
8157 }
8158 EXPORT_SYMBOL_GPL(md_do_sync);
8159
8160 static int remove_and_add_spares(struct mddev *mddev,
8161                                  struct md_rdev *this)
8162 {
8163         struct md_rdev *rdev;
8164         int spares = 0;
8165         int removed = 0;
8166         bool remove_some = false;
8167
8168         rdev_for_each(rdev, mddev) {
8169                 if ((this == NULL || rdev == this) &&
8170                     rdev->raid_disk >= 0 &&
8171                     !test_bit(Blocked, &rdev->flags) &&
8172                     test_bit(Faulty, &rdev->flags) &&
8173                     atomic_read(&rdev->nr_pending)==0) {
8174                         /* Faulty non-Blocked devices with nr_pending == 0
8175                          * never get nr_pending incremented,
8176                          * never get Faulty cleared, and never get Blocked set.
8177                          * So we can synchronize_rcu now rather than once per device
8178                          */
8179                         remove_some = true;
8180                         set_bit(RemoveSynchronized, &rdev->flags);
8181                 }
8182         }
8183
8184         if (remove_some)
8185                 synchronize_rcu();
8186         rdev_for_each(rdev, mddev) {
8187                 if ((this == NULL || rdev == this) &&
8188                     rdev->raid_disk >= 0 &&
8189                     !test_bit(Blocked, &rdev->flags) &&
8190                     ((test_bit(RemoveSynchronized, &rdev->flags) ||
8191                      (!test_bit(In_sync, &rdev->flags) &&
8192                       !test_bit(Journal, &rdev->flags))) &&
8193                     atomic_read(&rdev->nr_pending)==0)) {
8194                         if (mddev->pers->hot_remove_disk(
8195                                     mddev, rdev) == 0) {
8196                                 sysfs_unlink_rdev(mddev, rdev);
8197                                 rdev->raid_disk = -1;
8198                                 removed++;
8199                         }
8200                 }
8201                 if (remove_some && test_bit(RemoveSynchronized, &rdev->flags))
8202                         clear_bit(RemoveSynchronized, &rdev->flags);
8203         }
8204
8205         if (removed && mddev->kobj.sd)
8206                 sysfs_notify(&mddev->kobj, NULL, "degraded");
8207
8208         if (this && removed)
8209                 goto no_add;
8210
8211         rdev_for_each(rdev, mddev) {
8212                 if (this && this != rdev)
8213                         continue;
8214                 if (test_bit(Candidate, &rdev->flags))
8215                         continue;
8216                 if (rdev->raid_disk >= 0 &&
8217                     !test_bit(In_sync, &rdev->flags) &&
8218                     !test_bit(Journal, &rdev->flags) &&
8219                     !test_bit(Faulty, &rdev->flags))
8220                         spares++;
8221                 if (rdev->raid_disk >= 0)
8222                         continue;
8223                 if (test_bit(Faulty, &rdev->flags))
8224                         continue;
8225                 if (!test_bit(Journal, &rdev->flags)) {
8226                         if (mddev->ro &&
8227                             ! (rdev->saved_raid_disk >= 0 &&
8228                                !test_bit(Bitmap_sync, &rdev->flags)))
8229                                 continue;
8230
8231                         rdev->recovery_offset = 0;
8232                 }
8233                 if (mddev->pers->
8234                     hot_add_disk(mddev, rdev) == 0) {
8235                         if (sysfs_link_rdev(mddev, rdev))
8236                                 /* failure here is OK */;
8237                         if (!test_bit(Journal, &rdev->flags))
8238                                 spares++;
8239                         md_new_event(mddev);
8240                         set_bit(MD_CHANGE_DEVS, &mddev->flags);
8241                 }
8242         }
8243 no_add:
8244         if (removed)
8245                 set_bit(MD_CHANGE_DEVS, &mddev->flags);
8246         return spares;
8247 }
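
/*
 * The return value is the number of recovery candidates: devices already
 * assigned a slot but not yet in_sync, plus any spares hot-added above.
 * The caller treats a non-zero count as a reason to start a recovery thread.
 */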
8248
8249 static void md_start_sync(struct work_struct *ws)
8250 {
8251         struct mddev *mddev = container_of(ws, struct mddev, del_work);
8252
8253         mddev->sync_thread = md_register_thread(md_do_sync,
8254                                                 mddev,
8255                                                 "resync");
8256         if (!mddev->sync_thread) {
8257                 pr_warn("%s: could not start resync thread...\n",
8258                         mdname(mddev));
8259                 /* leave the spares where they are, it shouldn't hurt */
8260                 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8261                 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8262                 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
8263                 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8264                 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8265                 wake_up(&resync_wait);
8266                 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
8267                                        &mddev->recovery))
8268                         if (mddev->sysfs_action)
8269                                 sysfs_notify_dirent_safe(mddev->sysfs_action);
8270         } else
8271                 md_wakeup_thread(mddev->sync_thread);
8272         sysfs_notify_dirent_safe(mddev->sysfs_action);
8273         md_new_event(mddev);
8274 }
8275
8276 /*
8277  * This routine is regularly called by all per-raid-array threads to
8278  * deal with generic issues like resync and super-block update.
8279  * Raid personalities that don't have a thread (linear/raid0) do not
8280  * need this as they never do any recovery or update the superblock.
8281  *
8282  * It does not do any resync itself, but rather "forks" off other threads
8283  * to do that as needed.
8284  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
8285  * "->recovery" and create a thread at ->sync_thread.
8286  * When the thread finishes it sets MD_RECOVERY_DONE
8287  * and wakes up this thread, which will reap the thread and finish up.
8288  * This thread also removes any faulty devices (with nr_pending == 0).
8289  *
8290  * The overall approach is:
8291  *  1/ if the superblock needs updating, update it.
8292  *  2/ If a recovery thread is running, don't do anything else.
8293  *  3/ If recovery has finished, clean up, possibly marking spares active.
8294  *  4/ If there are any faulty devices, remove them.
8295  *  5/ If the array is degraded, try to add spare devices.
8296  *  6/ If array has spares or is not in-sync, start a resync thread.
8297  */
8298 void md_check_recovery(struct mddev *mddev)
8299 {
8300         if (mddev->suspended)
8301                 return;
8302
8303         if (mddev->bitmap)
8304                 bitmap_daemon_work(mddev);
8305
8306         if (signal_pending(current)) {
8307                 if (mddev->pers->sync_request && !mddev->external) {
8308                         pr_debug("md: %s in immediate safe mode\n",
8309                                  mdname(mddev));
8310                         mddev->safemode = 2;
8311                 }
8312                 flush_signals(current);
8313         }
8314
8315         if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
8316                 return;
8317         if ( ! (
8318                 (mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) ||
8319                 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
8320                 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
8321                 test_bit(MD_RELOAD_SB, &mddev->flags) ||
8322                 (mddev->external == 0 && mddev->safemode == 1) ||
8323                 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
8324                  && !mddev->in_sync && mddev->recovery_cp == MaxSector)
8325                 ))
8326                 return;
8327
8328         if (mddev_trylock(mddev)) {
8329                 int spares = 0;
8330
8331                 if (mddev->ro) {
8332                         struct md_rdev *rdev;
8333                         if (!mddev->external && mddev->in_sync)
8334                                 /* 'Blocked' flag not needed as failed devices
8335                                  * will be recorded if array switched to read/write.
8336                                  * Leaving it set will prevent the device
8337                                  * from being removed.
8338                                  */
8339                                 rdev_for_each(rdev, mddev)
8340                                         clear_bit(Blocked, &rdev->flags);
8341                         /* On a read-only array we can:
8342                          * - remove failed devices
8343                          * - add already-in_sync devices if the array itself
8344                          *   is in-sync.
8345                          * As we only add devices that are already in-sync,
8346                          * we can activate the spares immediately.
8347                          */
8348                         remove_and_add_spares(mddev, NULL);
8349                         /* There is no thread, but we need to call
8350                          * ->spare_active and clear saved_raid_disk
8351                          */
8352                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8353                         md_reap_sync_thread(mddev);
8354                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8355                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8356                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
8357                         goto unlock;
8358                 }
8359
8360                 if (mddev_is_clustered(mddev)) {
8361                         struct md_rdev *rdev;
8362                         /* kick the device if another node issued a
8363                          * remove-disk request.
8364                          */
8365                         rdev_for_each(rdev, mddev) {
8366                                 if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
8367                                                 rdev->raid_disk < 0)
8368                                         md_kick_rdev_from_array(rdev);
8369                         }
8370
8371                         if (test_and_clear_bit(MD_RELOAD_SB, &mddev->flags))
8372                                 md_reload_sb(mddev, mddev->good_device_nr);
8373                 }
8374
8375                 if (!mddev->external) {
8376                         int did_change = 0;
8377                         spin_lock(&mddev->lock);
8378                         if (mddev->safemode &&
8379                             !atomic_read(&mddev->writes_pending) &&
8380                             !mddev->in_sync &&
8381                             mddev->recovery_cp == MaxSector) {
8382                                 mddev->in_sync = 1;
8383                                 did_change = 1;
8384                                 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
8385                         }
8386                         if (mddev->safemode == 1)
8387                                 mddev->safemode = 0;
8388                         spin_unlock(&mddev->lock);
8389                         if (did_change)
8390                                 sysfs_notify_dirent_safe(mddev->sysfs_state);
8391                 }
8392
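                /* Step 1 of the list above: write out any pending superblock changes. */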
8393                 if (mddev->flags & MD_UPDATE_SB_FLAGS)
8394                         md_update_sb(mddev, 0);
8395
8396                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
8397                     !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
8398                         /* resync/recovery still happening */
8399                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8400                         goto unlock;
8401                 }
8402                 if (mddev->sync_thread) {
8403                         md_reap_sync_thread(mddev);
8404                         goto unlock;
8405                 }
8406                 /* Set RUNNING before clearing NEEDED to avoid
8407                  * any transients in the value of "sync_action".
8408                  */
8409                 mddev->curr_resync_completed = 0;
8410                 spin_lock(&mddev->lock);
8411                 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8412                 spin_unlock(&mddev->lock);
8413                 /* Clear some bits that don't mean anything, but
8414                  * might be left set
8415                  */
8416                 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
8417                 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
8418
8419                 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
8420                     test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
8421                         goto not_running;
8422                 /* no recovery is running.
8423                  * remove any failed drives, then
8424                  * add spares if possible.
8425                  * Spares are also removed and re-added, to allow
8426                  * the personality to fail the re-add.
8427                  */
8428
8429                 if (mddev->reshape_position != MaxSector) {
8430                         if (mddev->pers->check_reshape == NULL ||
8431                             mddev->pers->check_reshape(mddev) != 0)
8432                                 /* Cannot proceed */
8433                                 goto not_running;
8434                         set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8435                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8436                 } else if ((spares = remove_and_add_spares(mddev, NULL))) {
8437                         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8438                         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8439                         clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
8440                         set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8441                 } else if (mddev->recovery_cp < MaxSector) {
8442                         set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8443                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8444                 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
8445                         /* nothing to be done ... */
8446                         goto not_running;
8447
8448                 if (mddev->pers->sync_request) {
8449                         if (spares) {
8450                                 /* We are adding a device or devices to an array
8451                                  * which has the bitmap stored on all devices.
8452                                  * So make sure all bitmap pages get written
8453                                  */
8454                                 bitmap_write_all(mddev->bitmap);
8455                         }
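                        /*
                         * Defer the actual creation of the sync thread to
                         * md_start_sync(), run from the md_misc workqueue.
                         */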
8456                         INIT_WORK(&mddev->del_work, md_start_sync);
8457                         queue_work(md_misc_wq, &mddev->del_work);
8458                         goto unlock;
8459                 }
8460         not_running:
8461                 if (!mddev->sync_thread) {
8462                         clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8463                         wake_up(&resync_wait);
8464                         if (test_and_clear_bit(MD_RECOVERY_RECOVER,
8465                                                &mddev->recovery))
8466                                 if (mddev->sysfs_action)
8467                                         sysfs_notify_dirent_safe(mddev->sysfs_action);
8468                 }
8469         unlock:
8470                 wake_up(&mddev->sb_wait);
8471                 mddev_unlock(mddev);
8472         }
8473 }
8474 EXPORT_SYMBOL(md_check_recovery);
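/*
 * A minimal sketch (hypothetical caller, not taken from this file) of how the
 * personalities and the rest of md drive this machinery: flag that recovery
 * may be needed and wake the per-array thread, whose loop ends up calling
 * md_check_recovery():
 *
 *	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 *	md_wakeup_thread(mddev->thread);
 */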
8475
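/*
 * Collect the results of a finished resync/recovery/reshape: unregister the
 * sync thread, activate spares on success, complete any reshape, forget
 * saved_raid_disk once the array is fully in sync, write the superblock out
 * and clear the recovery state bits.
 */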
8476 void md_reap_sync_thread(struct mddev *mddev)
8477 {
8478         struct md_rdev *rdev;
8479
8480         /* resync has finished, collect result */
8481         md_unregister_thread(&mddev->sync_thread);
8482         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8483             !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
8484                 /* success...*/
8485                 /* activate any spares */
8486                 if (mddev->pers->spare_active(mddev)) {
8487                         sysfs_notify(&mddev->kobj, NULL,
8488                                      "degraded");
8489                         set_bit(MD_CHANGE_DEVS, &mddev->flags);
8490                 }
8491         }
8492         if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8493             mddev->pers->finish_reshape)
8494                 mddev->pers->finish_reshape(mddev);
8495
8496         /* If array is no longer degraded, then any saved_raid_disk
8497          * information must be scrapped.
8498          */
8499         if (!mddev->degraded)
8500                 rdev_for_each(rdev, mddev)
8501                         rdev->saved_raid_disk = -1;
8502
8503         md_update_sb(mddev, 1);
8504         /* MD_CHANGE_PENDING should be cleared by md_update_sb, so we can
8505          * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
8506          * clustered raid */
8507         if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
8508                 md_cluster_ops->resync_finish(mddev);
8509         clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8510         clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
8511         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8512         clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8513         clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
8514         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8515         wake_up(&resync_wait);
8516         /* flag recovery needed just to double check */
8517         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8518         sysfs_notify_dirent_safe(mddev->sysfs_action);
8519         md_new_event(mddev);
8520         if (mddev->event_work.func)
8521                 queue_work(md_misc_wq, &mddev->event_work);
8522 }
8523 EXPORT_SYMBOL(md_reap_sync_thread);
8524
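/*
 * Wait (for at most five seconds) for a Blocked or BlockedBadBlocks rdev to
 * become unblocked, then drop the pending reference the caller took on it.
 */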
8525 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
8526 {
8527         sysfs_notify_dirent_safe(rdev->sysfs_state);
8528         wait_event_timeout(rdev->blocked_wait,
8529                            !test_bit(Blocked, &rdev->flags) &&
8530                            !test_bit(BlockedBadBlocks, &rdev->flags),
8531                            msecs_to_jiffies(5000));
8532         rdev_dec_pending(rdev, mddev);
8533 }
8534 EXPORT_SYMBOL(md_wait_for_blocked_rdev);
8535
8536 void md_finish_reshape(struct mddev *mddev)
8537 {
8538         /* Called by the personality module when a reshape completes. */
8539         struct md_rdev *rdev;
8540
8541         rdev_for_each(rdev, mddev) {
8542                 if (rdev->data_offset > rdev->new_data_offset)
8543                         rdev->sectors += rdev->data_offset - rdev->new_data_offset;
8544                 else
8545                         rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
8546                 rdev->data_offset = rdev->new_data_offset;
8547         }
8548 }
8549 EXPORT_SYMBOL(md_finish_reshape);
8550
8551 /* Bad block management */
8552
8553 /* Returns 1 on success, 0 on failure */
8554 int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
8555                        int is_new)
8556 {
8557         struct mddev *mddev = rdev->mddev;
8558         int rv;
8559         if (is_new)
8560                 s += rdev->new_data_offset;
8561         else
8562                 s += rdev->data_offset;
8563         rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
8564         if (rv == 0) {
8565                 /* Make sure they get written out promptly */
8566                 if (test_bit(ExternalBbl, &rdev->flags))
8567                         sysfs_notify(&rdev->kobj, NULL,
8568                                      "unacknowledged_bad_blocks");
8569                 sysfs_notify_dirent_safe(rdev->sysfs_state);
8570                 set_mask_bits(&mddev->flags, 0,
8571                               BIT(MD_CHANGE_CLEAN) | BIT(MD_CHANGE_PENDING));
8572                 md_wakeup_thread(rdev->mddev->thread);
8573                 return 1;
8574         } else
8575                 return 0;
8576 }
8577 EXPORT_SYMBOL_GPL(rdev_set_badblocks);
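/*
 * A minimal usage sketch (hypothetical caller, not taken from this file): a
 * personality that sees a write error on part of an rdev would typically try
 * to record the range and, failing that, fail the whole device:
 *
 *	if (!rdev_set_badblocks(rdev, sector, nr_sectors, 0))
 *		md_error(rdev->mddev, rdev);
 */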
8578
8579 int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
8580                          int is_new)
8581 {
8582         int rv;
8583         if (is_new)
8584                 s += rdev->new_data_offset;
8585         else
8586                 s += rdev->data_offset;
8587         rv = badblocks_clear(&rdev->badblocks, s, sectors);
8588         if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
8589                 sysfs_notify(&rdev->kobj, NULL, "bad_blocks");
8590         return rv;
8591 }
8592 EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
8593
8594 static int md_notify_reboot(struct notifier_block *this,
8595                             unsigned long code, void *x)
8596 {
8597         struct list_head *tmp;
8598         struct mddev *mddev;
8599         int need_delay = 0;
8600
8601         for_each_mddev(mddev, tmp) {
8602                 if (mddev_trylock(mddev)) {
8603                         if (mddev->pers)
8604                                 __md_stop_writes(mddev);
8605                         if (mddev->persistent)
8606                                 mddev->safemode = 2;
8607                         mddev_unlock(mddev);
8608                 }
8609                 need_delay = 1;
8610         }
8611         /*
8612          * Certain more exotic SCSI devices are known to be
8613          * volatile with respect to too-early system reboots.
8614          * While the right place to handle this is the individual
8615          * driver, we do want to have a safe RAID driver ...
8616          */
8617         if (need_delay)
8618                 mdelay(1000*1);
8619
8620         return NOTIFY_DONE;
8621 }
8622
8623 static struct notifier_block md_notifier = {
8624         .notifier_call  = md_notify_reboot,
8625         .next           = NULL,
8626         .priority       = INT_MAX, /* before any real devices */
8627 };
8628
8629 static void md_geninit(void)
8630 {
8631         pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
8632
8633         proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
8634 }
8635
8636 static int __init md_init(void)
8637 {
8638         int ret = -ENOMEM;
8639
8640         md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
8641         if (!md_wq)
8642                 goto err_wq;
8643
8644         md_misc_wq = alloc_workqueue("md_misc", 0, 0);
8645         if (!md_misc_wq)
8646                 goto err_misc_wq;
8647
8648         if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
8649                 goto err_md;
8650
8651         if ((ret = register_blkdev(0, "mdp")) < 0)
8652                 goto err_mdp;
8653         mdp_major = ret;
8654
8655         blk_register_region(MKDEV(MD_MAJOR, 0), 512, THIS_MODULE,
8656                             md_probe, NULL, NULL);
8657         blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
8658                             md_probe, NULL, NULL);
8659
8660         register_reboot_notifier(&md_notifier);
8661         raid_table_header = register_sysctl_table(raid_root_table);
8662
8663         md_geninit();
8664         return 0;
8665
8666 err_mdp:
8667         unregister_blkdev(MD_MAJOR, "md");
8668 err_md:
8669         destroy_workqueue(md_misc_wq);
8670 err_misc_wq:
8671         destroy_workqueue(md_wq);
8672 err_wq:
8673         return ret;
8674 }
8675
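/*
 * Compare a freshly reloaded superblock against our in-memory view and apply
 * role changes made by another cluster node: drop Candidate devices whose
 * add failed, activate newly assigned spares, and mark devices faulty.
 */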
8676 static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
8677 {
8678         struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
8679         struct md_rdev *rdev2;
8680         int role, ret;
8681         char b[BDEVNAME_SIZE];
8682
8683         /* Check for change of roles in the active devices */
8684         rdev_for_each(rdev2, mddev) {
8685                 if (test_bit(Faulty, &rdev2->flags))
8686                         continue;
8687
8688                 /* Check if the roles changed */
8689                 role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
8690
8691                 if (test_bit(Candidate, &rdev2->flags)) {
8692                         if (role == 0xfffe) {
8693                                 pr_info("md: Removing Candidate device %s because add failed\n", bdevname(rdev2->bdev,b));
8694                                 md_kick_rdev_from_array(rdev2);
8695                                 continue;
8696                         }
8697                         else
8698                                 clear_bit(Candidate, &rdev2->flags);
8699                 }
8700
8701                 if (role != rdev2->raid_disk) {
8702                         /* got activated */
8703                         if (rdev2->raid_disk == -1 && role != 0xffff) {
8704                                 rdev2->saved_raid_disk = role;
8705                                 ret = remove_and_add_spares(mddev, rdev2);
8706                                 pr_info("Activated spare: %s\n",
8707                                         bdevname(rdev2->bdev,b));
8708                                 /* wake up mddev->thread here, so the array can
8709                                  * perform resync with the newly activated disk */
8710                                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8711                                 md_wakeup_thread(mddev->thread);
8712
8713                         }
8714                         /* device faulty
8715                          * We just want to do the minimum to mark the disk
8716                          * as faulty. The recovery is performed by the
8717                          * node that initiated the error.
8718                          */
8719                         if ((role == 0xfffe) || (role == 0xfffd)) {
8720                                 md_error(mddev, rdev2);
8721                                 clear_bit(Blocked, &rdev2->flags);
8722                         }
8723                 }
8724         }
8725
8726         if (mddev->raid_disks != le32_to_cpu(sb->raid_disks))
8727                 update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
8728
8729         /* Finally set the event to be up to date */
8730         mddev->events = le64_to_cpu(sb->events);
8731 }
8732
8733 static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
8734 {
8735         int err;
8736         struct page *swapout = rdev->sb_page;
8737         struct mdp_superblock_1 *sb;
8738
8739         /* Stash the rdev's current sb page in the 'swapout' temporary
8740          * so it can be restored if the reload below fails
8741          */
8742         rdev->sb_page = NULL;
8743         err = alloc_disk_sb(rdev);
8744         if (err == 0) {
8745                 ClearPageUptodate(rdev->sb_page);
8746                 rdev->sb_loaded = 0;
8747                 err = super_types[mddev->major_version].
8748                         load_super(rdev, NULL, mddev->minor_version);
8749         }
8750         if (err < 0) {
8751                 pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
8752                                 __func__, __LINE__, rdev->desc_nr, err);
8753                 if (rdev->sb_page)
8754                         put_page(rdev->sb_page);
8755                 rdev->sb_page = swapout;
8756                 rdev->sb_loaded = 1;
8757                 return err;
8758         }
8759
8760         sb = page_address(rdev->sb_page);
8761         /* Only pick up recovery_offset if MD_FEATURE_RECOVERY_OFFSET
8762          * is set in the feature map
8763          */
8764
8765         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET))
8766                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
8767
8768         /* The other node finished recovery, call spare_active to mark the
8769          * device In_sync and update mddev->degraded
8770          */
8771         if (rdev->recovery_offset == MaxSector &&
8772             !test_bit(In_sync, &rdev->flags) &&
8773             mddev->pers->spare_active(mddev))
8774                 sysfs_notify(&mddev->kobj, NULL, "degraded");
8775
8776         put_page(swapout);
8777         return 0;
8778 }
8779
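/*
 * Re-read the superblock of the rdev with descriptor number @nr (typically
 * because another cluster node updated it), apply any role changes via
 * check_sb_changes(), then refresh recovery_offset from every member.
 */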
8780 void md_reload_sb(struct mddev *mddev, int nr)
8781 {
8782         struct md_rdev *rdev;
8783         int err;
8784
8785         /* Find the rdev */
8786         rdev_for_each_rcu(rdev, mddev) {
8787                 if (rdev->desc_nr == nr)
8788                         break;
8789         }
8790
8791         if (!rdev || rdev->desc_nr != nr) {
8792                 pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
8793                 return;
8794         }
8795
8796         err = read_rdev(mddev, rdev);
8797         if (err < 0)
8798                 return;
8799
8800         check_sb_changes(mddev, rdev);
8801
8802         /* Read all rdevs to update recovery_offset */
8803         rdev_for_each_rcu(rdev, mddev)
8804                 read_rdev(mddev, rdev);
8805 }
8806 EXPORT_SYMBOL(md_reload_sb);
8807
8808 #ifndef MODULE
8809
8810 /*
8811  * Searches all registered partitions for autorun RAID arrays
8812  * at boot time.
8813  */
8814
8815 static DEFINE_MUTEX(detected_devices_mutex);
8816 static LIST_HEAD(all_detected_devices);
8817 struct detected_devices_node {
8818         struct list_head list;
8819         dev_t dev;
8820 };
8821
8822 void md_autodetect_dev(dev_t dev)
8823 {
8824         struct detected_devices_node *node_detected_dev;
8825
8826         node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
8827         if (node_detected_dev) {
8828                 node_detected_dev->dev = dev;
8829                 mutex_lock(&detected_devices_mutex);
8830                 list_add_tail(&node_detected_dev->list, &all_detected_devices);
8831                 mutex_unlock(&detected_devices_mutex);
8832         }
8833 }
8834
8835 static void autostart_arrays(int part)
8836 {
8837         struct md_rdev *rdev;
8838         struct detected_devices_node *node_detected_dev;
8839         dev_t dev;
8840         int i_scanned, i_passed;
8841
8842         i_scanned = 0;
8843         i_passed = 0;
8844
8845         pr_info("md: Autodetecting RAID arrays.\n");
8846
8847         mutex_lock(&detected_devices_mutex);
8848         while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
8849                 i_scanned++;
8850                 node_detected_dev = list_entry(all_detected_devices.next,
8851                                         struct detected_devices_node, list);
8852                 list_del(&node_detected_dev->list);
8853                 dev = node_detected_dev->dev;
8854                 kfree(node_detected_dev);
8855                 mutex_unlock(&detected_devices_mutex);
8856                 rdev = md_import_device(dev,0, 90);
8857                 mutex_lock(&detected_devices_mutex);
8858                 if (IS_ERR(rdev))
8859                         continue;
8860
8861                 if (test_bit(Faulty, &rdev->flags))
8862                         continue;
8863
8864                 set_bit(AutoDetected, &rdev->flags);
8865                 list_add(&rdev->same_set, &pending_raid_disks);
8866                 i_passed++;
8867         }
8868         mutex_unlock(&detected_devices_mutex);
8869
8870         pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);
8871
8872         autorun_devices(part);
8873 }
8874
8875 #endif /* !MODULE */
8876
8877 static __exit void md_exit(void)
8878 {
8879         struct mddev *mddev;
8880         struct list_head *tmp;
8881         int delay = 1;
8882
8883         blk_unregister_region(MKDEV(MD_MAJOR,0), 512);
8884         blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
8885
8886         unregister_blkdev(MD_MAJOR,"md");
8887         unregister_blkdev(mdp_major, "mdp");
8888         unregister_reboot_notifier(&md_notifier);
8889         unregister_sysctl_table(raid_table_header);
8890
8891         /* We cannot unload the modules while some process is
8892          * waiting for us in select() or poll() - wake them up
8893          */
8894         md_unloading = 1;
8895         while (waitqueue_active(&md_event_waiters)) {
8896                 /* not safe to leave yet */
8897                 wake_up(&md_event_waiters);
8898                 msleep(delay);
8899                 delay += delay;
8900         }
8901         remove_proc_entry("mdstat", NULL);
8902
8903         for_each_mddev(mddev, tmp) {
8904                 export_array(mddev);
8905                 mddev->hold_active = 0;
8906         }
8907         destroy_workqueue(md_misc_wq);
8908         destroy_workqueue(md_wq);
8909 }
8910
8911 subsys_initcall(md_init);
8912 module_exit(md_exit)
8913
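/* Handlers for the "start_ro" module parameter, backed by start_readonly. */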
8914 static int get_ro(char *buffer, struct kernel_param *kp)
8915 {
8916         return sprintf(buffer, "%d", start_readonly);
8917 }
8918 static int set_ro(const char *val, struct kernel_param *kp)
8919 {
8920         return kstrtouint(val, 10, (unsigned int *)&start_readonly);
8921 }
8922
8923 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
8924 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
8925 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
8926
8927 MODULE_LICENSE("GPL");
8928 MODULE_DESCRIPTION("MD RAID framework");
8929 MODULE_ALIAS("md");
8930 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);