// SPDX-License-Identifier: GPL-2.0
/* Maximum size of each resync request */
#define RESYNC_BLOCK_SIZE (64*1024)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
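/*
 * For example, with 4 KiB pages each resync request spans RESYNC_PAGES = 16
 * pages; on an architecture with 64 KiB pages a single page covers the whole
 * request.
 */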
/*
 * Number of guaranteed raid bios in case of extreme VM load:
 */
#define NR_RAID_BIOS 256
/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error. To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 */
#define IO_BLOCKED ((struct bio *)1)
/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking which must be done from process context. So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD
 */
#define IO_MADE_GOOD ((struct bio *)2)
#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
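/*
 * Note that BIO_SPECIAL() is also true for a NULL pointer, so it matches any
 * 'bios' slot that does not carry a real bio to submit (NULL, IO_BLOCKED or
 * IO_MADE_GOOD).
 */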
#define MAX_PLUG_BIO 32
/* for managing resync I/O pages */
struct resync_pages {
	void		*raid_bio;
	struct page	*pages[RESYNC_PAGES];
};
struct raid1_plug_cb {
	struct blk_plug_cb	cb;
	struct bio_list		pending;
	unsigned int		count;
};
static void rbio_pool_free(void *rbio, void *data)
{
	kfree(rbio);
}
static inline int resync_alloc_pages(struct resync_pages *rp,
				     gfp_t gfp_flags)
{
	int i;

	for (i = 0; i < RESYNC_PAGES; i++) {
		rp->pages[i] = alloc_page(gfp_flags);
		if (!rp->pages[i])
			goto out_free;
	}

	return 0;

out_free:
	/* unwind the pages allocated so far */
	while (--i >= 0)
		put_page(rp->pages[i]);
	return -ENOMEM;
}
static inline void resync_free_pages(struct resync_pages *rp)
{
	int i;

	for (i = 0; i < RESYNC_PAGES; i++)
		put_page(rp->pages[i]);
}
static inline void resync_get_all_pages(struct resync_pages *rp)
{
	int i;

	for (i = 0; i < RESYNC_PAGES; i++)
		get_page(rp->pages[i]);
}
static inline struct page *resync_fetch_page(struct resync_pages *rp,
					     unsigned idx)
{
	if (WARN_ON_ONCE(idx >= RESYNC_PAGES))
		return NULL;
	return rp->pages[idx];
}
/*
 * 'struct resync_pages' stores the actual pages used for doing the resync
 * IO, and it is per-bio, so make .bi_private point to it.
 */
static inline struct resync_pages *get_resync_pages(struct bio *bio)
{
	return bio->bi_private;
}
/* generally called after bio_reset() for resetting bvec */
static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages *rp,
				      int size)
{
	int idx = 0;

	/* initialize bvec table again */
	do {
		struct page *page = resync_fetch_page(rp, idx);
		int len = min_t(int, size, PAGE_SIZE);

		if (WARN_ON(!bio_add_page(bio, page, len, 0))) {
			bio->bi_status = BLK_STS_RESOURCE;
			bio_endio(bio);
			return;
		}

		size -= len;
	} while (idx++ < RESYNC_PAGES && size > 0);
}
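/*
 * For example, resetting a full 64 KiB resync bio with 4 KiB pages re-adds 16
 * bvecs of PAGE_SIZE each, while a 6 KiB request stops after two iterations
 * (one 4 KiB segment followed by one 2 KiB segment).
 */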
static inline void raid1_submit_write(struct bio *bio)
{
	/* the caller stored the target md_rdev pointer in bi_bdev */
	struct md_rdev *rdev = (void *)bio->bi_bdev;

	bio->bi_next = NULL;
	bio_set_dev(bio, rdev->bdev);
	if (test_bit(Faulty, &rdev->flags))
		bio_io_error(bio);
	else if (unlikely(bio_op(bio) == REQ_OP_DISCARD &&
			  !bdev_max_discard_sectors(bio->bi_bdev)))
		/* Just ignore it */
		bio_endio(bio);
	else
		submit_bio_noacct(bio);
}
static inline bool raid1_add_bio_to_plug(struct mddev *mddev, struct bio *bio,
					 blk_plug_cb_fn unplug, int copies)
{
	struct raid1_plug_cb *plug = NULL;
	struct blk_plug_cb *cb;

	/*
	 * If bitmap is not enabled, it's safe to submit the io directly, and
	 * this can get optimal performance.
	 */
	if (!md_bitmap_enabled(mddev->bitmap)) {
		raid1_submit_write(bio);
		return true;
	}

	cb = blk_check_plugged(unplug, mddev, sizeof(*plug));
	if (!cb)
		return false;

	plug = container_of(cb, struct raid1_plug_cb, cb);
	bio_list_add(&plug->pending, bio);
	if (++plug->count / MAX_PLUG_BIO >= copies) {
		list_del(&cb->list);
		cb->callback(cb, false);
	}

	return true;
}
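/*
 * For example, with copies == 2 the plugged writes are force-flushed through
 * cb->callback() once 2 * MAX_PLUG_BIO = 64 bios are pending, so pending
 * writes cannot pile up on the plug without bound.
 */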
/*
 * current->bio_list will be set under the submit_bio() context; in this case
 * bitmap io will be added to the list and will wait for the current io
 * submission to finish, while the current io submission must wait for the
 * bitmap io to be done. In order to avoid such a deadlock, submit bitmap io
 * asynchronously.
 */
static inline void raid1_prepare_flush_writes(struct bitmap *bitmap)
{
	if (current->bio_list)
		md_bitmap_unplug_async(bitmap);
	else
		md_bitmap_unplug(bitmap);
}
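/*
 * md_bitmap_unplug_async() is expected to hand the bitmap writes off to a
 * worker whose own bio_list is empty, so they are actually issued rather than
 * parked on current->bio_list behind the io we are in the middle of
 * submitting.
 */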
/*
 * Used by fix_read_error() to decay the per rdev read_errors.
 * We halve the read error count for every hour that has elapsed
 * since the last recorded read error.
 */
static inline void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
{
	long cur_time_mon;
	unsigned long hours_since_last;
	unsigned int read_errors = atomic_read(&rdev->read_errors);

	cur_time_mon = ktime_get_seconds();

	if (rdev->last_read_error == 0) {
		/* first time we've seen a read error */
		rdev->last_read_error = cur_time_mon;
		return;
	}

	hours_since_last = (long)(cur_time_mon -
				  rdev->last_read_error) / 3600;

	rdev->last_read_error = cur_time_mon;

	/*
	 * if hours_since_last is > the number of bits in read_errors
	 * just set read errors to 0. We do this to avoid
	 * overflowing the shift of read_errors by hours_since_last.
	 */
	if (hours_since_last >= 8 * sizeof(read_errors))
		atomic_set(&rdev->read_errors, 0);
	else
		atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
}
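/*
 * For example, if the last read error was recorded three hours ago, the stored
 * count is shifted right by 3, i.e. divided by 8, before the new error is
 * accounted by the caller.
 */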
static inline bool exceed_read_errors(struct mddev *mddev, struct md_rdev *rdev)
{
	int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
	int read_errors;

	check_decay_read_errors(mddev, rdev);
	read_errors = atomic_inc_return(&rdev->read_errors);
	if (read_errors > max_read_errors) {
		pr_notice("md/"RAID_1_10_NAME":%s: %pg: Raid device exceeded read_error threshold [cur %d:max %d]\n",
			  mdname(mddev), rdev->bdev, read_errors, max_read_errors);
		pr_notice("md/"RAID_1_10_NAME":%s: %pg: Failing raid device\n",
			  mdname(mddev), rdev->bdev);
		md_error(mddev, rdev);
		return true;
	}

	return false;
}
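/*
 * The fix_read_error() paths in raid1 and raid10 are expected to call this
 * once per failed read and stop attempting corrections on the device as soon
 * as it returns true.
 */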
/**
 * raid1_check_read_range() - check a given read range for bad blocks,
 *	the available read length is returned;
 * @rdev: the rdev to read;
 * @this_sector: read position;
 * @len: read length;
 *
 * helper function for read_balance()
 *
 * 1) If there are no bad blocks in the range, @len is returned;
 * 2) If the range is all bad blocks, 0 is returned;
 * 3) If there are partial bad blocks:
 *  - If the bad block range starts after @this_sector, the length of the
 *    first good region is returned;
 *  - If the bad block range starts before @this_sector, 0 is returned and
 *    @len is updated to the offset into the region before we get to the
 *    good blocks;
 */
static inline int raid1_check_read_range(struct md_rdev *rdev,
					 sector_t this_sector, int *len)
{
	sector_t first_bad;
	int bad_sectors;

	/* no bad block overlap */
	if (!is_badblock(rdev, this_sector, *len, &first_bad, &bad_sectors))
		return *len;

	/*
	 * bad block range starts offset into our range so we can return the
	 * number of sectors before the bad blocks start.
	 */
	if (first_bad > this_sector)
		return first_bad - this_sector;

	/* read range is fully consumed by bad blocks. */
	if (this_sector + *len <= first_bad + bad_sectors)
		return 0;

	/*
	 * final case, bad block range starts before or at the start of our
	 * range but does not cover our entire range so we still return 0 but
	 * update the length with the number of sectors before we get to the
	 * good ones.
	 */
	*len = first_bad + bad_sectors - this_sector;
	return 0;
}
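/*
 * For example, for a read of 8 sectors at sector 100 with bad blocks covering
 * sectors 104-109, first_bad (104) > this_sector, so 4 good sectors are
 * returned. If the bad range were 96-103 instead, 0 would be returned and
 * *len updated to 4, the offset of the first good sector within the range.
 */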
/*
 * Check if read should choose the first rdev.
 *
 * Balance on the whole device if no resync is going on (recovery is ok) or
 * below the resync window. Otherwise, take the first readable disk.
 */
static inline bool raid1_should_read_first(struct mddev *mddev,
					   sector_t this_sector, int len)
{
	if ((mddev->recovery_cp < this_sector + len))
		return true;

	if (mddev_is_clustered(mddev) &&
	    md_cluster_ops->area_resyncing(mddev, READ, this_sector,
					   this_sector + len))
		return true;

	return false;
}
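/*
 * Both the raid1 and raid10 read_balance() implementations are expected to use
 * this check and fall back to the first readable disk whenever it returns
 * true.
 */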