Lines Matching +full:data +full:- +full:mirror
1 // SPDX-License-Identifier: GPL-2.0-or-later
9 * RAID-1 management functions.
11 * Better read-balancing code written by Mika Kuoppala <[email protected]>, 2000
19 * - bitmap marked during normal i/o
20 * - bitmap used to skip nondirty blocks during sync
22 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
23 * - persistent bitmap code
38 #include "md-bitmap.h"
51 #include "raid1-10.c"
53 #define START(node) ((node)->start)
54 #define LAST(node) ((node)->last)
63 sector_t lo = r1_bio->sector; in check_and_add_serial()
64 sector_t hi = lo + r1_bio->sectors; in check_and_add_serial()
65 struct serial_in_rdev *serial = &rdev->serial[idx]; in check_and_add_serial()
67 spin_lock_irqsave(&serial->serial_lock, flags); in check_and_add_serial()
69 if (raid1_rb_iter_first(&serial->serial_rb, lo, hi)) in check_and_add_serial()
70 ret = -EBUSY; in check_and_add_serial()
72 si->start = lo; in check_and_add_serial()
73 si->last = hi; in check_and_add_serial()
74 raid1_rb_insert(si, &serial->serial_rb); in check_and_add_serial()
76 spin_unlock_irqrestore(&serial->serial_lock, flags); in check_and_add_serial()
83 struct mddev *mddev = rdev->mddev; in wait_for_serialization()
85 int idx = sector_to_idx(r1_bio->sector); in wait_for_serialization()
86 struct serial_in_rdev *serial = &rdev->serial[idx]; in wait_for_serialization()
88 if (WARN_ON(!mddev->serial_info_pool)) in wait_for_serialization()
90 si = mempool_alloc(mddev->serial_info_pool, GFP_NOIO); in wait_for_serialization()
91 wait_event(serial->serial_io_wait, in wait_for_serialization()
100 struct mddev *mddev = rdev->mddev; in remove_serial()
102 struct serial_in_rdev *serial = &rdev->serial[idx]; in remove_serial()
104 spin_lock_irqsave(&serial->serial_lock, flags); in remove_serial()
105 for (si = raid1_rb_iter_first(&serial->serial_rb, lo, hi); in remove_serial()
107 if (si->start == lo && si->last == hi) { in remove_serial()
108 raid1_rb_remove(si, &serial->serial_rb); in remove_serial()
109 mempool_free(si, mddev->serial_info_pool); in remove_serial()
116 spin_unlock_irqrestore(&serial->serial_lock, flags); in remove_serial()
117 wake_up(&serial->serial_io_wait); in remove_serial()
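/*
 * [Editor's sketch] The three helpers above implement per-rdev write
 * serialization: a write to [lo, hi) may only be issued once no
 * in-flight write overlaps that range; wait_for_serialization() loops
 * until check_and_add_serial() stops returning -EBUSY, and
 * remove_serial() erases the range and wakes waiters. A minimal
 * user-space model of the overlap test, using a flat table in place
 * of the kernel's raid1_rb_* interval tree (all names and the table
 * size here are illustrative, not kernel API):
 */
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

#define NR_SPANS 64
struct span { unsigned long long start, last; bool used; };
static struct span serial_tab[NR_SPANS];

/* Record [lo, hi) unless it overlaps an already-recorded span. */
static int check_and_add_span(unsigned long long lo, unsigned long long hi)
{
	struct span *slot = NULL;
	int i;

	for (i = 0; i < NR_SPANS; i++) {
		if (!serial_tab[i].used) {
			slot = &serial_tab[i];
			continue;
		}
		/* half-open intervals overlap iff lo < last && start < hi */
		if (lo < serial_tab[i].last && serial_tab[i].start < hi)
			return -EBUSY;
	}
	if (!slot)
		return -ENOMEM;
	slot->start = lo;
	slot->last = hi;
	slot->used = true;
	return 0;
}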
121 * for resync bio, r1bio pointer can be retrieved from the per-bio
126 return get_resync_pages(bio)->raid_bio; in get_resync_r1bio()
129 static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) in r1bio_pool_alloc()
131 struct pool_info *pi = data; in r1bio_pool_alloc()
132 int size = offsetof(struct r1bio, bios[pi->raid_disks]); in r1bio_pool_alloc()
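/*
 * [Editor's sketch] r1bio_pool_alloc() sizes its allocation with
 * offsetof() so the flexible bios[] tail gets one pointer slot per
 * disk. The same idiom in isolation (struct and names illustrative;
 * offsetof() with a runtime index is the usual GNU/kernel extension,
 * and sizeof(*r) + n * sizeof(void *) is the portable alternative):
 */
#include <stddef.h>
#include <stdlib.h>

struct demo_r1bio {
	int nr_disks;
	void *bios[];		/* flexible array member */
};

static struct demo_r1bio *demo_r1bio_alloc(int raid_disks)
{
	/* header plus raid_disks pointer slots, as in bios[pi->raid_disks] */
	size_t size = offsetof(struct demo_r1bio, bios[raid_disks]);
	struct demo_r1bio *r = calloc(1, size);

	if (r)
		r->nr_disks = raid_disks;
	return r;
}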
145 static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) in r1buf_pool_alloc()
147 struct pool_info *pi = data; in r1buf_pool_alloc()
158 rps = kmalloc_array(pi->raid_disks, sizeof(struct resync_pages), in r1buf_pool_alloc()
164 * Allocate bios: 1 for reading, n-1 for writing in r1buf_pool_alloc()
166 for (j = pi->raid_disks ; j-- ; ) { in r1buf_pool_alloc()
170 bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0); in r1buf_pool_alloc()
171 r1_bio->bios[j] = bio; in r1buf_pool_alloc()
174 * Allocate RESYNC_PAGES data pages and attach them to in r1buf_pool_alloc()
176 * If this is a user-requested check/repair, allocate in r1buf_pool_alloc()
179 if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) in r1buf_pool_alloc()
180 need_pages = pi->raid_disks; in r1buf_pool_alloc()
183 for (j = 0; j < pi->raid_disks; j++) { in r1buf_pool_alloc()
186 bio = r1_bio->bios[j]; in r1buf_pool_alloc()
196 rp->raid_bio = r1_bio; in r1buf_pool_alloc()
197 bio->bi_private = rp; in r1buf_pool_alloc()
200 r1_bio->master_bio = NULL; in r1buf_pool_alloc()
205 while (--j >= 0) in r1buf_pool_alloc()
209 while (++j < pi->raid_disks) { in r1buf_pool_alloc()
210 bio_uninit(r1_bio->bios[j]); in r1buf_pool_alloc()
211 kfree(r1_bio->bios[j]); in r1buf_pool_alloc()
216 rbio_pool_free(r1_bio, data); in r1buf_pool_alloc()
220 static void r1buf_pool_free(void *__r1_bio, void *data) in r1buf_pool_free()
222 struct pool_info *pi = data; in r1buf_pool_free()
227 for (i = pi->raid_disks; i--; ) { in r1buf_pool_free()
228 rp = get_resync_pages(r1bio->bios[i]); in r1buf_pool_free()
230 bio_uninit(r1bio->bios[i]); in r1buf_pool_free()
231 kfree(r1bio->bios[i]); in r1buf_pool_free()
237 rbio_pool_free(r1bio, data); in r1buf_pool_free()
244 for (i = 0; i < conf->raid_disks * 2; i++) { in put_all_bios()
245 struct bio **bio = r1_bio->bios + i; in put_all_bios()
254 struct r1conf *conf = r1_bio->mddev->private; in free_r1bio()
257 mempool_free(r1_bio, &conf->r1bio_pool); in free_r1bio()
262 struct r1conf *conf = r1_bio->mddev->private; in put_buf()
263 sector_t sect = r1_bio->sector; in put_buf()
266 for (i = 0; i < conf->raid_disks * 2; i++) { in put_buf()
267 struct bio *bio = r1_bio->bios[i]; in put_buf()
268 if (bio->bi_end_io) in put_buf()
269 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev); in put_buf()
272 mempool_free(r1_bio, &conf->r1buf_pool); in put_buf()
280 struct mddev *mddev = r1_bio->mddev; in reschedule_retry()
281 struct r1conf *conf = mddev->private; in reschedule_retry()
284 idx = sector_to_idx(r1_bio->sector); in reschedule_retry()
285 spin_lock_irqsave(&conf->device_lock, flags); in reschedule_retry()
286 list_add(&r1_bio->retry_list, &conf->retry_list); in reschedule_retry()
287 atomic_inc(&conf->nr_queued[idx]); in reschedule_retry()
288 spin_unlock_irqrestore(&conf->device_lock, flags); in reschedule_retry()
290 wake_up(&conf->wait_barrier); in reschedule_retry()
291 md_wakeup_thread(mddev->thread); in reschedule_retry()
301 struct bio *bio = r1_bio->master_bio; in call_bio_endio()
303 if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) in call_bio_endio()
304 bio->bi_status = BLK_STS_IOERR; in call_bio_endio()
311 struct bio *bio = r1_bio->master_bio; in raid_end_bio_io()
312 struct r1conf *conf = r1_bio->mddev->private; in raid_end_bio_io()
313 sector_t sector = r1_bio->sector; in raid_end_bio_io()
316 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { in raid_end_bio_io()
317 pr_debug("raid1: sync end %s on sectors %llu-%llu\n", in raid_end_bio_io()
319 (unsigned long long) bio->bi_iter.bi_sector, in raid_end_bio_io()
320 (unsigned long long) bio_end_sector(bio) - 1); in raid_end_bio_io()
328 * to go idle. All I/Os, even write-behind writes, are done. in raid_end_bio_io()
338 struct r1conf *conf = r1_bio->mddev->private; in update_head_pos()
340 conf->mirrors[disk].head_position = in update_head_pos()
341 r1_bio->sector + (r1_bio->sectors); in update_head_pos()
349 int mirror; in find_bio_disk()
350 struct r1conf *conf = r1_bio->mddev->private; in find_bio_disk()
351 int raid_disks = conf->raid_disks; in find_bio_disk()
353 for (mirror = 0; mirror < raid_disks * 2; mirror++) in find_bio_disk()
354 if (r1_bio->bios[mirror] == bio) in find_bio_disk()
357 BUG_ON(mirror == raid_disks * 2); in find_bio_disk()
358 update_head_pos(mirror, r1_bio); in find_bio_disk()
360 return mirror; in find_bio_disk()
365 int uptodate = !bio->bi_status; in raid1_end_read_request()
366 struct r1bio *r1_bio = bio->bi_private; in raid1_end_read_request()
367 struct r1conf *conf = r1_bio->mddev->private; in raid1_end_read_request()
368 struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev; in raid1_end_read_request()
371 * this branch is our 'one mirror IO has finished' event handler: in raid1_end_read_request()
373 update_head_pos(r1_bio->read_disk, r1_bio); in raid1_end_read_request()
376 set_bit(R1BIO_Uptodate, &r1_bio->state); in raid1_end_read_request()
377 else if (test_bit(FailFast, &rdev->flags) && in raid1_end_read_request()
378 test_bit(R1BIO_FailFast, &r1_bio->state)) in raid1_end_read_request()
379 /* This was a fail-fast read so we definitely in raid1_end_read_request()
388 spin_lock_irqsave(&conf->device_lock, flags); in raid1_end_read_request()
389 if (r1_bio->mddev->degraded == conf->raid_disks || in raid1_end_read_request()
390 (r1_bio->mddev->degraded == conf->raid_disks-1 && in raid1_end_read_request()
391 test_bit(In_sync, &rdev->flags))) in raid1_end_read_request()
393 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_end_read_request()
398 rdev_dec_pending(rdev, conf->mddev); in raid1_end_read_request()
404 mdname(conf->mddev), in raid1_end_read_request()
405 rdev->bdev, in raid1_end_read_request()
406 (unsigned long long)r1_bio->sector); in raid1_end_read_request()
407 set_bit(R1BIO_ReadError, &r1_bio->state); in raid1_end_read_request()
415 struct mddev *mddev = r1_bio->mddev; in close_write()
418 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { in close_write()
419 bio_free_pages(r1_bio->behind_master_bio); in close_write()
420 bio_put(r1_bio->behind_master_bio); in close_write()
421 r1_bio->behind_master_bio = NULL; in close_write()
424 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) in close_write()
425 mddev->bitmap_ops->end_behind_write(mddev); in close_write()
431 if (!atomic_dec_and_test(&r1_bio->remaining)) in r1_bio_write_done()
434 if (test_bit(R1BIO_WriteError, &r1_bio->state)) in r1_bio_write_done()
438 if (test_bit(R1BIO_MadeGood, &r1_bio->state)) in r1_bio_write_done()
447 struct r1bio *r1_bio = bio->bi_private; in raid1_end_write_request()
448 int behind = test_bit(R1BIO_BehindIO, &r1_bio->state); in raid1_end_write_request()
449 struct r1conf *conf = r1_bio->mddev->private; in raid1_end_write_request()
451 int mirror = find_bio_disk(r1_bio, bio); in raid1_end_write_request()
452 struct md_rdev *rdev = conf->mirrors[mirror].rdev; in raid1_end_write_request()
454 sector_t lo = r1_bio->sector; in raid1_end_write_request()
455 sector_t hi = r1_bio->sector + r1_bio->sectors; in raid1_end_write_request()
457 discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD; in raid1_end_write_request()
460 * 'one mirror IO has finished' event handler: in raid1_end_write_request()
462 if (bio->bi_status && !discard_error) { in raid1_end_write_request()
463 set_bit(WriteErrorSeen, &rdev->flags); in raid1_end_write_request()
464 if (!test_and_set_bit(WantReplacement, &rdev->flags)) in raid1_end_write_request()
466 conf->mddev->recovery); in raid1_end_write_request()
468 if (test_bit(FailFast, &rdev->flags) && in raid1_end_write_request()
469 (bio->bi_opf & MD_FAILFAST) && in raid1_end_write_request()
471 !test_bit(WriteMostly, &rdev->flags)) { in raid1_end_write_request()
472 md_error(r1_bio->mddev, rdev); in raid1_end_write_request()
479 if (!test_bit(Faulty, &rdev->flags)) in raid1_end_write_request()
480 set_bit(R1BIO_WriteError, &r1_bio->state); in raid1_end_write_request()
483 r1_bio->bios[mirror] = NULL; in raid1_end_write_request()
494 * to user-side. So if something waits for IO, then it in raid1_end_write_request()
497 r1_bio->bios[mirror] = NULL; in raid1_end_write_request()
502 * such device for properly reading the data back (we could in raid1_end_write_request()
504 * before rdev->recovery_offset, but for simplicity we don't in raid1_end_write_request()
507 if (test_bit(In_sync, &rdev->flags) && in raid1_end_write_request()
508 !test_bit(Faulty, &rdev->flags)) in raid1_end_write_request()
509 set_bit(R1BIO_Uptodate, &r1_bio->state); in raid1_end_write_request()
512 if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors) && in raid1_end_write_request()
514 r1_bio->bios[mirror] = IO_MADE_GOOD; in raid1_end_write_request()
515 set_bit(R1BIO_MadeGood, &r1_bio->state); in raid1_end_write_request()
520 if (test_bit(CollisionCheck, &rdev->flags)) in raid1_end_write_request()
522 if (test_bit(WriteMostly, &rdev->flags)) in raid1_end_write_request()
523 atomic_dec(&r1_bio->behind_remaining); in raid1_end_write_request()
527 * has safely reached all non-writemostly in raid1_end_write_request()
529 * gets done only once -- we don't ever want to return in raid1_end_write_request()
530 * -EIO here, instead we'll wait in raid1_end_write_request()
532 if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) && in raid1_end_write_request()
533 test_bit(R1BIO_Uptodate, &r1_bio->state)) { in raid1_end_write_request()
535 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { in raid1_end_write_request()
536 struct bio *mbio = r1_bio->master_bio; in raid1_end_write_request()
538 " %llu-%llu\n", in raid1_end_write_request()
539 (unsigned long long) mbio->bi_iter.bi_sector, in raid1_end_write_request()
540 (unsigned long long) bio_end_sector(mbio) - 1); in raid1_end_write_request()
544 } else if (rdev->mddev->serialize_policy) in raid1_end_write_request()
546 if (r1_bio->bios[mirror] == NULL) in raid1_end_write_request()
547 rdev_dec_pending(rdev, conf->mddev); in raid1_end_write_request()
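/*
 * [Editor's note] For write-behind, r1_bio keeps two counters:
 * 'remaining' (sub-writes still in flight) and 'behind_remaining'
 * (those aimed at WriteMostly devices). The check above,
 * behind_remaining >= remaining - 1, fires when every write still in
 * flight except the one just completing targets a write-mostly
 * device, so the master bio can be acked early. Worked example with
 * mirrors A, B fast and C write-mostly, assuming the submit-time
 * bias on 'remaining' was already dropped after all sub-writes were
 * issued:
 *
 *   after submit:  remaining = 3, behind_remaining = 1
 *   A completes:   1 >= 3 - 1 ? no;  remaining -> 2
 *   B completes:   1 >= 2 - 1 ? yes -> master bio acked early
 *   C completes:   behind_remaining -> 0; remaining -> 0 -> cleanup
 */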
569 len = round_up(start_sector + 1, BARRIER_UNIT_SECTOR_SIZE) - in align_to_barrier_unit_end()
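/*
 * [Editor's note] align_to_barrier_unit_end() clamps an I/O so it
 * never crosses a barrier-bucket boundary. With
 * BARRIER_UNIT_SECTOR_SIZE = 1 << 17 sectors (64 MiB), illustrative
 * values:
 *
 *   start_sector = 131000, 200 sectors wanted
 *   round_up(131001, 131072) = 131072
 *   len = 131072 - 131000 = 72 sectors allowed
 *
 * and the remaining 128 sectors are issued as a second request that
 * falls entirely in the next bucket.
 */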
581 struct raid1_info *info = &conf->mirrors[disk]; in update_read_sectors()
583 atomic_inc(&info->rdev->nr_pending); in update_read_sectors()
584 if (info->next_seq_sect != this_sector) in update_read_sectors()
585 info->seq_start = this_sector; in update_read_sectors()
586 info->next_seq_sect = this_sector + len; in update_read_sectors()
592 sector_t this_sector = r1_bio->sector; in choose_first_rdev()
593 int len = r1_bio->sectors; in choose_first_rdev()
596 for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) { in choose_first_rdev()
600 if (r1_bio->bios[disk] == IO_BLOCKED) in choose_first_rdev()
603 rdev = conf->mirrors[disk].rdev; in choose_first_rdev()
604 if (!rdev || test_bit(Faulty, &rdev->flags)) in choose_first_rdev()
616 return -1; in choose_first_rdev()
621 return !test_bit(In_sync, &rdev->flags) && in rdev_in_recovery()
622 rdev->recovery_offset < r1_bio->sector + r1_bio->sectors; in rdev_in_recovery()
628 sector_t this_sector = r1_bio->sector; in choose_bb_rdev()
629 int best_disk = -1; in choose_bb_rdev()
633 for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) { in choose_bb_rdev()
638 if (r1_bio->bios[disk] == IO_BLOCKED) in choose_bb_rdev()
641 rdev = conf->mirrors[disk].rdev; in choose_bb_rdev()
642 if (!rdev || test_bit(Faulty, &rdev->flags) || in choose_bb_rdev()
644 test_bit(WriteMostly, &rdev->flags)) in choose_bb_rdev()
648 len = r1_bio->sectors; in choose_bb_rdev()
656 if (best_disk != -1) { in choose_bb_rdev()
667 sector_t this_sector = r1_bio->sector; in choose_slow_rdev()
668 int bb_disk = -1; in choose_slow_rdev()
672 for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) { in choose_slow_rdev()
677 if (r1_bio->bios[disk] == IO_BLOCKED) in choose_slow_rdev()
680 rdev = conf->mirrors[disk].rdev; in choose_slow_rdev()
681 if (!rdev || test_bit(Faulty, &rdev->flags) || in choose_slow_rdev()
682 !test_bit(WriteMostly, &rdev->flags) || in choose_slow_rdev()
687 len = r1_bio->sectors; in choose_slow_rdev()
689 if (read_len == r1_bio->sectors) { in choose_slow_rdev()
705 if (bb_disk != -1) { in choose_slow_rdev()
716 return conf->mirrors[disk].next_seq_sect == r1_bio->sector || in is_sequential()
717 conf->mirrors[disk].head_position == r1_bio->sector; in is_sequential()
726 struct raid1_info *mirror = &conf->mirrors[disk]; in should_choose_next()
729 if (!test_bit(Nonrot, &mirror->rdev->flags)) in should_choose_next()
732 opt_iosize = bdev_io_opt(mirror->rdev->bdev) >> 9; in should_choose_next()
733 return opt_iosize > 0 && mirror->seq_start != MaxSector && in should_choose_next()
734 mirror->next_seq_sect > opt_iosize && in should_choose_next()
735 mirror->next_seq_sect - opt_iosize >= mirror->seq_start; in should_choose_next()
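/*
 * [Editor's note] should_choose_next() only ever returns true for a
 * non-rotational disk: a spinning disk keeps the whole sequential
 * stream (seeks are expensive), while an SSD that advertises an
 * optimal I/O size hands the stream to another mirror every
 * opt_iosize sectors. Illustrative numbers: with opt_iosize = 256
 * and seq_start = 1000, reads stay on this disk until next_seq_sect
 * reaches 1256, because only then does
 * next_seq_sect - 256 >= 1000 hold.
 */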
740 if (!rdev || test_bit(Faulty, &rdev->flags)) in rdev_readable()
747 if (test_bit(WriteMostly, &rdev->flags)) in rdev_readable()
751 if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors)) in rdev_readable()
770 .closest_dist_disk = -1, in choose_best_rdev()
772 .min_pending_disk = -1, in choose_best_rdev()
774 .sequential_disk = -1, in choose_best_rdev()
777 for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) { in choose_best_rdev()
782 if (r1_bio->bios[disk] == IO_BLOCKED) in choose_best_rdev()
785 rdev = conf->mirrors[disk].rdev; in choose_best_rdev()
791 set_bit(R1BIO_FailFast, &r1_bio->state); in choose_best_rdev()
793 pending = atomic_read(&rdev->nr_pending); in choose_best_rdev()
794 dist = abs(r1_bio->sector - conf->mirrors[disk].head_position); in choose_best_rdev()
828 if (ctl.sequential_disk != -1 && ctl.min_pending != 0) in choose_best_rdev()
833 * non-rotational, choose the disk with fewer pending requests even if the in choose_best_rdev()
835 * mixed rotational/non-rotational disks depending on workload. in choose_best_rdev()
837 if (ctl.min_pending_disk != -1 && in choose_best_rdev()
838 (READ_ONCE(conf->nonrot_disks) || ctl.min_pending == 0)) in choose_best_rdev()
867 clear_bit(R1BIO_FailFast, &r1_bio->state); in read_balance()
869 if (raid1_should_read_first(conf->mddev, r1_bio->sector, in read_balance()
870 r1_bio->sectors)) in read_balance()
875 *max_sectors = r1_bio->sectors; in read_balance()
876 update_read_sectors(conf, disk, r1_bio->sector, in read_balance()
877 r1_bio->sectors); in read_balance()
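/*
 * [Editor's sketch] read_balance() ultimately picks one mirror per
 * read by weighing queue depth (nr_pending) against seek distance
 * from the last known head position. A stripped-down user-space
 * model of that choice (illustrative only: FailFast, write-mostly,
 * bad-block and sequential-read handling are omitted):
 */
#include <stdlib.h>

struct mirror_state { long long head_pos; int pending; int faulty; };

static int pick_read_disk(struct mirror_state *m, int nr,
			  long long sector, int any_nonrot)
{
	int best = -1, min_pending = -1, least_loaded = -1;
	long long best_dist = -1;

	for (int i = 0; i < nr; i++) {
		long long dist;

		if (m[i].faulty)
			continue;
		dist = llabs(sector - m[i].head_pos);
		if (best_dist < 0 || dist < best_dist) {
			best_dist = dist;
			best = i;
		}
		if (min_pending < 0 || m[i].pending < min_pending) {
			min_pending = m[i].pending;
			least_loaded = i;
		}
	}
	/*
	 * With an SSD in the mix (or a fully idle mirror) the kernel
	 * prefers the least-loaded mirror over the closest one.
	 */
	if (least_loaded >= 0 && (any_nonrot || min_pending == 0))
		return least_loaded;
	return best;
}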
895 if (wq_has_sleeper(&conf->wait_barrier)) in wake_up_barrier()
896 wake_up(&conf->wait_barrier); in wake_up_barrier()
902 raid1_prepare_flush_writes(conf->mddev); in flush_bio_list()
906 struct bio *next = bio->bi_next; in flush_bio_list()
919 spin_lock_irq(&conf->device_lock); in flush_pending_writes()
921 if (conf->pending_bio_list.head) { in flush_pending_writes()
925 bio = bio_list_get(&conf->pending_bio_list); in flush_pending_writes()
926 spin_unlock_irq(&conf->device_lock); in flush_pending_writes()
930 * current->state might be TASK_UNINTERRUPTIBLE which will in flush_pending_writes()
934 * is a false-positive. Silence the warning by resetting in flush_pending_writes()
942 spin_unlock_irq(&conf->device_lock); in flush_pending_writes()
954 * We choose only to raise the barrier if no-one is waiting for the
966 * If resync/recovery is interrupted, returns -EINTR;
973 spin_lock_irq(&conf->resync_lock); in raise_barrier()
976 wait_event_lock_irq(conf->wait_barrier, in raise_barrier()
977 !atomic_read(&conf->nr_waiting[idx]), in raise_barrier()
978 conf->resync_lock); in raise_barrier()
981 atomic_inc(&conf->barrier[idx]); in raise_barrier()
983 * In raise_barrier() we firstly increase conf->barrier[idx] then in raise_barrier()
984 * check conf->nr_pending[idx]. In _wait_barrier() we firstly in raise_barrier()
985 * increase conf->nr_pending[idx] then check conf->barrier[idx]. in raise_barrier()
986 * A memory barrier here to make sure conf->nr_pending[idx] won't in raise_barrier()
987 * be fetched before conf->barrier[idx] is increased. Otherwise in raise_barrier()
994 * B: while conf->nr_pending[idx] is not 0, meaning regular I/O in raise_barrier()
996 * C: while conf->barrier[idx] >= RESYNC_DEPTH, meaning reaches in raise_barrier()
999 wait_event_lock_irq(conf->wait_barrier, in raise_barrier()
1000 (!conf->array_frozen && in raise_barrier()
1001 !atomic_read(&conf->nr_pending[idx]) && in raise_barrier()
1002 atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) || in raise_barrier()
1003 test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery), in raise_barrier()
1004 conf->resync_lock); in raise_barrier()
1006 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { in raise_barrier()
1007 atomic_dec(&conf->barrier[idx]); in raise_barrier()
1008 spin_unlock_irq(&conf->resync_lock); in raise_barrier()
1009 wake_up(&conf->wait_barrier); in raise_barrier()
1010 return -EINTR; in raise_barrier()
1013 atomic_inc(&conf->nr_sync_pending); in raise_barrier()
1014 spin_unlock_irq(&conf->resync_lock); in raise_barrier()
1023 BUG_ON(atomic_read(&conf->barrier[idx]) <= 0); in lower_barrier()
1025 atomic_dec(&conf->barrier[idx]); in lower_barrier()
1026 atomic_dec(&conf->nr_sync_pending); in lower_barrier()
1027 wake_up(&conf->wait_barrier); in lower_barrier()
1035 * We need to increase conf->nr_pending[idx] very early here, in _wait_barrier()
1037 * conf->nr_pending[idx] to be 0. Then we can avoid holding in _wait_barrier()
1038 * conf->resync_lock when there is no barrier raised in same in _wait_barrier()
1042 atomic_inc(&conf->nr_pending[idx]); in _wait_barrier()
1044 * In _wait_barrier() we firstly increase conf->nr_pending[idx], then in _wait_barrier()
1045 * check conf->barrier[idx]. In raise_barrier() we firstly increase in _wait_barrier()
1046 * conf->barrier[idx], then check conf->nr_pending[idx]. A memory in _wait_barrier()
1047 * barrier is necessary here to make sure conf->barrier[idx] won't be in _wait_barrier()
1048 * fetched before conf->nr_pending[idx] is increased. Otherwise there in _wait_barrier()
1055 * here. If, while we check conf->barrier[idx], the array is in _wait_barrier()
1056 * frozen (conf->array_frozen is 1), and conf->barrier[idx] is in _wait_barrier()
1062 if (!READ_ONCE(conf->array_frozen) && in _wait_barrier()
1063 !atomic_read(&conf->barrier[idx])) in _wait_barrier()
1067 * After holding conf->resync_lock, conf->nr_pending[idx] in _wait_barrier()
1070 * raise_barrier() might be waiting for conf->nr_pending[idx] in _wait_barrier()
1073 spin_lock_irq(&conf->resync_lock); in _wait_barrier()
1074 atomic_inc(&conf->nr_waiting[idx]); in _wait_barrier()
1075 atomic_dec(&conf->nr_pending[idx]); in _wait_barrier()
1087 wait_event_lock_irq(conf->wait_barrier, in _wait_barrier()
1088 !conf->array_frozen && in _wait_barrier()
1089 !atomic_read(&conf->barrier[idx]), in _wait_barrier()
1090 conf->resync_lock); in _wait_barrier()
1091 atomic_inc(&conf->nr_pending[idx]); in _wait_barrier()
1094 atomic_dec(&conf->nr_waiting[idx]); in _wait_barrier()
1095 spin_unlock_irq(&conf->resync_lock); in _wait_barrier()
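/*
 * [Editor's sketch] The two comment blocks above describe a classic
 * store-buffering pairing: each side increments its own counter,
 * issues a full memory barrier, then reads the other side's counter,
 * so at least one side is guaranteed to observe the other. A
 * user-space rendering with C11 atomics, where seq_cst operations
 * stand in for the kernel's implied smp_mb() (array_frozen and the
 * slow-path sleep are omitted):
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int barrier_cnt;	/* models conf->barrier[idx] */
static atomic_int nr_pending;	/* models conf->nr_pending[idx] */

/* resync side, as in raise_barrier() */
static bool resync_may_proceed(void)
{
	atomic_fetch_add(&barrier_cnt, 1);	/* A */
	return atomic_load(&nr_pending) == 0;	/* B */
}

/* regular I/O side, as in _wait_barrier() */
static bool io_may_proceed(void)
{
	atomic_fetch_add(&nr_pending, 1);	/* C */
	if (atomic_load(&barrier_cnt) == 0)	/* D */
		return true;	/* fast path: no lock taken */
	atomic_fetch_sub(&nr_pending, 1);
	return false;		/* slow path: sleep on wait_barrier */
}
/*
 * Because A precedes B and C precedes D, with full barriers in
 * between, B and D cannot both read zero once A and C have executed:
 * either resync sees the pending I/O or the I/O side sees the raised
 * barrier (or both).
 */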
1109 * conf->barrier[idx] here, memory barrier is unnecessary as well. in wait_read_barrier()
1111 atomic_inc(&conf->nr_pending[idx]); in wait_read_barrier()
1113 if (!READ_ONCE(conf->array_frozen)) in wait_read_barrier()
1116 spin_lock_irq(&conf->resync_lock); in wait_read_barrier()
1117 atomic_inc(&conf->nr_waiting[idx]); in wait_read_barrier()
1118 atomic_dec(&conf->nr_pending[idx]); in wait_read_barrier()
1131 wait_event_lock_irq(conf->wait_barrier, in wait_read_barrier()
1132 !conf->array_frozen, in wait_read_barrier()
1133 conf->resync_lock); in wait_read_barrier()
1134 atomic_inc(&conf->nr_pending[idx]); in wait_read_barrier()
1137 atomic_dec(&conf->nr_waiting[idx]); in wait_read_barrier()
1138 spin_unlock_irq(&conf->resync_lock); in wait_read_barrier()
1151 atomic_dec(&conf->nr_pending[idx]); in _allow_barrier()
1162 /* conf->resync_lock should be held */
1167 ret = atomic_read(&conf->nr_sync_pending); in get_unqueued_pending()
1169 ret += atomic_read(&conf->nr_pending[idx]) - in get_unqueued_pending()
1170 atomic_read(&conf->nr_queued[idx]); in get_unqueued_pending()
1188 * Every flying I/O contributes to a conf->nr_pending[idx], idx is the in freeze_array()
1190 * normal I/O are queued, sum of all conf->nr_pending[] will match sum in freeze_array()
1191 * of all conf->nr_queued[]. But normal I/O failure is an exception, in freeze_array()
1200 spin_lock_irq(&conf->resync_lock); in freeze_array()
1201 conf->array_frozen = 1; in freeze_array()
1202 mddev_add_trace_msg(conf->mddev, "raid1 wait freeze"); in freeze_array()
1204 conf->wait_barrier, in freeze_array()
1206 conf->resync_lock, in freeze_array()
1208 spin_unlock_irq(&conf->resync_lock); in freeze_array()
1213 spin_lock_irq(&conf->resync_lock); in unfreeze_array()
1214 conf->array_frozen = 0; in unfreeze_array()
1215 spin_unlock_irq(&conf->resync_lock); in unfreeze_array()
1216 wake_up(&conf->wait_barrier); in unfreeze_array()
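/*
 * [Editor's note] freeze_array(conf, extra) relies on the accounting
 * in get_unqueued_pending(): every in-flight r1_bio is counted in
 * nr_pending[idx], and every failed bio parked for raid1d is also
 * counted in nr_queued[idx]. The array is considered quiesced once
 *
 *   nr_sync_pending + sum(nr_pending[]) - sum(nr_queued[]) == extra
 *
 * i.e. nothing is flying except the 'extra' bios the caller itself
 * still holds (typically 1 when freezing from an error path that
 * owns one bio).
 */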
1222 int size = bio->bi_iter.bi_size; in alloc_behind_master_bio()
1223 unsigned vcnt = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; in alloc_behind_master_bio()
1228 &r1_bio->mddev->bio_set); in alloc_behind_master_bio()
1232 behind_bio->bi_iter.bi_size = size; in alloc_behind_master_bio()
1249 size -= len; in alloc_behind_master_bio()
1255 r1_bio->behind_master_bio = behind_bio; in alloc_behind_master_bio()
1256 set_bit(R1BIO_BehindIO, &r1_bio->state); in alloc_behind_master_bio()
1262 bio->bi_iter.bi_size); in alloc_behind_master_bio()
1271 struct mddev *mddev = plug->cb.data; in raid1_unplug()
1272 struct r1conf *conf = mddev->private; in raid1_unplug()
1276 spin_lock_irq(&conf->device_lock); in raid1_unplug()
1277 bio_list_merge(&conf->pending_bio_list, &plug->pending); in raid1_unplug()
1278 spin_unlock_irq(&conf->device_lock); in raid1_unplug()
1280 md_wakeup_thread(mddev->thread); in raid1_unplug()
1285 /* we aren't scheduling, so we can do the write-out directly. */ in raid1_unplug()
1286 bio = bio_list_get(&plug->pending); in raid1_unplug()
1293 r1_bio->master_bio = bio; in init_r1bio()
1294 r1_bio->sectors = bio_sectors(bio); in init_r1bio()
1295 r1_bio->state = 0; in init_r1bio()
1296 r1_bio->mddev = mddev; in init_r1bio()
1297 r1_bio->sector = bio->bi_iter.bi_sector; in init_r1bio()
1303 struct r1conf *conf = mddev->private; in alloc_r1bio()
1306 r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO); in alloc_r1bio()
1308 memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0])); in alloc_r1bio()
1316 struct r1conf *conf = mddev->private; in raid1_read_request()
1317 struct raid1_info *mirror; in raid1_read_request()
1334 if (!wait_read_barrier(conf, bio->bi_iter.bi_sector, in raid1_read_request()
1335 bio->bi_opf & REQ_NOWAIT)) { in raid1_read_request()
1344 r1_bio->sectors = max_read_sectors; in raid1_read_request()
1347 * make_request() can abort the operation when read-ahead is being in raid1_read_request()
1356 conf->mirrors[r1_bio->read_disk].rdev->bdev, in raid1_read_request()
1357 r1_bio->sector); in raid1_read_request()
1361 mirror = conf->mirrors + rdisk; in raid1_read_request()
1364 pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %pg\n", in raid1_read_request()
1366 (unsigned long long)r1_bio->sector, in raid1_read_request()
1367 mirror->rdev->bdev); in raid1_read_request()
1369 if (test_bit(WriteMostly, &mirror->rdev->flags)) { in raid1_read_request()
1371 * Reading from a write-mostly device must take care not to in raid1_read_request()
1372 * over-take any writes that are 'behind' in raid1_read_request()
1375 mddev->bitmap_ops->wait_behind_writes(mddev); in raid1_read_request()
1380 gfp, &conf->bio_split); in raid1_read_request()
1389 r1_bio->master_bio = bio; in raid1_read_request()
1390 r1_bio->sectors = max_sectors; in raid1_read_request()
1393 r1_bio->read_disk = rdisk; in raid1_read_request()
1396 r1_bio->master_bio = bio; in raid1_read_request()
1398 read_bio = bio_alloc_clone(mirror->rdev->bdev, bio, gfp, in raid1_read_request()
1399 &mddev->bio_set); in raid1_read_request()
1401 r1_bio->bios[rdisk] = read_bio; in raid1_read_request()
1403 read_bio->bi_iter.bi_sector = r1_bio->sector + in raid1_read_request()
1404 mirror->rdev->data_offset; in raid1_read_request()
1405 read_bio->bi_end_io = raid1_end_read_request; in raid1_read_request()
1406 if (test_bit(FailFast, &mirror->rdev->flags) && in raid1_read_request()
1407 test_bit(R1BIO_FailFast, &r1_bio->state)) in raid1_read_request()
1408 read_bio->bi_opf |= MD_FAILFAST; in raid1_read_request()
1409 read_bio->bi_private = r1_bio; in raid1_read_request()
1410 mddev_trace_remap(mddev, read_bio, r1_bio->sector); in raid1_read_request()
1415 atomic_dec(&mirror->rdev->nr_pending); in raid1_read_request()
1416 bio->bi_status = errno_to_blk_status(error); in raid1_read_request()
1417 set_bit(R1BIO_Uptodate, &r1_bio->state); in raid1_read_request()
1423 struct r1conf *conf = mddev->private; in wait_blocked_rdev()
1424 int disks = conf->raid_disks * 2; in wait_blocked_rdev()
1429 struct md_rdev *rdev = conf->mirrors[i].rdev; in wait_blocked_rdev()
1435 if (test_bit(WriteErrorSeen, &rdev->flags) && in wait_blocked_rdev()
1436 rdev_has_badblock(rdev, bio->bi_iter.bi_sector, in wait_blocked_rdev()
1438 set_bit(BlockedBadBlocks, &rdev->flags); in wait_blocked_rdev()
1441 if (bio->bi_opf & REQ_NOWAIT) in wait_blocked_rdev()
1444 mddev_add_trace_msg(rdev->mddev, "raid1 wait rdev %d blocked", in wait_blocked_rdev()
1445 rdev->raid_disk); in wait_blocked_rdev()
1446 atomic_inc(&rdev->nr_pending); in wait_blocked_rdev()
1447 md_wait_for_blocked_rdev(rdev, rdev->mddev); in wait_blocked_rdev()
1458 struct r1conf *conf = mddev->private; in raid1_write_request()
1468 md_cluster_ops->area_resyncing(mddev, WRITE, in raid1_write_request()
1469 bio->bi_iter.bi_sector, bio_end_sector(bio))) { in raid1_write_request()
1472 if (bio->bi_opf & REQ_NOWAIT) { in raid1_write_request()
1477 prepare_to_wait(&conf->wait_barrier, in raid1_write_request()
1479 if (!md_cluster_ops->area_resyncing(mddev, WRITE, in raid1_write_request()
1480 bio->bi_iter.bi_sector, in raid1_write_request()
1485 finish_wait(&conf->wait_barrier, &w); in raid1_write_request()
1493 if (!wait_barrier(conf, bio->bi_iter.bi_sector, in raid1_write_request()
1494 bio->bi_opf & REQ_NOWAIT)) { in raid1_write_request()
1505 r1_bio->sectors = max_write_sectors; in raid1_write_request()
1518 disks = conf->raid_disks * 2; in raid1_write_request()
1519 max_sectors = r1_bio->sectors; in raid1_write_request()
1521 struct md_rdev *rdev = conf->mirrors[i].rdev; in raid1_write_request()
1524 * The write-behind io is only attempted on drives marked as in raid1_write_request()
1525 * write-mostly, which means we could allocate write behind in raid1_write_request()
1528 if (!is_discard && rdev && test_bit(WriteMostly, &rdev->flags)) in raid1_write_request()
1531 r1_bio->bios[i] = NULL; in raid1_write_request()
1532 if (!rdev || test_bit(Faulty, &rdev->flags)) in raid1_write_request()
1535 atomic_inc(&rdev->nr_pending); in raid1_write_request()
1536 if (test_bit(WriteErrorSeen, &rdev->flags)) { in raid1_write_request()
1541 is_bad = is_badblock(rdev, r1_bio->sector, max_sectors, in raid1_write_request()
1543 if (is_bad && first_bad <= r1_bio->sector) { in raid1_write_request()
1545 bad_sectors -= (r1_bio->sector - first_bad); in raid1_write_request()
1564 if (bio->bi_opf & REQ_ATOMIC) { in raid1_write_request()
1565 error = -EIO; in raid1_write_request()
1569 good_sectors = first_bad - r1_bio->sector; in raid1_write_request()
1574 r1_bio->bios[i] = bio; in raid1_write_request()
1579 * alloc_behind_master_bio allocates a copy of the data payload a page in raid1_write_request()
1583 if (write_behind && mddev->bitmap) in raid1_write_request()
1588 GFP_NOIO, &conf->bio_split); in raid1_write_request()
1597 r1_bio->master_bio = bio; in raid1_write_request()
1598 r1_bio->sectors = max_sectors; in raid1_write_request()
1602 r1_bio->master_bio = bio; in raid1_write_request()
1603 atomic_set(&r1_bio->remaining, 1); in raid1_write_request()
1604 atomic_set(&r1_bio->behind_remaining, 0); in raid1_write_request()
1610 struct md_rdev *rdev = conf->mirrors[i].rdev; in raid1_write_request()
1611 if (!r1_bio->bios[i]) in raid1_write_request()
1616 mddev->bitmap_info.max_write_behind; in raid1_write_request()
1624 err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats); in raid1_write_request()
1629 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) in raid1_write_request()
1630 mddev->bitmap_ops->start_behind_write(mddev); in raid1_write_request()
1634 if (r1_bio->behind_master_bio) { in raid1_write_request()
1635 mbio = bio_alloc_clone(rdev->bdev, in raid1_write_request()
1636 r1_bio->behind_master_bio, in raid1_write_request()
1637 GFP_NOIO, &mddev->bio_set); in raid1_write_request()
1638 if (test_bit(CollisionCheck, &rdev->flags)) in raid1_write_request()
1640 if (test_bit(WriteMostly, &rdev->flags)) in raid1_write_request()
1641 atomic_inc(&r1_bio->behind_remaining); in raid1_write_request()
1643 mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO, in raid1_write_request()
1644 &mddev->bio_set); in raid1_write_request()
1646 if (mddev->serialize_policy) in raid1_write_request()
1650 r1_bio->bios[i] = mbio; in raid1_write_request()
1652 mbio->bi_iter.bi_sector = (r1_bio->sector + rdev->data_offset); in raid1_write_request()
1653 mbio->bi_end_io = raid1_end_write_request; in raid1_write_request()
1654 if (test_bit(FailFast, &rdev->flags) && in raid1_write_request()
1655 !test_bit(WriteMostly, &rdev->flags) && in raid1_write_request()
1656 conf->raid_disks - mddev->degraded > 1) in raid1_write_request()
1657 mbio->bi_opf |= MD_FAILFAST; in raid1_write_request()
1658 mbio->bi_private = r1_bio; in raid1_write_request()
1660 atomic_inc(&r1_bio->remaining); in raid1_write_request()
1661 mddev_trace_remap(mddev, mbio, r1_bio->sector); in raid1_write_request()
1663 mbio->bi_bdev = (void *)rdev; in raid1_write_request()
1665 spin_lock_irqsave(&conf->device_lock, flags); in raid1_write_request()
1666 bio_list_add(&conf->pending_bio_list, mbio); in raid1_write_request()
1667 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_write_request()
1668 md_wakeup_thread(mddev->thread); in raid1_write_request()
1679 if (r1_bio->bios[k]) { in raid1_write_request()
1680 rdev_dec_pending(conf->mirrors[k].rdev, mddev); in raid1_write_request()
1681 r1_bio->bios[k] = NULL; in raid1_write_request()
1685 bio->bi_status = errno_to_blk_status(error); in raid1_write_request()
1686 set_bit(R1BIO_Uptodate, &r1_bio->state); in raid1_write_request()
1694 if (unlikely(bio->bi_opf & REQ_PREFLUSH) in raid1_make_request()
1706 bio->bi_iter.bi_sector, bio_sectors(bio)); in raid1_make_request()
1719 struct r1conf *conf = mddev->private; in raid1_status()
1722 lockdep_assert_held(&mddev->lock); in raid1_status()
1724 seq_printf(seq, " [%d/%d] [", conf->raid_disks, in raid1_status()
1725 conf->raid_disks - mddev->degraded); in raid1_status()
1726 for (i = 0; i < conf->raid_disks; i++) { in raid1_status()
1727 struct md_rdev *rdev = READ_ONCE(conf->mirrors[i].rdev); in raid1_status()
1730 rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_"); in raid1_status()
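/*
 * [Editor's note] For a healthy two-disk mirror this renders as
 * " [2/2] [UU]"; with one member failed it becomes " [2/1] [U_]".
 * This is the same per-array summary that appears in /proc/mdstat.
 */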
1736 * raid1_error() - RAID1 error handler.
1742 * - &MD_BROKEN flag is set in &mddev->flags.
1743 * - recovery is disabled.
1745 * - recovery is interrupted.
1746 * - &mddev->degraded is bumped.
1749 * &mddev->fail_last_dev is off.
1753 struct r1conf *conf = mddev->private; in raid1_error()
1756 spin_lock_irqsave(&conf->device_lock, flags); in raid1_error()
1758 if (test_bit(In_sync, &rdev->flags) && in raid1_error()
1759 (conf->raid_disks - mddev->degraded) == 1) { in raid1_error()
1760 set_bit(MD_BROKEN, &mddev->flags); in raid1_error()
1762 if (!mddev->fail_last_dev) { in raid1_error()
1763 conf->recovery_disabled = mddev->recovery_disabled; in raid1_error()
1764 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_error()
1768 set_bit(Blocked, &rdev->flags); in raid1_error()
1769 if (test_and_clear_bit(In_sync, &rdev->flags)) in raid1_error()
1770 mddev->degraded++; in raid1_error()
1771 set_bit(Faulty, &rdev->flags); in raid1_error()
1772 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_error()
1776 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in raid1_error()
1777 set_mask_bits(&mddev->sb_flags, 0, in raid1_error()
1781 mdname(mddev), rdev->bdev, in raid1_error()
1782 mdname(mddev), conf->raid_disks - mddev->degraded); in raid1_error()
1794 pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded, in print_conf()
1795 conf->raid_disks); in print_conf()
1797 lockdep_assert_held(&conf->mddev->reconfig_mutex); in print_conf()
1798 for (i = 0; i < conf->raid_disks; i++) { in print_conf()
1799 struct md_rdev *rdev = conf->mirrors[i].rdev; in print_conf()
1802 i, !test_bit(In_sync, &rdev->flags), in print_conf()
1803 !test_bit(Faulty, &rdev->flags), in print_conf()
1804 rdev->bdev); in print_conf()
1817 mempool_exit(&conf->r1buf_pool); in close_sync()
1823 struct r1conf *conf = mddev->private; in raid1_spare_active()
1832 * which expects 'In_sync' flags and ->degraded to be consistent. in raid1_spare_active()
1834 spin_lock_irqsave(&conf->device_lock, flags); in raid1_spare_active()
1835 for (i = 0; i < conf->raid_disks; i++) { in raid1_spare_active()
1836 struct md_rdev *rdev = conf->mirrors[i].rdev; in raid1_spare_active()
1837 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev; in raid1_spare_active()
1839 && !test_bit(Candidate, &repl->flags) in raid1_spare_active()
1840 && repl->recovery_offset == MaxSector in raid1_spare_active()
1841 && !test_bit(Faulty, &repl->flags) in raid1_spare_active()
1842 && !test_and_set_bit(In_sync, &repl->flags)) { in raid1_spare_active()
1845 !test_and_clear_bit(In_sync, &rdev->flags)) in raid1_spare_active()
1850 * it gets removed and never re-added in raid1_spare_active()
1852 set_bit(Faulty, &rdev->flags); in raid1_spare_active()
1854 rdev->sysfs_state); in raid1_spare_active()
1858 && rdev->recovery_offset == MaxSector in raid1_spare_active()
1859 && !test_bit(Faulty, &rdev->flags) in raid1_spare_active()
1860 && !test_and_set_bit(In_sync, &rdev->flags)) { in raid1_spare_active()
1862 sysfs_notify_dirent_safe(rdev->sysfs_state); in raid1_spare_active()
1865 mddev->degraded -= count; in raid1_spare_active()
1866 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_spare_active()
1875 struct raid1_info *info = conf->mirrors + disk; in raid1_add_conf()
1878 info += conf->raid_disks; in raid1_add_conf()
1880 if (info->rdev) in raid1_add_conf()
1883 if (bdev_nonrot(rdev->bdev)) { in raid1_add_conf()
1884 set_bit(Nonrot, &rdev->flags); in raid1_add_conf()
1885 WRITE_ONCE(conf->nonrot_disks, conf->nonrot_disks + 1); in raid1_add_conf()
1888 rdev->raid_disk = disk; in raid1_add_conf()
1889 info->head_position = 0; in raid1_add_conf()
1890 info->seq_start = MaxSector; in raid1_add_conf()
1891 WRITE_ONCE(info->rdev, rdev); in raid1_add_conf()
1898 struct raid1_info *info = conf->mirrors + disk; in raid1_remove_conf()
1899 struct md_rdev *rdev = info->rdev; in raid1_remove_conf()
1901 if (!rdev || test_bit(In_sync, &rdev->flags) || in raid1_remove_conf()
1902 atomic_read(&rdev->nr_pending)) in raid1_remove_conf()
1905 /* Only remove non-faulty devices if recovery is not possible. */ in raid1_remove_conf()
1906 if (!test_bit(Faulty, &rdev->flags) && in raid1_remove_conf()
1907 rdev->mddev->recovery_disabled != conf->recovery_disabled && in raid1_remove_conf()
1908 rdev->mddev->degraded < conf->raid_disks) in raid1_remove_conf()
1911 if (test_and_clear_bit(Nonrot, &rdev->flags)) in raid1_remove_conf()
1912 WRITE_ONCE(conf->nonrot_disks, conf->nonrot_disks - 1); in raid1_remove_conf()
1914 WRITE_ONCE(info->rdev, NULL); in raid1_remove_conf()
1920 struct r1conf *conf = mddev->private; in raid1_add_disk()
1921 int err = -EEXIST; in raid1_add_disk()
1922 int mirror = 0, repl_slot = -1; in raid1_add_disk()
1925 int last = conf->raid_disks - 1; in raid1_add_disk()
1927 if (mddev->recovery_disabled == conf->recovery_disabled) in raid1_add_disk()
1928 return -EBUSY; in raid1_add_disk()
1930 if (rdev->raid_disk >= 0) in raid1_add_disk()
1931 first = last = rdev->raid_disk; in raid1_add_disk()
1934 * find the disk ... but prefer rdev->saved_raid_disk in raid1_add_disk()
1937 if (rdev->saved_raid_disk >= 0 && in raid1_add_disk()
1938 rdev->saved_raid_disk >= first && in raid1_add_disk()
1939 rdev->saved_raid_disk < conf->raid_disks && in raid1_add_disk()
1940 conf->mirrors[rdev->saved_raid_disk].rdev == NULL) in raid1_add_disk()
1941 first = last = rdev->saved_raid_disk; in raid1_add_disk()
1943 for (mirror = first; mirror <= last; mirror++) { in raid1_add_disk()
1944 p = conf->mirrors + mirror; in raid1_add_disk()
1945 if (!p->rdev) { in raid1_add_disk()
1950 raid1_add_conf(conf, rdev, mirror, false); in raid1_add_disk()
1954 if (rdev->saved_raid_disk < 0) in raid1_add_disk()
1955 conf->fullsync = 1; in raid1_add_disk()
1958 if (test_bit(WantReplacement, &p->rdev->flags) && in raid1_add_disk()
1959 p[conf->raid_disks].rdev == NULL && repl_slot < 0) in raid1_add_disk()
1960 repl_slot = mirror; in raid1_add_disk()
1965 clear_bit(In_sync, &rdev->flags); in raid1_add_disk()
1966 set_bit(Replacement, &rdev->flags); in raid1_add_disk()
1969 conf->fullsync = 1; in raid1_add_disk()
1978 struct r1conf *conf = mddev->private; in raid1_remove_disk()
1980 int number = rdev->raid_disk; in raid1_remove_disk()
1981 struct raid1_info *p = conf->mirrors + number; in raid1_remove_disk()
1983 if (unlikely(number >= conf->raid_disks)) in raid1_remove_disk()
1986 if (rdev != p->rdev) { in raid1_remove_disk()
1987 number += conf->raid_disks; in raid1_remove_disk()
1988 p = conf->mirrors + number; in raid1_remove_disk()
1992 if (rdev == p->rdev) { in raid1_remove_disk()
1994 err = -EBUSY; in raid1_remove_disk()
1998 if (number < conf->raid_disks && in raid1_remove_disk()
1999 conf->mirrors[conf->raid_disks + number].rdev) { in raid1_remove_disk()
2005 conf->mirrors[conf->raid_disks + number].rdev; in raid1_remove_disk()
2007 if (atomic_read(&repl->nr_pending)) { in raid1_remove_disk()
2014 err = -EBUSY; in raid1_remove_disk()
2018 clear_bit(Replacement, &repl->flags); in raid1_remove_disk()
2019 WRITE_ONCE(p->rdev, repl); in raid1_remove_disk()
2020 conf->mirrors[conf->raid_disks + number].rdev = NULL; in raid1_remove_disk()
2024 clear_bit(WantReplacement, &rdev->flags); in raid1_remove_disk()
2037 update_head_pos(r1_bio->read_disk, r1_bio); in end_sync_read()
2040 * we have read a block, now it needs to be re-written, in end_sync_read()
2041 * or re-read if the read failed. in end_sync_read()
2044 if (!bio->bi_status) in end_sync_read()
2045 set_bit(R1BIO_Uptodate, &r1_bio->state); in end_sync_read()
2047 if (atomic_dec_and_test(&r1_bio->remaining)) in end_sync_read()
2054 sector_t s = r1_bio->sector; in abort_sync_write()
2055 long sectors_to_go = r1_bio->sectors; in abort_sync_write()
2059 mddev->bitmap_ops->end_sync(mddev, s, &sync_blocks); in abort_sync_write()
2061 sectors_to_go -= sync_blocks; in abort_sync_write()
2067 if (atomic_dec_and_test(&r1_bio->remaining)) { in put_sync_write_buf()
2068 struct mddev *mddev = r1_bio->mddev; in put_sync_write_buf()
2069 int s = r1_bio->sectors; in put_sync_write_buf()
2071 if (test_bit(R1BIO_MadeGood, &r1_bio->state) || in put_sync_write_buf()
2072 test_bit(R1BIO_WriteError, &r1_bio->state)) in put_sync_write_buf()
2083 int uptodate = !bio->bi_status; in end_sync_write()
2085 struct mddev *mddev = r1_bio->mddev; in end_sync_write()
2086 struct r1conf *conf = mddev->private; in end_sync_write()
2087 struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev; in end_sync_write()
2091 set_bit(WriteErrorSeen, &rdev->flags); in end_sync_write()
2092 if (!test_and_set_bit(WantReplacement, &rdev->flags)) in end_sync_write()
2094 mddev->recovery); in end_sync_write()
2095 set_bit(R1BIO_WriteError, &r1_bio->state); in end_sync_write()
2096 } else if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors) && in end_sync_write()
2097 !rdev_has_badblock(conf->mirrors[r1_bio->read_disk].rdev, in end_sync_write()
2098 r1_bio->sector, r1_bio->sectors)) { in end_sync_write()
2099 set_bit(R1BIO_MadeGood, &r1_bio->state); in end_sync_write()
2112 set_bit(WriteErrorSeen, &rdev->flags); in r1_sync_page_io()
2114 &rdev->flags)) in r1_sync_page_io()
2116 rdev->mddev->recovery); in r1_sync_page_io()
2118 /* need to record an error - either for the block or the device */ in r1_sync_page_io()
2120 md_error(rdev->mddev, rdev); in r1_sync_page_io()
2127 * good data, much like with normal read errors. Only in fix_sync_read_error()
2129 * need to re-issue the read request. in fix_sync_read_error()
2137 struct mddev *mddev = r1_bio->mddev; in fix_sync_read_error()
2138 struct r1conf *conf = mddev->private; in fix_sync_read_error()
2139 struct bio *bio = r1_bio->bios[r1_bio->read_disk]; in fix_sync_read_error()
2140 struct page **pages = get_resync_pages(bio)->pages; in fix_sync_read_error()
2141 sector_t sect = r1_bio->sector; in fix_sync_read_error()
2142 int sectors = r1_bio->sectors; in fix_sync_read_error()
2146 rdev = conf->mirrors[r1_bio->read_disk].rdev; in fix_sync_read_error()
2147 if (test_bit(FailFast, &rdev->flags)) { in fix_sync_read_error()
2148 /* Don't try recovering from here - just fail it in fix_sync_read_error()
2151 if (test_bit(Faulty, &rdev->flags)) in fix_sync_read_error()
2155 bio->bi_end_io = end_sync_write; in fix_sync_read_error()
2160 int d = r1_bio->read_disk; in fix_sync_read_error()
2167 if (r1_bio->bios[d]->bi_end_io == end_sync_read) { in fix_sync_read_error()
2172 rdev = conf->mirrors[d].rdev; in fix_sync_read_error()
2181 if (d == conf->raid_disks * 2) in fix_sync_read_error()
2183 } while (!success && d != r1_bio->read_disk); in fix_sync_read_error()
2193 mdname(mddev), bio->bi_bdev, in fix_sync_read_error()
2194 (unsigned long long)r1_bio->sector); in fix_sync_read_error()
2195 for (d = 0; d < conf->raid_disks * 2; d++) { in fix_sync_read_error()
2196 rdev = conf->mirrors[d].rdev; in fix_sync_read_error()
2197 if (!rdev || test_bit(Faulty, &rdev->flags)) in fix_sync_read_error()
2203 conf->recovery_disabled = in fix_sync_read_error()
2204 mddev->recovery_disabled; in fix_sync_read_error()
2205 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in fix_sync_read_error()
2206 md_done_sync(mddev, r1_bio->sectors, 0); in fix_sync_read_error()
2211 sectors -= s; in fix_sync_read_error()
2218 /* write it back and re-read */ in fix_sync_read_error()
2219 while (d != r1_bio->read_disk) { in fix_sync_read_error()
2221 d = conf->raid_disks * 2; in fix_sync_read_error()
2222 d--; in fix_sync_read_error()
2223 if (r1_bio->bios[d]->bi_end_io != end_sync_read) in fix_sync_read_error()
2225 rdev = conf->mirrors[d].rdev; in fix_sync_read_error()
2229 r1_bio->bios[d]->bi_end_io = NULL; in fix_sync_read_error()
2234 while (d != r1_bio->read_disk) { in fix_sync_read_error()
2236 d = conf->raid_disks * 2; in fix_sync_read_error()
2237 d--; in fix_sync_read_error()
2238 if (r1_bio->bios[d]->bi_end_io != end_sync_read) in fix_sync_read_error()
2240 rdev = conf->mirrors[d].rdev; in fix_sync_read_error()
2244 atomic_add(s, &rdev->corrected_errors); in fix_sync_read_error()
2246 sectors -= s; in fix_sync_read_error()
2250 set_bit(R1BIO_Uptodate, &r1_bio->state); in fix_sync_read_error()
2251 bio->bi_status = 0; in fix_sync_read_error()
2262 * attempt an over-write in process_checks()
2264 struct mddev *mddev = r1_bio->mddev; in process_checks()
2265 struct r1conf *conf = mddev->private; in process_checks()
2271 vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9); in process_checks()
2272 for (i = 0; i < conf->raid_disks * 2; i++) { in process_checks()
2274 struct bio *b = r1_bio->bios[i]; in process_checks()
2276 if (b->bi_end_io != end_sync_read) in process_checks()
2279 status = b->bi_status; in process_checks()
2280 bio_reset(b, conf->mirrors[i].rdev->bdev, REQ_OP_READ); in process_checks()
2281 b->bi_status = status; in process_checks()
2282 b->bi_iter.bi_sector = r1_bio->sector + in process_checks()
2283 conf->mirrors[i].rdev->data_offset; in process_checks()
2284 b->bi_end_io = end_sync_read; in process_checks()
2285 rp->raid_bio = r1_bio; in process_checks()
2286 b->bi_private = rp; in process_checks()
2289 md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9); in process_checks()
2291 for (primary = 0; primary < conf->raid_disks * 2; primary++) in process_checks()
2292 if (r1_bio->bios[primary]->bi_end_io == end_sync_read && in process_checks()
2293 !r1_bio->bios[primary]->bi_status) { in process_checks()
2294 r1_bio->bios[primary]->bi_end_io = NULL; in process_checks()
2295 rdev_dec_pending(conf->mirrors[primary].rdev, mddev); in process_checks()
2298 r1_bio->read_disk = primary; in process_checks()
2299 for (i = 0; i < conf->raid_disks * 2; i++) { in process_checks()
2301 struct bio *pbio = r1_bio->bios[primary]; in process_checks()
2302 struct bio *sbio = r1_bio->bios[i]; in process_checks()
2303 blk_status_t status = sbio->bi_status; in process_checks()
2304 struct page **ppages = get_resync_pages(pbio)->pages; in process_checks()
2305 struct page **spages = get_resync_pages(sbio)->pages; in process_checks()
2310 if (sbio->bi_end_io != end_sync_read) in process_checks()
2313 sbio->bi_status = 0; in process_checks()
2316 page_len[j++] = bi->bv_len; in process_checks()
2319 for (j = vcnt; j-- ; ) { in process_checks()
2328 atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); in process_checks()
2329 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) in process_checks()
2332 sbio->bi_end_io = NULL; in process_checks()
2333 rdev_dec_pending(conf->mirrors[i].rdev, mddev); in process_checks()
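/*
 * [Editor's sketch] The loop above compares the primary's pages with
 * each secondary's, page by page. In outline (user-space model; the
 * bio iteration and page_len[] bookkeeping are simplified away):
 */
#include <string.h>

/* Return nonzero if any of 'vcnt' page-sized chunks differ. */
static int pages_differ(void **ppages, void **spages,
			const size_t *len, int vcnt)
{
	for (int j = vcnt - 1; j >= 0; j--)
		if (memcmp(ppages[j], spages[j], len[j]))
			return 1;
	return 0;
}
/*
 * On a difference, a "check" pass only bumps resync_mismatches,
 * while "repair" (and resync) rewrites the secondary from the
 * primary's data.
 */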
2343 struct r1conf *conf = mddev->private; in sync_request_write()
2345 int disks = conf->raid_disks * 2; in sync_request_write()
2348 if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) in sync_request_write()
2349 /* ouch - failed to read all of that. */ in sync_request_write()
2353 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) in sync_request_write()
2359 atomic_set(&r1_bio->remaining, 1); in sync_request_write()
2361 wbio = r1_bio->bios[i]; in sync_request_write()
2362 if (wbio->bi_end_io == NULL || in sync_request_write()
2363 (wbio->bi_end_io == end_sync_read && in sync_request_write()
2364 (i == r1_bio->read_disk || in sync_request_write()
2365 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)))) in sync_request_write()
2367 if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) { in sync_request_write()
2372 wbio->bi_opf = REQ_OP_WRITE; in sync_request_write()
2373 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags)) in sync_request_write()
2374 wbio->bi_opf |= MD_FAILFAST; in sync_request_write()
2376 wbio->bi_end_io = end_sync_write; in sync_request_write()
2377 atomic_inc(&r1_bio->remaining); in sync_request_write()
2378 md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio)); in sync_request_write()
2396 sector_t sect = r1_bio->sector; in fix_read_error()
2397 int sectors = r1_bio->sectors; in fix_read_error()
2398 int read_disk = r1_bio->read_disk; in fix_read_error()
2399 struct mddev *mddev = conf->mddev; in fix_read_error()
2400 struct md_rdev *rdev = conf->mirrors[read_disk].rdev; in fix_read_error()
2403 r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED; in fix_read_error()
2417 rdev = conf->mirrors[d].rdev; in fix_read_error()
2419 (test_bit(In_sync, &rdev->flags) || in fix_read_error()
2420 (!test_bit(Faulty, &rdev->flags) && in fix_read_error()
2421 rdev->recovery_offset >= sect + s)) && in fix_read_error()
2423 atomic_inc(&rdev->nr_pending); in fix_read_error()
2425 conf->tmppage, REQ_OP_READ, false)) in fix_read_error()
2433 if (d == conf->raid_disks * 2) in fix_read_error()
2438 /* Cannot read from anywhere - mark it bad */ in fix_read_error()
2439 struct md_rdev *rdev = conf->mirrors[read_disk].rdev; in fix_read_error()
2444 /* write it back and re-read */ in fix_read_error()
2448 d = conf->raid_disks * 2; in fix_read_error()
2449 d--; in fix_read_error()
2450 rdev = conf->mirrors[d].rdev; in fix_read_error()
2452 !test_bit(Faulty, &rdev->flags)) { in fix_read_error()
2453 atomic_inc(&rdev->nr_pending); in fix_read_error()
2455 conf->tmppage, REQ_OP_WRITE); in fix_read_error()
2462 d = conf->raid_disks * 2; in fix_read_error()
2463 d--; in fix_read_error()
2464 rdev = conf->mirrors[d].rdev; in fix_read_error()
2466 !test_bit(Faulty, &rdev->flags)) { in fix_read_error()
2467 atomic_inc(&rdev->nr_pending); in fix_read_error()
2469 conf->tmppage, REQ_OP_READ)) { in fix_read_error()
2470 atomic_add(s, &rdev->corrected_errors); in fix_read_error()
2474 rdev->data_offset), in fix_read_error()
2475 rdev->bdev); in fix_read_error()
2480 sectors -= s; in fix_read_error()
2487 struct mddev *mddev = r1_bio->mddev; in narrow_write_error()
2488 struct r1conf *conf = mddev->private; in narrow_write_error()
2489 struct md_rdev *rdev = conf->mirrors[i].rdev; in narrow_write_error()
2491 /* bio has the data to be written to device 'i' where in narrow_write_error()
2505 int sect_to_write = r1_bio->sectors; in narrow_write_error()
2508 if (rdev->badblocks.shift < 0) in narrow_write_error()
2511 block_sectors = roundup(1 << rdev->badblocks.shift, in narrow_write_error()
2512 bdev_logical_block_size(rdev->bdev) >> 9); in narrow_write_error()
2513 sector = r1_bio->sector; in narrow_write_error()
2515 & ~(sector_t)(block_sectors - 1)) in narrow_write_error()
2516 - sector; in narrow_write_error()
2524 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { in narrow_write_error()
2525 wbio = bio_alloc_clone(rdev->bdev, in narrow_write_error()
2526 r1_bio->behind_master_bio, in narrow_write_error()
2527 GFP_NOIO, &mddev->bio_set); in narrow_write_error()
2529 wbio = bio_alloc_clone(rdev->bdev, r1_bio->master_bio, in narrow_write_error()
2530 GFP_NOIO, &mddev->bio_set); in narrow_write_error()
2533 wbio->bi_opf = REQ_OP_WRITE; in narrow_write_error()
2534 wbio->bi_iter.bi_sector = r1_bio->sector; in narrow_write_error()
2535 wbio->bi_iter.bi_size = r1_bio->sectors << 9; in narrow_write_error()
2537 bio_trim(wbio, sector - r1_bio->sector, sectors); in narrow_write_error()
2538 wbio->bi_iter.bi_sector += rdev->data_offset; in narrow_write_error()
2547 sect_to_write -= sectors; in narrow_write_error()
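/*
 * [Editor's note] narrow_write_error() retries a failed write in
 * chunks aligned to the bad-block granularity, so each chunk that
 * fails again can be recorded exactly. Worked example with
 * badblocks.shift = 3 (8-sector, i.e. 4 KiB records) on a
 * 512-byte-sector device: block_sectors = roundup(8, 1) = 8, and for
 * r1_bio->sector = 1021 the first chunk is
 *
 *   sectors = ((1021 + 8) & ~7) - 1021 = 1024 - 1021 = 3
 *
 * which brings the next chunk to the aligned boundary at 1024.
 */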
2557 int s = r1_bio->sectors; in handle_sync_write_finished()
2558 for (m = 0; m < conf->raid_disks * 2 ; m++) { in handle_sync_write_finished()
2559 struct md_rdev *rdev = conf->mirrors[m].rdev; in handle_sync_write_finished()
2560 struct bio *bio = r1_bio->bios[m]; in handle_sync_write_finished()
2561 if (bio->bi_end_io == NULL) in handle_sync_write_finished()
2563 if (!bio->bi_status && in handle_sync_write_finished()
2564 test_bit(R1BIO_MadeGood, &r1_bio->state)) { in handle_sync_write_finished()
2565 rdev_clear_badblocks(rdev, r1_bio->sector, s, 0); in handle_sync_write_finished()
2567 if (bio->bi_status && in handle_sync_write_finished()
2568 test_bit(R1BIO_WriteError, &r1_bio->state)) { in handle_sync_write_finished()
2569 if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0)) in handle_sync_write_finished()
2570 md_error(conf->mddev, rdev); in handle_sync_write_finished()
2574 md_done_sync(conf->mddev, s, 1); in handle_sync_write_finished()
2582 for (m = 0; m < conf->raid_disks * 2 ; m++) in handle_write_finished()
2583 if (r1_bio->bios[m] == IO_MADE_GOOD) { in handle_write_finished()
2584 struct md_rdev *rdev = conf->mirrors[m].rdev; in handle_write_finished()
2586 r1_bio->sector, in handle_write_finished()
2587 r1_bio->sectors, 0); in handle_write_finished()
2588 rdev_dec_pending(rdev, conf->mddev); in handle_write_finished()
2589 } else if (r1_bio->bios[m] != NULL) { in handle_write_finished()
2596 md_error(conf->mddev, in handle_write_finished()
2597 conf->mirrors[m].rdev); in handle_write_finished()
2599 rdev_dec_pending(conf->mirrors[m].rdev, in handle_write_finished()
2600 conf->mddev); in handle_write_finished()
2603 spin_lock_irq(&conf->device_lock); in handle_write_finished()
2604 list_add(&r1_bio->retry_list, &conf->bio_end_io_list); in handle_write_finished()
2605 idx = sector_to_idx(r1_bio->sector); in handle_write_finished()
2606 atomic_inc(&conf->nr_queued[idx]); in handle_write_finished()
2607 spin_unlock_irq(&conf->device_lock); in handle_write_finished()
2612 wake_up(&conf->wait_barrier); in handle_write_finished()
2613 md_wakeup_thread(conf->mddev->thread); in handle_write_finished()
2615 if (test_bit(R1BIO_WriteError, &r1_bio->state)) in handle_write_finished()
2623 struct mddev *mddev = conf->mddev; in handle_read_error()
2628 clear_bit(R1BIO_ReadError, &r1_bio->state); in handle_read_error()
2632 * other devices. When we find one, we re-write in handle_read_error()
2638 bio = r1_bio->bios[r1_bio->read_disk]; in handle_read_error()
2640 r1_bio->bios[r1_bio->read_disk] = NULL; in handle_read_error()
2642 rdev = conf->mirrors[r1_bio->read_disk].rdev; in handle_read_error()
2643 if (mddev->ro == 0 in handle_read_error()
2644 && !test_bit(FailFast, &rdev->flags)) { in handle_read_error()
2648 } else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) { in handle_read_error()
2651 r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED; in handle_read_error()
2654 rdev_dec_pending(rdev, conf->mddev); in handle_read_error()
2655 sector = r1_bio->sector; in handle_read_error()
2656 bio = r1_bio->master_bio; in handle_read_error()
2659 r1_bio->state = 0; in handle_read_error()
2660 raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio); in handle_read_error()
static void raid1d(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct r1bio *r1_bio;
	unsigned long flags;
	struct r1conf *conf = mddev->private;
	struct list_head *head = &conf->retry_list;
	struct blk_plug plug;
	int idx;

	md_check_recovery(mddev);

	if (!list_empty_careful(&conf->bio_end_io_list) &&
	    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
		LIST_HEAD(tmp);

		spin_lock_irqsave(&conf->device_lock, flags);
		if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
			list_splice_init(&conf->bio_end_io_list, &tmp);
		spin_unlock_irqrestore(&conf->device_lock, flags);
		while (!list_empty(&tmp)) {
			r1_bio = list_first_entry(&tmp, struct r1bio,
						  retry_list);
			list_del(&r1_bio->retry_list);
			idx = sector_to_idx(r1_bio->sector);
			atomic_dec(&conf->nr_queued[idx]);
			if (test_bit(R1BIO_WriteError, &r1_bio->state))
				close_write(r1_bio);
			raid_end_bio_io(r1_bio);
		}
	}

	blk_start_plug(&plug);
	for (;;) {
		flush_pending_writes(conf);

		spin_lock_irqsave(&conf->device_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&conf->device_lock, flags);
			break;
		}
		r1_bio = list_entry(head->prev, struct r1bio, retry_list);
		list_del(head->prev);
		idx = sector_to_idx(r1_bio->sector);
		atomic_dec(&conf->nr_queued[idx]);
		spin_unlock_irqrestore(&conf->device_lock, flags);

		mddev = r1_bio->mddev;
		conf = mddev->private;
		if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
			if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
			    test_bit(R1BIO_WriteError, &r1_bio->state))
				handle_sync_write_finished(conf, r1_bio);
			else
				sync_request_write(mddev, r1_bio);
		} else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
			   test_bit(R1BIO_WriteError, &r1_bio->state))
			handle_write_finished(conf, r1_bio);
		else if (test_bit(R1BIO_ReadError, &r1_bio->state))
			handle_read_error(conf, r1_bio);
		else
			WARN_ON_ONCE(1);

		cond_resched();
		if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
			md_check_recovery(mddev);
	}
	blk_finish_plug(&plug);
}
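
/*
 * The resync buffer pool holds RESYNC_WINDOW / RESYNC_BLOCK_SIZE
 * in-flight r1buf's; each one carries one bio per mirror slot plus the
 * RESYNC_PAGES of data pages attached at allocation time.
 */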
static int init_resync(struct r1conf *conf)
{
	int buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;

	BUG_ON(mempool_initialized(&conf->r1buf_pool));
	return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc,
			    r1buf_pool_free, conf->poolinfo);
}
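
/*
 * Pool buffers are recycled, so each bio must be reset before reuse.
 * bio_reset() clears ->bi_private, hence the resync_pages pointer is
 * saved across the reset and restored afterwards.
 */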
static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
{
	struct r1bio *r1bio = mempool_alloc(&conf->r1buf_pool, GFP_NOIO);
	struct resync_pages *rps;
	struct bio *bio;
	int i;

	for (i = conf->poolinfo->raid_disks; i--; ) {
		bio = r1bio->bios[i];
		rps = bio->bi_private;
		bio_reset(bio, NULL, 0);
		bio->bi_private = rps;
	}
	r1bio->master_bio = NULL;
	return r1bio;
}
/*
 * perform a "sync" on one "block"
 *
 * We need to make sure that no normal I/O request - particularly write
 * requests - conflict with active sync requests.
 *
 * This is achieved by tracking pending requests and a 'barrier' concept
 * that can be installed to exclude normal IO requests.
 */
static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
				   int *skipped)
{
	struct r1conf *conf = mddev->private;
	struct r1bio *r1_bio;
	struct bio *bio;
	sector_t max_sector, nr_sectors;
	int disk = -1;
	int i;
	int wonly = -1;
	int write_targets = 0, read_targets = 0;
	sector_t sync_blocks;
	bool still_degraded = false;
	int good_sectors = RESYNC_SECTORS;
	int min_bad = 0; /* number of sectors that are bad in all devices */
	int idx = sector_to_idx(sector_nr);
	int page_idx = 0;

	if (!mempool_initialized(&conf->r1buf_pool))
		if (init_resync(conf))
			return 0;

	max_sector = mddev->dev_sectors;
	if (sector_nr >= max_sector) {
		/* If we aborted, we need to abort the sync on the
		 * 'current' bitmap chunk (there will only be one in
		 * raid1 resync).
		 * We can find the current address in mddev->curr_resync.
		 */
		if (mddev->curr_resync < max_sector) /* aborted */
			mddev->bitmap_ops->end_sync(mddev, mddev->curr_resync,
						    &sync_blocks);
		else /* completed sync */
			conf->fullsync = 0;

		mddev->bitmap_ops->close_sync(mddev);
		close_sync(conf);

		if (mddev_is_clustered(mddev)) {
			conf->cluster_sync_low = 0;
			conf->cluster_sync_high = 0;
		}
		return 0;
	}

	if (mddev->bitmap == NULL &&
	    mddev->recovery_cp == MaxSector &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    conf->fullsync == 0) {
		*skipped = 1;
		return max_sector - sector_nr;
	}
	/* Before building a request, check if we can skip these blocks:
	 * this call to start_sync doesn't actually record anything.
	 */
	if (!mddev->bitmap_ops->start_sync(mddev, sector_nr, &sync_blocks, true) &&
	    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		/* We can skip this block, and probably several more */
		*skipped = 1;
		return sync_blocks;
	}

	/*
	 * If there is non-resync activity waiting for a turn, then let it
	 * through before starting on this new sync request.
	 */
	if (atomic_read(&conf->nr_waiting[idx]))
		schedule_timeout_uninterruptible(1);

	/* We are incrementing sector_nr below, so to be safe check against
	 * sector_nr + two times RESYNC_SECTORS.
	 */
	mddev->bitmap_ops->cond_end_sync(mddev, sector_nr,
		mddev_is_clustered(mddev) &&
		(sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));

	if (raise_barrier(conf, sector_nr))
		return 0;

	r1_bio = raid1_alloc_init_r1buf(conf);
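
	/*
	 * At this point raise_barrier() has excluded normal I/O for this
	 * barrier bucket, so the sync bios built below cannot overlap
	 * in-flight writes.
	 */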
	/*
	 * If we get a correctably read error during resync or recovery,
	 * we might want to read from a different device.  So we
	 * flag all drives that could conceivably be read from for READ,
	 * and any others (which will be non-In_sync devices) for WRITE.
	 * If a read fails, we try reading from something else for which
	 * READ is OK.
	 */
	r1_bio->mddev = mddev;
	r1_bio->sector = sector_nr;
	r1_bio->state = 0;
	set_bit(R1BIO_IsSync, &r1_bio->state);
	/* make sure good_sectors won't go across barrier unit boundary */
	good_sectors = align_to_barrier_unit_end(sector_nr, good_sectors);

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct md_rdev *rdev;

		bio = r1_bio->bios[i];
		rdev = conf->mirrors[i].rdev;
		if (rdev == NULL ||
		    test_bit(Faulty, &rdev->flags)) {
			if (i < conf->raid_disks)
				still_degraded = true;
		} else if (!test_bit(In_sync, &rdev->flags)) {
			bio->bi_opf = REQ_OP_WRITE;
			bio->bi_end_io = end_sync_write;
			write_targets++;
		} else {
			/* may need to read from here */
			sector_t first_bad = MaxSector;
			int bad_sectors;

			if (is_badblock(rdev, sector_nr, good_sectors,
					&first_bad, &bad_sectors)) {
				if (first_bad > sector_nr)
					good_sectors = first_bad - sector_nr;
				else {
					bad_sectors -= (sector_nr - first_bad);
					if (min_bad == 0 ||
					    min_bad > bad_sectors)
						min_bad = bad_sectors;
				}
			}
			if (sector_nr < first_bad) {
				if (test_bit(WriteMostly, &rdev->flags)) {
					if (wonly < 0)
						wonly = i;
				} else {
					if (disk < 0)
						disk = i;
				}
				bio->bi_opf = REQ_OP_READ;
				bio->bi_end_io = end_sync_read;
				read_targets++;
			} else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
				   test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
				   !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
				/*
				 * The device is suitable for reading (InSync),
				 * but has bad block(s) here.  Try to correct
				 * them if we are doing resync or repair;
				 * otherwise leave this device alone for this
				 * sync request.
				 */
				bio->bi_opf = REQ_OP_WRITE;
				bio->bi_end_io = end_sync_write;
				write_targets++;
			}
		}
		if (rdev && bio->bi_end_io) {
			atomic_inc(&rdev->nr_pending);
			bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
			bio_set_dev(bio, rdev->bdev);
			if (test_bit(FailFast, &rdev->flags))
				bio->bi_opf |= MD_FAILFAST;
		}
	}
	if (disk < 0)
		disk = wonly;
	r1_bio->read_disk = disk;

	if (read_targets == 0 && min_bad > 0) {
		/* These sectors are bad on all InSync devices, so we
		 * need to mark them bad on all write targets.
		 */
		int ok = 1;

		for (i = 0; i < conf->raid_disks * 2; i++)
			if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
				struct md_rdev *rdev = conf->mirrors[i].rdev;

				ok = rdev_set_badblocks(rdev, sector_nr,
							min_bad, 0) && ok;
			}
		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		*skipped = 1;
		put_buf(r1_bio);

		if (!ok) {
			/* Cannot record the badblocks, so need to
			 * abort the resync.
			 */
			conf->recovery_disabled = mddev->recovery_disabled;
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			return 0;
		}
		return min_bad;
	}
	if (min_bad > 0 && min_bad < good_sectors) {
		/* only resync enough to reach the next bad->good
		 * transition */
		good_sectors = min_bad;
	}

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
		/* extra read targets are also write targets */
		write_targets += read_targets - 1;

	if (write_targets == 0 || read_targets == 0) {
		/* There is nowhere to write, so all non-sync
		 * drives must be failed - so we are finished
		 */
		sector_t rv;

		if (min_bad > 0)
			max_sector = sector_nr + min_bad;
		rv = max_sector - sector_nr;
		*skipped = 1;
		put_buf(r1_bio);
		return rv;
	}

	if (max_sector > mddev->resync_max)
		max_sector = mddev->resync_max; /* Don't do IO beyond here */
	if (max_sector > sector_nr + good_sectors)
		max_sector = sector_nr + good_sectors;
	nr_sectors = 0;
	sync_blocks = 0;
	do {
		struct page *page;
		int len = PAGE_SIZE;

		if (sector_nr + (len >> 9) > max_sector)
			len = (max_sector - sector_nr) << 9;
		if (len == 0)
			break;
		if (sync_blocks == 0) {
			if (!mddev->bitmap_ops->start_sync(mddev, sector_nr,
					&sync_blocks, still_degraded) &&
			    !conf->fullsync &&
			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				break;
			if ((len >> 9) > sync_blocks)
				len = sync_blocks << 9;
		}
		for (i = 0; i < conf->raid_disks * 2; i++) {
			struct resync_pages *rp;

			bio = r1_bio->bios[i];
			rp = get_resync_pages(bio);
			if (bio->bi_end_io) {
				page = resync_fetch_page(rp, page_idx);

				/*
				 * Won't fail because the vec table is big
				 * enough to hold all these pages.
				 */
				__bio_add_page(bio, page, len, 0);
			}
		}
		nr_sectors += len >> 9;
		sector_nr += len >> 9;
		sync_blocks -= (len >> 9);
	} while (++page_idx < RESYNC_PAGES);

	r1_bio->sectors = nr_sectors;

	if (mddev_is_clustered(mddev) &&
	    conf->cluster_sync_high < sector_nr + nr_sectors) {
		conf->cluster_sync_low = mddev->curr_resync_completed;
		conf->cluster_sync_high = conf->cluster_sync_low +
			CLUSTER_RESYNC_WINDOW_SECTORS;
		/* Send resync message */
		md_cluster_ops->resync_info_update(mddev,
				conf->cluster_sync_low,
				conf->cluster_sync_high);
	}
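
	/*
	 * The window [cluster_sync_low, cluster_sync_high) just sent to
	 * the other cluster nodes lets them suspend normal writes to the
	 * region being resynced here.
	 */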
	/* For a user-requested sync, we read all readable devices and do a
	 * compare.
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		atomic_set(&r1_bio->remaining, read_targets);
		for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
			bio = r1_bio->bios[i];
			if (bio->bi_end_io == end_sync_read) {
				read_targets--;
				md_sync_acct_bio(bio, nr_sectors);
				if (read_targets == 1)
					bio->bi_opf &= ~MD_FAILFAST;
				submit_bio_noacct(bio);
			}
		}
	} else {
		atomic_set(&r1_bio->remaining, 1);
		bio = r1_bio->bios[r1_bio->read_disk];
		md_sync_acct_bio(bio, nr_sectors);
		if (read_targets == 1)
			bio->bi_opf &= ~MD_FAILFAST;
		submit_bio_noacct(bio);
	}
	return nr_sectors;
}

static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	/* return the number of sectors to use for resync etc. */
	if (sectors)
		return sectors;

	return mddev->dev_sectors;
}
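
/*
 * setup_conf() builds the r1conf: the per-bucket barrier counters
 * (BARRIER_BUCKETS_NR of each), the mirrors array (two slots per disk
 * so every device can have a replacement), the r1bio mempool, and the
 * raid1d thread.  Everything allocated here is torn down in the abort
 * path and in raid1_free().
 */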
static struct r1conf *setup_conf(struct mddev *mddev)
{
	struct r1conf *conf;
	int i;
	struct raid1_info *disk;
	struct md_rdev *rdev;
	int err = -ENOMEM;

	conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
	if (!conf)
		goto abort;

	conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR,
				   sizeof(atomic_t), GFP_KERNEL);
	if (!conf->nr_pending)
		goto abort;

	conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR,
				   sizeof(atomic_t), GFP_KERNEL);
	if (!conf->nr_waiting)
		goto abort;

	conf->nr_queued = kcalloc(BARRIER_BUCKETS_NR,
				  sizeof(atomic_t), GFP_KERNEL);
	if (!conf->nr_queued)
		goto abort;

	conf->barrier = kcalloc(BARRIER_BUCKETS_NR,
				sizeof(atomic_t), GFP_KERNEL);
	if (!conf->barrier)
		goto abort;

	conf->mirrors = kzalloc(array3_size(sizeof(struct raid1_info),
					    mddev->raid_disks, 2),
				GFP_KERNEL);
	if (!conf->mirrors)
		goto abort;

	conf->tmppage = alloc_page(GFP_KERNEL);
	if (!conf->tmppage)
		goto abort;

	conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
	if (!conf->poolinfo)
		goto abort;
	conf->poolinfo->raid_disks = mddev->raid_disks * 2;
	err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS, r1bio_pool_alloc,
			   rbio_pool_free, conf->poolinfo);
	if (err)
		goto abort;

	err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
	if (err)
		goto abort;

	conf->poolinfo->mddev = mddev;

	err = -EINVAL;
	spin_lock_init(&conf->device_lock);
	conf->raid_disks = mddev->raid_disks;
	rdev_for_each(rdev, mddev) {
		int disk_idx = rdev->raid_disk;

		if (disk_idx >= conf->raid_disks || disk_idx < 0)
			continue;

		if (!raid1_add_conf(conf, rdev, disk_idx,
				    test_bit(Replacement, &rdev->flags)))
			goto abort;
	}
	conf->mddev = mddev;
	INIT_LIST_HEAD(&conf->retry_list);
	INIT_LIST_HEAD(&conf->bio_end_io_list);

	spin_lock_init(&conf->resync_lock);
	init_waitqueue_head(&conf->wait_barrier);

	bio_list_init(&conf->pending_bio_list);
	conf->recovery_disabled = mddev->recovery_disabled - 1;

	err = -EIO;
	for (i = 0; i < conf->raid_disks * 2; i++) {

		disk = conf->mirrors + i;

		if (i < conf->raid_disks &&
		    disk[conf->raid_disks].rdev) {
			/* This slot has a replacement. */
			if (!disk->rdev) {
				/* No original, just make the replacement
				 * a recovering spare
				 */
				disk->rdev =
					disk[conf->raid_disks].rdev;
				disk[conf->raid_disks].rdev = NULL;
			} else if (!test_bit(In_sync, &disk->rdev->flags))
				/* Original is not in_sync - bad */
				goto abort;
		}

		if (!disk->rdev ||
		    !test_bit(In_sync, &disk->rdev->flags)) {
			disk->head_position = 0;
			if (disk->rdev &&
			    (disk->rdev->saved_raid_disk < 0))
				conf->fullsync = 1;
		}
	}

	err = -ENOMEM;
	rcu_assign_pointer(conf->thread,
			   md_register_thread(raid1d, mddev, "raid1"));
	if (!conf->thread)
		goto abort;

	return conf;

 abort:
	if (conf) {
		mempool_exit(&conf->r1bio_pool);
		kfree(conf->mirrors);
		safe_put_page(conf->tmppage);
		kfree(conf->poolinfo);
		kfree(conf->nr_pending);
		kfree(conf->nr_waiting);
		kfree(conf->nr_queued);
		kfree(conf->barrier);
		bioset_exit(&conf->bio_split);
		kfree(conf);
	}
	return ERR_PTR(err);
}
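
/*
 * Queue limits for the array gendisk are stacked from the member
 * devices; the exact set of limit fields adjusted here varies by
 * kernel version.
 */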
static int raid1_set_limits(struct mddev *mddev)
{
	struct queue_limits lim;
	int err;

	md_init_stacking_limits(&lim);
	lim.max_write_zeroes_sectors = 0;
	err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
	if (err)
		return err;
	return queue_limits_set(mddev->gendisk->queue, &lim);
}
static int raid1_run(struct mddev *mddev)
{
	struct r1conf *conf;
	int i;
	int ret;

	if (mddev->level != 1) {
		pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
			mdname(mddev), mddev->level);
		return -EIO;
	}
	if (mddev->reshape_position != MaxSector) {
		pr_warn("md/raid1:%s: reshape_position set but not supported\n",
			mdname(mddev));
		return -EIO;
	}

	/*
	 * copy the already verified devices into our private RAID1
	 * bookkeeping area. [whatever we allocate in run(),
	 * should be freed in raid1_free()]
	 */
	if (mddev->private == NULL)
		conf = setup_conf(mddev);
	else
		conf = mddev->private;

	if (IS_ERR(conf))
		return PTR_ERR(conf);

	if (!mddev_is_dm(mddev)) {
		ret = raid1_set_limits(mddev);
		if (ret) {
			if (!mddev->private)
				raid1_free(mddev, conf);
			return ret;
		}
	}

	mddev->degraded = 0;
	for (i = 0; i < conf->raid_disks; i++)
		if (conf->mirrors[i].rdev == NULL ||
		    !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
		    test_bit(Faulty, &conf->mirrors[i].rdev->flags))
			mddev->degraded++;
	/*
	 * RAID1 needs at least one disk in active
	 */
	if (conf->raid_disks - mddev->degraded < 1) {
		md_unregister_thread(mddev, &conf->thread);
		if (!mddev->private)
			raid1_free(mddev, conf);
		return -EINVAL;
	}

	if (conf->raid_disks - mddev->degraded == 1)
		mddev->recovery_cp = MaxSector;

	if (mddev->recovery_cp != MaxSector)
		pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
			mdname(mddev));
	pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
		mdname(mddev), mddev->raid_disks - mddev->degraded,
		mddev->raid_disks);

	/*
	 * Ok, everything is just fine now
	 */
	rcu_assign_pointer(mddev->thread, conf->thread);
	rcu_assign_pointer(conf->thread, NULL);
	mddev->private = conf;
	set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);

	ret = md_integrity_register(mddev);
	if (ret)
		md_unregister_thread(mddev, &mddev->thread);
	return ret;
}
static void raid1_free(struct mddev *mddev, void *priv)
{
	struct r1conf *conf = priv;

	mempool_exit(&conf->r1bio_pool);
	kfree(conf->mirrors);
	safe_put_page(conf->tmppage);
	kfree(conf->poolinfo);
	kfree(conf->nr_pending);
	kfree(conf->nr_waiting);
	kfree(conf->nr_queued);
	kfree(conf->barrier);
	bioset_exit(&conf->bio_split);
	kfree(conf);
}
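
/*
 * raid1_resize() only changes the used size of the members: the bitmap
 * is resized first, and if the array grows, recovery_cp is pulled back
 * so the newly exposed space gets resynced.
 */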
static int raid1_resize(struct mddev *mddev, sector_t sectors)
{
	/* No resync is happening and there is enough space on all
	 * devices, so we can resize.  We need to make sure resync
	 * covers any new space.
	 */
	sector_t newsize = raid1_size(mddev, sectors, 0);
	int ret;

	if (mddev->external_size &&
	    mddev->array_sectors > newsize)
		return -EINVAL;

	ret = mddev->bitmap_ops->resize(mddev, newsize, 0, false);
	if (ret)
		return ret;

	md_set_array_sectors(mddev, newsize);
	if (sectors > mddev->dev_sectors &&
	    mddev->recovery_cp > mddev->dev_sectors) {
		mddev->recovery_cp = mddev->dev_sectors;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	mddev->dev_sectors = sectors;
	mddev->resync_max_sectors = sectors;
	return 0;
}
static int raid1_reshape(struct mddev *mddev)
{
	/* We need to:
	 * 1/ resize the r1bio_pool
	 * 2/ resize conf->mirrors
	 *
	 * We allocate a new r1bio_pool if we can.
	 * Then raise a device barrier and wait until all IO stops.
	 * Then resize conf->mirrors and swap in the new r1bio pool.
	 *
	 * At the same time, we "pack" the devices so that all the missing
	 * devices have the higher raid_disk numbers.
	 */
	mempool_t newpool, oldpool;
	struct pool_info *newpoolinfo;
	struct raid1_info *newmirrors;
	struct r1conf *conf = mddev->private;
	int cnt, raid_disks;
	unsigned long flags;
	int d, d2;
	int ret;

	memset(&newpool, 0, sizeof(newpool));
	memset(&oldpool, 0, sizeof(oldpool));

	/* Cannot change chunk_size, layout, or level */
	if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
	    mddev->layout != mddev->new_layout ||
	    mddev->level != mddev->new_level) {
		mddev->new_chunk_sectors = mddev->chunk_sectors;
		mddev->new_layout = mddev->layout;
		mddev->new_level = mddev->level;
		return -EINVAL;
	}

	if (!mddev_is_clustered(mddev))
		md_allow_write(mddev);

	raid_disks = mddev->raid_disks + mddev->delta_disks;

	if (raid_disks < conf->raid_disks) {
		cnt = 0;
		for (d = 0; d < conf->raid_disks; d++)
			if (conf->mirrors[d].rdev)
				cnt++;
		if (cnt > raid_disks)
			return -EBUSY;
	}

	newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
	if (!newpoolinfo)
		return -ENOMEM;
	newpoolinfo->mddev = mddev;
	newpoolinfo->raid_disks = raid_disks * 2;

	ret = mempool_init(&newpool, NR_RAID_BIOS, r1bio_pool_alloc,
			   rbio_pool_free, newpoolinfo);
	if (ret) {
		kfree(newpoolinfo);
		return ret;
	}
	newmirrors = kzalloc(array3_size(sizeof(struct raid1_info),
					 raid_disks, 2),
			     GFP_KERNEL);
	if (!newmirrors) {
		kfree(newpoolinfo);
		mempool_exit(&newpool);
		return -ENOMEM;
	}

	freeze_array(conf, 0);

	/* ok, everything is stopped */
	oldpool = conf->r1bio_pool;
	conf->r1bio_pool = newpool;

	for (d = d2 = 0; d < conf->raid_disks; d++) {
		struct md_rdev *rdev = conf->mirrors[d].rdev;

		if (rdev && rdev->raid_disk != d2) {
			sysfs_unlink_rdev(mddev, rdev);
			rdev->raid_disk = d2;
			sysfs_unlink_rdev(mddev, rdev);
			if (sysfs_link_rdev(mddev, rdev))
				pr_warn("md/raid1:%s: cannot register rd%d\n",
					mdname(mddev), rdev->raid_disk);
		}
		if (rdev)
			newmirrors[d2++].rdev = rdev;
	}
	kfree(conf->mirrors);
	conf->mirrors = newmirrors;
	kfree(conf->poolinfo);
	conf->poolinfo = newpoolinfo;

	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded += (raid_disks - conf->raid_disks);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	conf->raid_disks = mddev->raid_disks = raid_disks;
	mddev->delta_disks = 0;

	unfreeze_array(conf);

	set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);

	mempool_exit(&oldpool);
	return 0;
}
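
/*
 * Quiesce simply freezes or thaws the array barrier; while the array
 * is frozen, all normal and resync I/O waits in the barrier code.
 */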
static void raid1_quiesce(struct mddev *mddev, int quiesce)
{
	struct r1conf *conf = mddev->private;

	if (quiesce)
		freeze_array(conf, 0);
	else
		unfreeze_array(conf);
}
static void *raid1_takeover(struct mddev *mddev)
{
	/* raid1 can take over:
	 *  raid5 with 2 devices, any layout or chunk size
	 */
	if (mddev->level == 5 && mddev->raid_disks == 2) {
		struct r1conf *conf;

		mddev->new_level = 1;
		mddev->new_layout = 0;
		mddev->new_chunk_sectors = 0;
		conf = setup_conf(mddev);
		if (!IS_ERR(conf)) {
			/* Array must appear to be quiesced */
			conf->array_frozen = 1;
			mddev_clear_unsupported_flags(mddev,
				UNSUPPORTED_MDDEV_FLAGS);
		}
		return conf;
	}
	return ERR_PTR(-EINVAL);
}
MODULE_ALIAS("md-personality-3"); /* RAID1 */
MODULE_ALIAS("md-raid1");
MODULE_ALIAS("md-level-1");