Lines Matching +full:data +full:- +full:mirror

1 // SPDX-License-Identifier: GPL-2.0
13 #include "disk-io.h"
14 #include "ordered-data.h"
18 #include "dev-replace.h"
20 #include "block-group.h"
24 #include "file-item.h"
26 #include "raid-stripe-tree.h"
29 * This is only the first step towards a full-featured scrub. It reads all
31 * is found or the extent cannot be read, good data will be written back if
35 * - In case an unrepairable extent is encountered, track which files are
37 * - track and record media errors, throw out bad devices
38 * - add a mode to also read unallocated space
54 * This would be 8M per device, the same value as the old scrub in-flight bios
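A quick sanity check of that 8M figure, as a sketch: it assumes BTRFS_STRIPE_LEN is 64K and SCRUB_TOTAL_STRIPES is 128 (8 stripes per group times 16 groups per device, as in current kernels). Neither constant is part of the matched lines above, so both are restated here as assumptions.

        #include <stdio.h>

        #define ASSUMED_STRIPE_LEN      (64 * 1024)     /* BTRFS_STRIPE_LEN, assumed */
        #define ASSUMED_TOTAL_STRIPES   128             /* SCRUB_TOTAL_STRIPES, assumed */

        int main(void)
        {
                /* 128 in-flight stripes * 64K each = 8M of pages per scrubbed device */
                printf("%u MiB\n",
                       ASSUMED_STRIPE_LEN * ASSUMED_TOTAL_STRIPES / (1024 * 1024));
                return 0;
        }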
73 * Csum pointer for data csum verification. Should point to a
76 * NULL if this data sector has no csum.
92 /* Set when the read-repair is finished. */
96 * Set for data stripes if it's triggered from P/Q stripe.
97 * During such scrub, we should not report errors in data stripes, nor
125 * How many data/meta extents are in this stripe. Only for scrub status
163 * IO and csum errors can happen for both metadata and data.
177 * Checksum for the whole stripe if this stripe is inside a data block
213 * Use a ref counter to avoid use-after-free issues. Scrub workers
237 if (stripe->pages[i]) in release_scrub_stripe()
238 __free_page(stripe->pages[i]); in release_scrub_stripe()
239 stripe->pages[i] = NULL; in release_scrub_stripe()
241 kfree(stripe->sectors); in release_scrub_stripe()
242 kfree(stripe->csums); in release_scrub_stripe()
243 stripe->sectors = NULL; in release_scrub_stripe()
244 stripe->csums = NULL; in release_scrub_stripe()
245 stripe->sctx = NULL; in release_scrub_stripe()
246 stripe->state = 0; in release_scrub_stripe()
256 stripe->nr_sectors = BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits; in init_scrub_stripe()
257 stripe->state = 0; in init_scrub_stripe()
259 init_waitqueue_head(&stripe->io_wait); in init_scrub_stripe()
260 init_waitqueue_head(&stripe->repair_wait); in init_scrub_stripe()
261 atomic_set(&stripe->pending_io, 0); in init_scrub_stripe()
262 spin_lock_init(&stripe->write_error_lock); in init_scrub_stripe()
264 ret = btrfs_alloc_page_array(SCRUB_STRIPE_PAGES, stripe->pages, false); in init_scrub_stripe()
268 stripe->sectors = kcalloc(stripe->nr_sectors, in init_scrub_stripe()
271 if (!stripe->sectors) in init_scrub_stripe()
274 stripe->csums = kcalloc(BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits, in init_scrub_stripe()
275 fs_info->csum_size, GFP_KERNEL); in init_scrub_stripe()
276 if (!stripe->csums) in init_scrub_stripe()
281 return -ENOMEM; in init_scrub_stripe()
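A side note on the sizing in init_scrub_stripe(): with the usual 4K sectorsize, BTRFS_STRIPE_LEN >> sectorsize_bits comes out to 16 sectors per stripe, which is why the per-stripe error and csum bitmaps in this file fit in a single unsigned long (the BITS_PER_LONG assert further down checks exactly that). A minimal check under those assumed sizes:

        #include <stdio.h>

        int main(void)
        {
                const unsigned int stripe_len = 64 * 1024;      /* BTRFS_STRIPE_LEN, assumed */
                const unsigned int sectorsize_bits = 12;        /* assumed 4K sectors */

                /* Mirrors stripe->nr_sectors = BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits */
                printf("nr_sectors = %u\n", stripe_len >> sectorsize_bits);     /* 16 */
                return 0;
        }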
286 wait_event(stripe->io_wait, atomic_read(&stripe->pending_io) == 0); in wait_scrub_stripe_io()
293 while (atomic_read(&fs_info->scrub_pause_req)) { in __scrub_blocked_if_needed()
294 mutex_unlock(&fs_info->scrub_lock); in __scrub_blocked_if_needed()
295 wait_event(fs_info->scrub_pause_wait, in __scrub_blocked_if_needed()
296 atomic_read(&fs_info->scrub_pause_req) == 0); in __scrub_blocked_if_needed()
297 mutex_lock(&fs_info->scrub_lock); in __scrub_blocked_if_needed()
303 atomic_inc(&fs_info->scrubs_paused); in scrub_pause_on()
304 wake_up(&fs_info->scrub_pause_wait); in scrub_pause_on()
309 mutex_lock(&fs_info->scrub_lock); in scrub_pause_off()
311 atomic_dec(&fs_info->scrubs_paused); in scrub_pause_off()
312 mutex_unlock(&fs_info->scrub_lock); in scrub_pause_off()
314 wake_up(&fs_info->scrub_pause_wait); in scrub_pause_off()
331 release_scrub_stripe(&sctx->stripes[i]); in scrub_free_ctx()
338 if (refcount_dec_and_test(&sctx->refs)) in scrub_put_ctx()
354 refcount_set(&sctx->refs, 1); in scrub_setup_ctx()
355 sctx->is_dev_replace = is_dev_replace; in scrub_setup_ctx()
356 sctx->fs_info = fs_info; in scrub_setup_ctx()
357 sctx->extent_path.search_commit_root = 1; in scrub_setup_ctx()
358 sctx->extent_path.skip_locking = 1; in scrub_setup_ctx()
359 sctx->csum_path.search_commit_root = 1; in scrub_setup_ctx()
360 sctx->csum_path.skip_locking = 1; in scrub_setup_ctx()
364 ret = init_scrub_stripe(fs_info, &sctx->stripes[i]); in scrub_setup_ctx()
367 sctx->stripes[i].sctx = sctx; in scrub_setup_ctx()
369 sctx->first_free = 0; in scrub_setup_ctx()
370 atomic_set(&sctx->cancel_req, 0); in scrub_setup_ctx()
372 spin_lock_init(&sctx->stat_lock); in scrub_setup_ctx()
373 sctx->throttle_deadline = 0; in scrub_setup_ctx()
375 mutex_init(&sctx->wr_lock); in scrub_setup_ctx()
377 WARN_ON(!fs_info->dev_replace.tgtdev); in scrub_setup_ctx()
378 sctx->wr_tgtdev = fs_info->dev_replace.tgtdev; in scrub_setup_ctx()
385 return ERR_PTR(-ENOMEM); in scrub_setup_ctx()
398 struct btrfs_fs_info *fs_info = swarn->dev->fs_info; in scrub_print_warning_inode()
416 ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0); in scrub_print_warning_inode()
419 btrfs_release_path(swarn->path); in scrub_print_warning_inode()
423 eb = swarn->path->nodes[0]; in scrub_print_warning_inode()
424 inode_item = btrfs_item_ptr(eb, swarn->path->slots[0], in scrub_print_warning_inode()
427 btrfs_release_path(swarn->path); in scrub_print_warning_inode()
435 ipath = init_ipath(4096, local_root, swarn->path); in scrub_print_warning_inode()
452 for (i = 0; i < ipath->fspath->elem_cnt; ++i) in scrub_print_warning_inode()
455 swarn->errstr, swarn->logical, in scrub_print_warning_inode()
456 btrfs_dev_name(swarn->dev), in scrub_print_warning_inode()
457 swarn->physical, in scrub_print_warning_inode()
459 fs_info->sectorsize, nlink, in scrub_print_warning_inode()
460 (char *)(unsigned long)ipath->fspath->val[i]); in scrub_print_warning_inode()
469 swarn->errstr, swarn->logical, in scrub_print_warning_inode()
470 btrfs_dev_name(swarn->dev), in scrub_print_warning_inode()
471 swarn->physical, in scrub_print_warning_inode()
481 struct btrfs_fs_info *fs_info = dev->fs_info; in scrub_print_common_warning()
513 eb = path->nodes[0]; in scrub_print_common_warning()
514 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); in scrub_print_common_warning()
515 item_size = btrfs_item_size(eb, path->slots[0]); in scrub_print_common_warning()
547 ctx.extent_item_pos = swarn.logical - found_key.objectid; in scrub_print_common_warning()
565 if (!btrfs_is_zoned(sctx->fs_info)) in fill_writer_pointer_gap()
568 if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) in fill_writer_pointer_gap()
571 if (sctx->write_pointer < physical) { in fill_writer_pointer_gap()
572 length = physical - sctx->write_pointer; in fill_writer_pointer_gap()
574 ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev, in fill_writer_pointer_gap()
575 sctx->write_pointer, length); in fill_writer_pointer_gap()
577 sctx->write_pointer = physical; in fill_writer_pointer_gap()
584 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; in scrub_stripe_get_page()
585 int page_index = (sector_nr << fs_info->sectorsize_bits) >> PAGE_SHIFT; in scrub_stripe_get_page()
587 return stripe->pages[page_index]; in scrub_stripe_get_page()
593 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; in scrub_stripe_get_page_offset()
595 return offset_in_page(sector_nr << fs_info->sectorsize_bits); in scrub_stripe_get_page_offset()
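The two helpers above turn a sector number inside the stripe into a backing page and an in-page offset. A userspace sketch of the same shift arithmetic, assuming 4K sectors and 4K pages (sectorsize_bits and PAGE_SHIFT both 12; with larger pages only the shift values change):

        #include <stdio.h>

        #define SECTORSIZE_BITS 12              /* assumed 4K sectors */
        #define PAGE_SHIFT      12              /* assumed 4K pages */

        int main(void)
        {
                for (unsigned int sector_nr = 0; sector_nr < 4; sector_nr++) {
                        unsigned int byte_off   = sector_nr << SECTORSIZE_BITS;
                        unsigned int page_index = byte_off >> PAGE_SHIFT;
                        /* offset_in_page() is the byte offset masked to the page size */
                        unsigned int pgoff      = byte_off & ((1u << PAGE_SHIFT) - 1);

                        printf("sector %u -> page %u, offset %u\n",
                               sector_nr, page_index, pgoff);
                }
                return 0;
        }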
600 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; in scrub_verify_one_metadata()
601 const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits; in scrub_verify_one_metadata()
602 const u64 logical = stripe->logical + (sector_nr << fs_info->sectorsize_bits); in scrub_verify_one_metadata()
605 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); in scrub_verify_one_metadata()
616 memcpy(on_disk_csum, header->csum, fs_info->csum_size); in scrub_verify_one_metadata()
619 bitmap_set(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
620 bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
622 "tree block %llu mirror %u has bad bytenr, has %llu want %llu", in scrub_verify_one_metadata()
623 logical, stripe->mirror_num, in scrub_verify_one_metadata()
627 if (memcmp(header->fsid, fs_info->fs_devices->metadata_uuid, in scrub_verify_one_metadata()
629 bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
630 bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
632 "tree block %llu mirror %u has bad fsid, has %pU want %pU", in scrub_verify_one_metadata()
633 logical, stripe->mirror_num, in scrub_verify_one_metadata()
634 header->fsid, fs_info->fs_devices->fsid); in scrub_verify_one_metadata()
637 if (memcmp(header->chunk_tree_uuid, fs_info->chunk_tree_uuid, in scrub_verify_one_metadata()
639 bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
640 bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
642 "tree block %llu mirror %u has bad chunk tree uuid, has %pU want %pU", in scrub_verify_one_metadata()
643 logical, stripe->mirror_num, in scrub_verify_one_metadata()
644 header->chunk_tree_uuid, fs_info->chunk_tree_uuid); in scrub_verify_one_metadata()
649 shash->tfm = fs_info->csum_shash; in scrub_verify_one_metadata()
652 BTRFS_CSUM_SIZE, fs_info->sectorsize - BTRFS_CSUM_SIZE); in scrub_verify_one_metadata()
659 fs_info->sectorsize); in scrub_verify_one_metadata()
663 if (memcmp(calculated_csum, on_disk_csum, fs_info->csum_size) != 0) { in scrub_verify_one_metadata()
664 bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
665 bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
667 "tree block %llu mirror %u has bad csum, has " CSUM_FMT " want " CSUM_FMT, in scrub_verify_one_metadata()
668 logical, stripe->mirror_num, in scrub_verify_one_metadata()
669 CSUM_FMT_VALUE(fs_info->csum_size, on_disk_csum), in scrub_verify_one_metadata()
670 CSUM_FMT_VALUE(fs_info->csum_size, calculated_csum)); in scrub_verify_one_metadata()
673 if (stripe->sectors[sector_nr].generation != in scrub_verify_one_metadata()
675 bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
676 bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
678 "tree block %llu mirror %u has bad generation, has %llu want %llu", in scrub_verify_one_metadata()
679 logical, stripe->mirror_num, in scrub_verify_one_metadata()
681 stripe->sectors[sector_nr].generation); in scrub_verify_one_metadata()
684 bitmap_clear(&stripe->error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
685 bitmap_clear(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
686 bitmap_clear(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
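One detail worth pulling out of the crypto_shash sequence above: the metadata checksum covers the whole tree block except its first BTRFS_CSUM_SIZE bytes, where the on-disk csum itself is stored, so the first sector is hashed from BTRFS_CSUM_SIZE onward and every following sector in full. A runnable toy that just totals the hashed span rather than actually hashing, assuming 16K nodes and 4K sectors:

        #include <stdio.h>
        #include <stddef.h>

        #define BTRFS_CSUM_SIZE 32              /* on-disk csum area at the block start */
        #define NODESIZE        (16 * 1024)     /* assumed 16K tree blocks */
        #define SECTORSIZE      (4 * 1024)      /* assumed 4K sectors */

        int main(void)
        {
                size_t hashed = 0;

                /* First sector: skip the csum bytes at the front of the header. */
                hashed += SECTORSIZE - BTRFS_CSUM_SIZE;

                /* Remaining sectors of the tree block are hashed in full. */
                for (size_t i = 1; i < NODESIZE / SECTORSIZE; i++)
                        hashed += SECTORSIZE;

                /* 16K - 32 bytes: everything but the stored csum is covered. */
                printf("bytes hashed: %zu of %u\n", hashed, NODESIZE);
                return 0;
        }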
691 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; in scrub_verify_one_sector()
692 struct scrub_sector_verification *sector = &stripe->sectors[sector_nr]; in scrub_verify_one_sector()
693 const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits; in scrub_verify_one_sector()
699 ASSERT(sector_nr >= 0 && sector_nr < stripe->nr_sectors); in scrub_verify_one_sector()
702 if (!test_bit(sector_nr, &stripe->extent_sector_bitmap)) in scrub_verify_one_sector()
706 if (test_bit(sector_nr, &stripe->io_error_bitmap)) in scrub_verify_one_sector()
710 if (sector->is_metadata) { in scrub_verify_one_sector()
719 if (unlikely(sector_nr + sectors_per_tree > stripe->nr_sectors)) { in scrub_verify_one_sector()
722 stripe->logical + in scrub_verify_one_sector()
723 (sector_nr << fs_info->sectorsize_bits), in scrub_verify_one_sector()
724 stripe->logical); in scrub_verify_one_sector()
732 * Data is easier, we just verify the data csum (if we have it). For in scrub_verify_one_sector()
735 if (!sector->csum) { in scrub_verify_one_sector()
736 clear_bit(sector_nr, &stripe->error_bitmap); in scrub_verify_one_sector()
740 ret = btrfs_check_sector_csum(fs_info, page, pgoff, csum_buf, sector->csum); in scrub_verify_one_sector()
742 set_bit(sector_nr, &stripe->csum_error_bitmap); in scrub_verify_one_sector()
743 set_bit(sector_nr, &stripe->error_bitmap); in scrub_verify_one_sector()
745 clear_bit(sector_nr, &stripe->csum_error_bitmap); in scrub_verify_one_sector()
746 clear_bit(sector_nr, &stripe->error_bitmap); in scrub_verify_one_sector()
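For data sectors, btrfs_check_sector_csum() reduces to recomputing the sector's checksum and comparing it against the stored csum. Below is a self-contained sketch using the default crc32c (Castagnoli) in its plain bitwise form; the kernel drives this through the crypto API and its own seed handling, so treat this as an illustration of the compare-and-flag flow, not the exact on-disk csum convention:

        #include <stdio.h>
        #include <stdint.h>
        #include <string.h>

        /* Bitwise CRC32C (Castagnoli), reflected polynomial 0x82F63B78. */
        static uint32_t crc32c(uint32_t crc, const uint8_t *p, size_t len)
        {
                crc = ~crc;
                while (len--) {
                        crc ^= *p++;
                        for (int k = 0; k < 8; k++)
                                crc = (crc >> 1) ^ (0x82F63B78u & (0u - (crc & 1)));
                }
                return ~crc;
        }

        int main(void)
        {
                uint8_t sector[4096] = "some data";     /* assumed 4K sectorsize */
                uint8_t stored_csum[4];                 /* what sector->csum points at */
                uint32_t calc;

                calc = crc32c(0, sector, sizeof(sector));
                memcpy(stored_csum, &calc, sizeof(calc));       /* "on-disk" csum */

                sector[0] ^= 0xff;                              /* simulate corruption */
                calc = crc32c(0, sector, sizeof(sector));
                if (memcmp(&calc, stored_csum, sizeof(calc)) != 0)
                        printf("mismatch -> set csum_error_bitmap and error_bitmap bits\n");
                return 0;
        }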
753 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; in scrub_verify_one_stripe()
754 const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits; in scrub_verify_one_stripe()
757 for_each_set_bit(sector_nr, &bitmap, stripe->nr_sectors) { in scrub_verify_one_stripe()
759 if (stripe->sectors[sector_nr].is_metadata) in scrub_verify_one_stripe()
760 sector_nr += sectors_per_tree - 1; in scrub_verify_one_stripe()
768 for (i = 0; i < stripe->nr_sectors; i++) { in calc_sector_number()
769 if (scrub_stripe_get_page(stripe, i) == first_bvec->bv_page && in calc_sector_number()
770 scrub_stripe_get_page_offset(stripe, i) == first_bvec->bv_offset) in calc_sector_number()
773 ASSERT(i < stripe->nr_sectors); in calc_sector_number()
780 * - Only reads the failed sectors
781 * - May have extra blocksize limits
785 struct scrub_stripe *stripe = bbio->private; in scrub_repair_read_endio()
786 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; in scrub_repair_read_endio()
788 int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio)); in scrub_repair_read_endio()
792 ASSERT(sector_nr < stripe->nr_sectors); in scrub_repair_read_endio()
794 bio_for_each_bvec_all(bvec, &bbio->bio, i) in scrub_repair_read_endio()
795 bio_size += bvec->bv_len; in scrub_repair_read_endio()
797 if (bbio->bio.bi_status) { in scrub_repair_read_endio()
798 bitmap_set(&stripe->io_error_bitmap, sector_nr, in scrub_repair_read_endio()
799 bio_size >> fs_info->sectorsize_bits); in scrub_repair_read_endio()
800 bitmap_set(&stripe->error_bitmap, sector_nr, in scrub_repair_read_endio()
801 bio_size >> fs_info->sectorsize_bits); in scrub_repair_read_endio()
803 bitmap_clear(&stripe->io_error_bitmap, sector_nr, in scrub_repair_read_endio()
804 bio_size >> fs_info->sectorsize_bits); in scrub_repair_read_endio()
806 bio_put(&bbio->bio); in scrub_repair_read_endio()
807 if (atomic_dec_and_test(&stripe->pending_io)) in scrub_repair_read_endio()
808 wake_up(&stripe->io_wait); in scrub_repair_read_endio()
811 static int calc_next_mirror(int mirror, int num_copies) in calc_next_mirror() argument
813 ASSERT(mirror <= num_copies); in calc_next_mirror()
814 return (mirror + 1 > num_copies) ? 1 : mirror + 1; in calc_next_mirror()
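calc_next_mirror() wraps from the last copy back to mirror 1, which lets the repair code walk every other copy starting from the one that failed. A runnable check of the rotation (the helper body is taken from the excerpt; the driver loop mirrors the for-loop in scrub_stripe_read_repair_worker() further down):

        #include <stdio.h>
        #include <assert.h>

        static int calc_next_mirror(int mirror, int num_copies)
        {
                assert(mirror <= num_copies);
                return (mirror + 1 > num_copies) ? 1 : mirror + 1;
        }

        int main(void)
        {
                /* e.g. a 3-copy profile where the initial read used mirror 2 */
                const int num_copies = 3, failed_mirror = 2;

                for (int m = calc_next_mirror(failed_mirror, num_copies);
                     m != failed_mirror;
                     m = calc_next_mirror(m, num_copies))
                        printf("retry from mirror %d\n", m);    /* prints 3, then 1 */
                return 0;
        }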
818 int mirror, int blocksize, bool wait) in scrub_stripe_submit_repair_read() argument
820 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; in scrub_stripe_submit_repair_read()
822 const unsigned long old_error_bitmap = stripe->error_bitmap; in scrub_stripe_submit_repair_read()
825 ASSERT(stripe->mirror_num >= 1); in scrub_stripe_submit_repair_read()
826 ASSERT(atomic_read(&stripe->pending_io) == 0); in scrub_stripe_submit_repair_read()
828 for_each_set_bit(i, &old_error_bitmap, stripe->nr_sectors) { in scrub_stripe_submit_repair_read()
837 if (bbio && ((i > 0 && !test_bit(i - 1, &stripe->error_bitmap)) || in scrub_stripe_submit_repair_read()
838 bbio->bio.bi_iter.bi_size >= blocksize)) { in scrub_stripe_submit_repair_read()
839 ASSERT(bbio->bio.bi_iter.bi_size); in scrub_stripe_submit_repair_read()
840 atomic_inc(&stripe->pending_io); in scrub_stripe_submit_repair_read()
841 btrfs_submit_bbio(bbio, mirror); in scrub_stripe_submit_repair_read()
848 bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ, in scrub_stripe_submit_repair_read()
850 bbio->bio.bi_iter.bi_sector = (stripe->logical + in scrub_stripe_submit_repair_read()
851 (i << fs_info->sectorsize_bits)) >> SECTOR_SHIFT; in scrub_stripe_submit_repair_read()
854 ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff); in scrub_stripe_submit_repair_read()
855 ASSERT(ret == fs_info->sectorsize); in scrub_stripe_submit_repair_read()
858 ASSERT(bbio->bio.bi_iter.bi_size); in scrub_stripe_submit_repair_read()
859 atomic_inc(&stripe->pending_io); in scrub_stripe_submit_repair_read()
860 btrfs_submit_bbio(bbio, mirror); in scrub_stripe_submit_repair_read()
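The loop above batches contiguous failed sectors into a single bio and flushes it early when it hits a gap in the error bitmap or the blocksize cap. The same grouping decision in miniature, over a plain bitmask; the sector count and the two-sector cap are made up for the demo:

        #include <stdio.h>

        int main(void)
        {
                const unsigned long error_bitmap = 0x4e;        /* sectors 1,2,3 and 6 failed */
                const int nr_sectors = 8, max_batch = 2;        /* cap: 2 sectors per bio */
                int batch = 0;

                for (int i = 0; i < nr_sectors; i++) {
                        if (!(error_bitmap & (1ul << i)))
                                continue;
                        /* flush on a discontiguous sector or a full bio, as above */
                        if (batch && (!(error_bitmap & (1ul << (i - 1))) ||
                                      batch >= max_batch)) {
                                printf("submit bio, %d sector(s)\n", batch);
                                batch = 0;
                        }
                        batch++;        /* add sector i to the current bio */
                }
                if (batch)
                        printf("submit final bio, %d sector(s)\n", batch);
                return 0;
        }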
871 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_stripe_report_errors()
880 if (test_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state)) in scrub_stripe_report_errors()
889 if (!bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors)) { in scrub_stripe_report_errors()
890 u64 mapped_len = fs_info->sectorsize; in scrub_stripe_report_errors()
892 int stripe_index = stripe->mirror_num - 1; in scrub_stripe_report_errors()
896 ASSERT(stripe->mirror_num >= 1); in scrub_stripe_report_errors()
898 stripe->logical, &mapped_len, &bioc, in scrub_stripe_report_errors()
906 physical = bioc->stripes[stripe_index].physical; in scrub_stripe_report_errors()
907 dev = bioc->stripes[stripe_index].dev; in scrub_stripe_report_errors()
912 for_each_set_bit(sector_nr, &stripe->extent_sector_bitmap, stripe->nr_sectors) { in scrub_stripe_report_errors()
915 if (stripe->sectors[sector_nr].is_metadata) { in scrub_stripe_report_errors()
919 if (!stripe->sectors[sector_nr].csum) in scrub_stripe_report_errors()
923 if (test_bit(sector_nr, &stripe->init_error_bitmap) && in scrub_stripe_report_errors()
924 !test_bit(sector_nr, &stripe->error_bitmap)) { in scrub_stripe_report_errors()
930 if (!test_bit(sector_nr, &stripe->init_error_bitmap)) in scrub_stripe_report_errors()
941 stripe->logical, btrfs_dev_name(dev), in scrub_stripe_report_errors()
945 "fixed up error at logical %llu on mirror %u", in scrub_stripe_report_errors()
946 stripe->logical, stripe->mirror_num); in scrub_stripe_report_errors()
955 stripe->logical, btrfs_dev_name(dev), in scrub_stripe_report_errors()
959 "unable to fixup (regular) error at logical %llu on mirror %u", in scrub_stripe_report_errors()
960 stripe->logical, stripe->mirror_num); in scrub_stripe_report_errors()
963 if (test_bit(sector_nr, &stripe->io_error_bitmap)) in scrub_stripe_report_errors()
966 stripe->logical, physical); in scrub_stripe_report_errors()
967 if (test_bit(sector_nr, &stripe->csum_error_bitmap)) in scrub_stripe_report_errors()
970 stripe->logical, physical); in scrub_stripe_report_errors()
971 if (test_bit(sector_nr, &stripe->meta_error_bitmap)) in scrub_stripe_report_errors()
974 stripe->logical, physical); in scrub_stripe_report_errors()
977 spin_lock(&sctx->stat_lock); in scrub_stripe_report_errors()
978 sctx->stat.data_extents_scrubbed += stripe->nr_data_extents; in scrub_stripe_report_errors()
979 sctx->stat.tree_extents_scrubbed += stripe->nr_meta_extents; in scrub_stripe_report_errors()
980 sctx->stat.data_bytes_scrubbed += nr_data_sectors << fs_info->sectorsize_bits; in scrub_stripe_report_errors()
981 sctx->stat.tree_bytes_scrubbed += nr_meta_sectors << fs_info->sectorsize_bits; in scrub_stripe_report_errors()
982 sctx->stat.no_csum += nr_nodatacsum_sectors; in scrub_stripe_report_errors()
983 sctx->stat.read_errors += stripe->init_nr_io_errors; in scrub_stripe_report_errors()
984 sctx->stat.csum_errors += stripe->init_nr_csum_errors; in scrub_stripe_report_errors()
985 sctx->stat.verify_errors += stripe->init_nr_meta_errors; in scrub_stripe_report_errors()
986 sctx->stat.uncorrectable_errors += in scrub_stripe_report_errors()
987 bitmap_weight(&stripe->error_bitmap, stripe->nr_sectors); in scrub_stripe_report_errors()
988 sctx->stat.corrected_errors += nr_repaired_sectors; in scrub_stripe_report_errors()
989 spin_unlock(&sctx->stat_lock); in scrub_stripe_report_errors()
998 * - Wait for the initial read to finish
999 * - Verify and locate any bad sectors
1000 * - Go through the remaining mirrors and try to read as large blocksize as
1002 * - Go through all mirrors (including the failed mirror) sector-by-sector
1003 * - Submit writeback for repaired sectors
1005 * Writeback for dev-replace does not happen here, it needs extra
1011 struct scrub_ctx *sctx = stripe->sctx; in scrub_stripe_read_repair_worker()
1012 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_stripe_read_repair_worker()
1013 int num_copies = btrfs_num_copies(fs_info, stripe->bg->start, in scrub_stripe_read_repair_worker()
1014 stripe->bg->length); in scrub_stripe_read_repair_worker()
1016 int mirror; in scrub_stripe_read_repair_worker() local
1019 ASSERT(stripe->mirror_num > 0); in scrub_stripe_read_repair_worker()
1022 scrub_verify_one_stripe(stripe, stripe->extent_sector_bitmap); in scrub_stripe_read_repair_worker()
1024 stripe->init_error_bitmap = stripe->error_bitmap; in scrub_stripe_read_repair_worker()
1025 stripe->init_nr_io_errors = bitmap_weight(&stripe->io_error_bitmap, in scrub_stripe_read_repair_worker()
1026 stripe->nr_sectors); in scrub_stripe_read_repair_worker()
1027 stripe->init_nr_csum_errors = bitmap_weight(&stripe->csum_error_bitmap, in scrub_stripe_read_repair_worker()
1028 stripe->nr_sectors); in scrub_stripe_read_repair_worker()
1029 stripe->init_nr_meta_errors = bitmap_weight(&stripe->meta_error_bitmap, in scrub_stripe_read_repair_worker()
1030 stripe->nr_sectors); in scrub_stripe_read_repair_worker()
1032 if (bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors)) in scrub_stripe_read_repair_worker()
1041 for (mirror = calc_next_mirror(stripe->mirror_num, num_copies); in scrub_stripe_read_repair_worker()
1042 mirror != stripe->mirror_num; in scrub_stripe_read_repair_worker()
1043 mirror = calc_next_mirror(mirror, num_copies)) { in scrub_stripe_read_repair_worker()
1044 const unsigned long old_error_bitmap = stripe->error_bitmap; in scrub_stripe_read_repair_worker()
1046 scrub_stripe_submit_repair_read(stripe, mirror, in scrub_stripe_read_repair_worker()
1050 if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors)) in scrub_stripe_read_repair_worker()
1055 * Last safety net, try re-checking all mirrors, including the failed in scrub_stripe_read_repair_worker()
1056 * one, sector-by-sector. in scrub_stripe_read_repair_worker()
1060 * Thus here we do a sector-by-sector read. in scrub_stripe_read_repair_worker()
1065 for (i = 0, mirror = stripe->mirror_num; in scrub_stripe_read_repair_worker()
1067 i++, mirror = calc_next_mirror(mirror, num_copies)) { in scrub_stripe_read_repair_worker()
1068 const unsigned long old_error_bitmap = stripe->error_bitmap; in scrub_stripe_read_repair_worker()
1070 scrub_stripe_submit_repair_read(stripe, mirror, in scrub_stripe_read_repair_worker()
1071 fs_info->sectorsize, true); in scrub_stripe_read_repair_worker()
1074 if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors)) in scrub_stripe_read_repair_worker()
1080 * in-place, but queue the bg to be relocated. in scrub_stripe_read_repair_worker()
1082 bitmap_andnot(&repaired, &stripe->init_error_bitmap, &stripe->error_bitmap, in scrub_stripe_read_repair_worker()
1083 stripe->nr_sectors); in scrub_stripe_read_repair_worker()
1084 if (!sctx->readonly && !bitmap_empty(&repaired, stripe->nr_sectors)) { in scrub_stripe_read_repair_worker()
1086 btrfs_repair_one_zone(fs_info, sctx->stripes[0].bg->start); in scrub_stripe_read_repair_worker()
1094 set_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state); in scrub_stripe_read_repair_worker()
1095 wake_up(&stripe->repair_wait); in scrub_stripe_read_repair_worker()
1100 struct scrub_stripe *stripe = bbio->private; in scrub_read_endio()
1102 int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio)); in scrub_read_endio()
1107 ASSERT(sector_nr < stripe->nr_sectors); in scrub_read_endio()
1108 bio_for_each_bvec_all(bvec, &bbio->bio, i) in scrub_read_endio()
1109 bio_size += bvec->bv_len; in scrub_read_endio()
1110 num_sectors = bio_size >> stripe->bg->fs_info->sectorsize_bits; in scrub_read_endio()
1112 if (bbio->bio.bi_status) { in scrub_read_endio()
1113 bitmap_set(&stripe->io_error_bitmap, sector_nr, num_sectors); in scrub_read_endio()
1114 bitmap_set(&stripe->error_bitmap, sector_nr, num_sectors); in scrub_read_endio()
1116 bitmap_clear(&stripe->io_error_bitmap, sector_nr, num_sectors); in scrub_read_endio()
1118 bio_put(&bbio->bio); in scrub_read_endio()
1119 if (atomic_dec_and_test(&stripe->pending_io)) { in scrub_read_endio()
1120 wake_up(&stripe->io_wait); in scrub_read_endio()
1121 INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker); in scrub_read_endio()
1122 queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work); in scrub_read_endio()
1128 struct scrub_stripe *stripe = bbio->private; in scrub_write_endio()
1129 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; in scrub_write_endio()
1131 int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio)); in scrub_write_endio()
1135 bio_for_each_bvec_all(bvec, &bbio->bio, i) in scrub_write_endio()
1136 bio_size += bvec->bv_len; in scrub_write_endio()
1138 if (bbio->bio.bi_status) { in scrub_write_endio()
1141 spin_lock_irqsave(&stripe->write_error_lock, flags); in scrub_write_endio()
1142 bitmap_set(&stripe->write_error_bitmap, sector_nr, in scrub_write_endio()
1143 bio_size >> fs_info->sectorsize_bits); in scrub_write_endio()
1144 spin_unlock_irqrestore(&stripe->write_error_lock, flags); in scrub_write_endio()
1146 bio_put(&bbio->bio); in scrub_write_endio()
1148 if (atomic_dec_and_test(&stripe->pending_io)) in scrub_write_endio()
1149 wake_up(&stripe->io_wait); in scrub_write_endio()
1156 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_submit_write_bio()
1157 u32 bio_len = bbio->bio.bi_iter.bi_size; in scrub_submit_write_bio()
1158 u32 bio_off = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT) - in scrub_submit_write_bio()
1159 stripe->logical; in scrub_submit_write_bio()
1161 fill_writer_pointer_gap(sctx, stripe->physical + bio_off); in scrub_submit_write_bio()
1162 atomic_inc(&stripe->pending_io); in scrub_submit_write_bio()
1163 btrfs_submit_repair_write(bbio, stripe->mirror_num, dev_replace); in scrub_submit_write_bio()
1176 if (!test_bit(bio_off >> fs_info->sectorsize_bits, in scrub_submit_write_bio()
1177 &stripe->write_error_bitmap)) in scrub_submit_write_bio()
1178 sctx->write_pointer += bio_len; in scrub_submit_write_bio()
1186 * - Only needs logical bytenr and mirror_num
1189 * - Would only result in writes to the specified mirror
1192 * - Handle dev-replace and read-repair writeback differently
1197 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; in scrub_write_sectors()
1201 for_each_set_bit(sector_nr, &write_bitmap, stripe->nr_sectors) { in scrub_write_sectors()
1207 ASSERT(test_bit(sector_nr, &stripe->extent_sector_bitmap)); in scrub_write_sectors()
1210 if (bbio && sector_nr && !test_bit(sector_nr - 1, &write_bitmap)) { in scrub_write_sectors()
1215 bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_WRITE, in scrub_write_sectors()
1217 bbio->bio.bi_iter.bi_sector = (stripe->logical + in scrub_write_sectors()
1218 (sector_nr << fs_info->sectorsize_bits)) >> in scrub_write_sectors()
1221 ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff); in scrub_write_sectors()
1222 ASSERT(ret == fs_info->sectorsize); in scrub_write_sectors()
1229 * Throttling of IO submission, bandwidth-limit based, the timeslice is 1
1241 bwlimit = READ_ONCE(device->scrub_speed_max); in scrub_throttle_dev_io()
1254 if (sctx->throttle_deadline == 0) { in scrub_throttle_dev_io()
1255 sctx->throttle_deadline = ktime_add_ms(now, time_slice / div); in scrub_throttle_dev_io()
1256 sctx->throttle_sent = 0; in scrub_throttle_dev_io()
1260 if (ktime_before(now, sctx->throttle_deadline)) { in scrub_throttle_dev_io()
1262 sctx->throttle_sent += bio_size; in scrub_throttle_dev_io()
1263 if (sctx->throttle_sent <= div_u64(bwlimit, div)) in scrub_throttle_dev_io()
1267 delta = ktime_ms_delta(sctx->throttle_deadline, now); in scrub_throttle_dev_io()
1281 sctx->throttle_deadline = 0; in scrub_throttle_dev_io()
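Compressed into userspace, the accounting above is a fixed-window rate limiter: each 1-second timeslice is split into div slots, a slot gets bwlimit/div bytes of quota, and once the quota is spent the caller sleeps out the rest of the slot. A sketch of that decision (the kernel's actual div value is not part of the matched lines, so it is a parameter here):

        #include <stdint.h>

        struct throttle {
                uint64_t deadline_ms;   /* end of the current slot, 0 = not started */
                uint64_t sent;          /* bytes accounted within the slot */
        };

        /* Returns how many ms to sleep before submitting bio_size more bytes. */
        static uint64_t throttle_delay_ms(struct throttle *t, uint64_t now_ms,
                                          uint64_t bwlimit, unsigned int div,
                                          uint64_t bio_size)
        {
                const uint64_t time_slice_ms = 1000;    /* "the timeslice is 1 second" */

                if (bwlimit == 0)                       /* no limit configured */
                        return 0;

                if (t->deadline_ms == 0) {              /* open a fresh slot */
                        t->deadline_ms = now_ms + time_slice_ms / div;
                        t->sent = 0;
                }

                if (now_ms < t->deadline_ms) {
                        t->sent += bio_size;
                        if (t->sent <= bwlimit / div)   /* still within the slot quota */
                                return 0;
                        return t->deadline_ms - now_ms; /* sleep out the slot */
                }

                t->deadline_ms = 0;                     /* slot expired: restart next call */
                return 0;
        }

        int main(void)
        {
                struct throttle t = { 0 };
                /* 10 MiB/s limit, 32 slots: ~320K quota per ~31ms slot */
                uint64_t d = throttle_delay_ms(&t, 0, 10 << 20, 32, 512 << 10);
                return (int)d;  /* 512K exceeds the quota: caller would sleep ~31ms */
        }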
1287 * the leftmost data stripe's logical offset.
1289 * Return 0 if it is a data stripe, 1 if it is a parity stripe.
1300 last_offset = (physical - map->stripes[num].physical) * data_stripes; in get_raid56_logic_offset()
1314 /* Work out the disk rotation on this stripe-set */ in get_raid56_logic_offset()
1315 rot = stripe_nr % map->num_stripes; in get_raid56_logic_offset()
1316 /* Calculate which stripe this data is located on */ in get_raid56_logic_offset()
1318 stripe_index = rot % map->num_stripes; in get_raid56_logic_offset()
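The rot/stripe_index math is what rotates P (and Q) across devices between consecutive stripe-sets. A toy model for RAID5 over three devices: it assumes data stripe i of a stripe-set with rotation rot lands on device (i + rot) % num_stripes, exactly as the scrub_raid56_parity_stripe() lines further down compute, and that parity takes the slot after the data stripes (that last placement is an assumption of this sketch):

        #include <stdio.h>

        int main(void)
        {
                const int num_stripes = 3;              /* RAID5: 2 data + 1 parity */
                const int data_stripes = num_stripes - 1;

                for (int stripe_nr = 0; stripe_nr < 3; stripe_nr++) {
                        const int rot = stripe_nr % num_stripes;        /* disk rotation */

                        for (int i = 0; i < data_stripes; i++)
                                printf("stripe-set %d: data %d on dev %d\n",
                                       stripe_nr, i, (i + rot) % num_stripes);
                        printf("stripe-set %d: P on dev %d\n",
                               stripe_nr, (data_stripes + rot) % num_stripes);
                }
                return 0;
        }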
1336 struct btrfs_fs_info *fs_info = path->nodes[0]->fs_info; in compare_extent_item_range()
1340 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); in compare_extent_item_range()
1344 len = fs_info->nodesize; in compare_extent_item_range()
1349 return -1; in compare_extent_item_range()
1365 * return the extent item. This is for a data extent crossing the stripe boundary.
1375 struct btrfs_fs_info *fs_info = extent_root->fs_info; in find_first_extent_item()
1380 if (path->nodes[0]) in find_first_extent_item()
1388 key.offset = (u64)-1; in find_first_extent_item()
1395 * Key with offset -1 found, there would have to exist an extent in find_first_extent_item()
1399 return -EUCLEAN; in find_first_extent_item()
1415 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); in find_first_extent_item()
1445 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); in get_extent_info()
1450 *size_ret = path->nodes[0]->fs_info->nodesize; in get_extent_info()
1453 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_extent_item); in get_extent_info()
1454 *flags_ret = btrfs_extent_flags(path->nodes[0], ei); in get_extent_info()
1455 *generation_ret = btrfs_extent_generation(path->nodes[0], ei); in get_extent_info()
1461 struct btrfs_fs_info *fs_info = sctx->fs_info; in sync_write_pointer_for_zoned()
1467 mutex_lock(&sctx->wr_lock); in sync_write_pointer_for_zoned()
1468 if (sctx->write_pointer < physical_end) { in sync_write_pointer_for_zoned()
1469 ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical, in sync_write_pointer_for_zoned()
1471 sctx->write_pointer); in sync_write_pointer_for_zoned()
1476 mutex_unlock(&sctx->wr_lock); in sync_write_pointer_for_zoned()
1477 btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical); in sync_write_pointer_for_zoned()
1487 for (u64 cur_logical = max(stripe->logical, extent_start); in fill_one_extent_info()
1488 cur_logical < min(stripe->logical + BTRFS_STRIPE_LEN, in fill_one_extent_info()
1490 cur_logical += fs_info->sectorsize) { in fill_one_extent_info()
1491 const int nr_sector = (cur_logical - stripe->logical) >> in fill_one_extent_info()
1492 fs_info->sectorsize_bits; in fill_one_extent_info()
1494 &stripe->sectors[nr_sector]; in fill_one_extent_info()
1496 set_bit(nr_sector, &stripe->extent_sector_bitmap); in fill_one_extent_info()
1498 sector->is_metadata = true; in fill_one_extent_info()
1499 sector->generation = extent_gen; in fill_one_extent_info()
1506 stripe->extent_sector_bitmap = 0; in scrub_stripe_reset_bitmaps()
1507 stripe->init_error_bitmap = 0; in scrub_stripe_reset_bitmaps()
1508 stripe->init_nr_io_errors = 0; in scrub_stripe_reset_bitmaps()
1509 stripe->init_nr_csum_errors = 0; in scrub_stripe_reset_bitmaps()
1510 stripe->init_nr_meta_errors = 0; in scrub_stripe_reset_bitmaps()
1511 stripe->error_bitmap = 0; in scrub_stripe_reset_bitmaps()
1512 stripe->io_error_bitmap = 0; in scrub_stripe_reset_bitmaps()
1513 stripe->csum_error_bitmap = 0; in scrub_stripe_reset_bitmaps()
1514 stripe->meta_error_bitmap = 0; in scrub_stripe_reset_bitmaps()
1532 struct btrfs_fs_info *fs_info = bg->fs_info; in scrub_find_fill_first_stripe()
1533 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bg->start); in scrub_find_fill_first_stripe()
1534 struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bg->start); in scrub_find_fill_first_stripe()
1546 return -EUCLEAN; in scrub_find_fill_first_stripe()
1548 memset(stripe->sectors, 0, sizeof(struct scrub_sector_verification) * in scrub_find_fill_first_stripe()
1549 stripe->nr_sectors); in scrub_find_fill_first_stripe()
1553 ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length); in scrub_find_fill_first_stripe()
1563 stripe->nr_meta_extents++; in scrub_find_fill_first_stripe()
1565 stripe->nr_data_extents++; in scrub_find_fill_first_stripe()
1571 * The extra calculation against bg->start is to handle block groups in scrub_find_fill_first_stripe()
1574 stripe->logical = round_down(cur_logical - bg->start, BTRFS_STRIPE_LEN) + in scrub_find_fill_first_stripe()
1575 bg->start; in scrub_find_fill_first_stripe()
1576 stripe->physical = physical + stripe->logical - logical_start; in scrub_find_fill_first_stripe()
1577 stripe->dev = dev; in scrub_find_fill_first_stripe()
1578 stripe->bg = bg; in scrub_find_fill_first_stripe()
1579 stripe->mirror_num = mirror_num; in scrub_find_fill_first_stripe()
1580 stripe_end = stripe->logical + BTRFS_STRIPE_LEN - 1; in scrub_find_fill_first_stripe()
1582 /* Fill the first extent info into stripe->sectors[] array. */ in scrub_find_fill_first_stripe()
1590 stripe_end - cur_logical + 1); in scrub_find_fill_first_stripe()
1600 stripe->nr_meta_extents++; in scrub_find_fill_first_stripe()
1602 stripe->nr_data_extents++; in scrub_find_fill_first_stripe()
1608 /* Now fill the data csum. */ in scrub_find_fill_first_stripe()
1609 if (bg->flags & BTRFS_BLOCK_GROUP_DATA) { in scrub_find_fill_first_stripe()
1614 ASSERT(stripe->csums); in scrub_find_fill_first_stripe()
1620 ASSERT(BITS_PER_LONG >= BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits); in scrub_find_fill_first_stripe()
1623 stripe->logical, stripe_end, in scrub_find_fill_first_stripe()
1624 stripe->csums, &csum_bitmap); in scrub_find_fill_first_stripe()
1630 for_each_set_bit(sector_nr, &csum_bitmap, stripe->nr_sectors) { in scrub_find_fill_first_stripe()
1631 stripe->sectors[sector_nr].csum = stripe->csums + in scrub_find_fill_first_stripe()
1632 sector_nr * fs_info->csum_size; in scrub_find_fill_first_stripe()
1635 set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state); in scrub_find_fill_first_stripe()
1644 stripe->nr_meta_extents = 0; in scrub_reset_stripe()
1645 stripe->nr_data_extents = 0; in scrub_reset_stripe()
1646 stripe->state = 0; in scrub_reset_stripe()
1648 for (int i = 0; i < stripe->nr_sectors; i++) { in scrub_reset_stripe()
1649 stripe->sectors[i].is_metadata = false; in scrub_reset_stripe()
1650 stripe->sectors[i].csum = NULL; in scrub_reset_stripe()
1651 stripe->sectors[i].generation = 0; in scrub_reset_stripe()
1657 ASSERT(stripe->bg); in stripe_length()
1660 stripe->bg->start + stripe->bg->length - stripe->logical); in stripe_length()
1665 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; in scrub_submit_extent_sector_read()
1667 unsigned int nr_sectors = stripe_length(stripe) >> fs_info->sectorsize_bits; in scrub_submit_extent_sector_read()
1669 int mirror = stripe->mirror_num; in scrub_submit_extent_sector_read() local
1672 atomic_inc(&stripe->pending_io); in scrub_submit_extent_sector_read()
1674 for_each_set_bit(i, &stripe->extent_sector_bitmap, stripe->nr_sectors) { in scrub_submit_extent_sector_read()
1685 !test_bit(i - 1, &stripe->extent_sector_bitmap)) || in scrub_submit_extent_sector_read()
1686 bbio->bio.bi_iter.bi_size >= stripe_len)) { in scrub_submit_extent_sector_read()
1687 ASSERT(bbio->bio.bi_iter.bi_size); in scrub_submit_extent_sector_read()
1688 atomic_inc(&stripe->pending_io); in scrub_submit_extent_sector_read()
1689 btrfs_submit_bbio(bbio, mirror); in scrub_submit_extent_sector_read()
1696 const u64 logical = stripe->logical + in scrub_submit_extent_sector_read()
1697 (i << fs_info->sectorsize_bits); in scrub_submit_extent_sector_read()
1701 stripe_len = (nr_sectors - i) << fs_info->sectorsize_bits; in scrub_submit_extent_sector_read()
1707 &stripe_len, &bioc, &io_stripe, &mirror); in scrub_submit_extent_sector_read()
1710 if (err != -ENODATA) { in scrub_submit_extent_sector_read()
1713 * returned -ENODATA, which means there's in scrub_submit_extent_sector_read()
1719 set_bit(i, &stripe->io_error_bitmap); in scrub_submit_extent_sector_read()
1720 set_bit(i, &stripe->error_bitmap); in scrub_submit_extent_sector_read()
1725 bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ, in scrub_submit_extent_sector_read()
1727 bbio->bio.bi_iter.bi_sector = logical >> SECTOR_SHIFT; in scrub_submit_extent_sector_read()
1730 __bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff); in scrub_submit_extent_sector_read()
1734 ASSERT(bbio->bio.bi_iter.bi_size); in scrub_submit_extent_sector_read()
1735 atomic_inc(&stripe->pending_io); in scrub_submit_extent_sector_read()
1736 btrfs_submit_bbio(bbio, mirror); in scrub_submit_extent_sector_read()
1739 if (atomic_dec_and_test(&stripe->pending_io)) { in scrub_submit_extent_sector_read()
1740 wake_up(&stripe->io_wait); in scrub_submit_extent_sector_read()
1741 INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker); in scrub_submit_extent_sector_read()
1742 queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work); in scrub_submit_extent_sector_read()
1749 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_submit_initial_read()
1751 unsigned int nr_sectors = stripe_length(stripe) >> fs_info->sectorsize_bits; in scrub_submit_initial_read()
1752 int mirror = stripe->mirror_num; in scrub_submit_initial_read() local
1754 ASSERT(stripe->bg); in scrub_submit_initial_read()
1755 ASSERT(stripe->mirror_num > 0); in scrub_submit_initial_read()
1756 ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state)); in scrub_submit_initial_read()
1758 if (btrfs_need_stripe_tree_update(fs_info, stripe->bg->flags)) { in scrub_submit_initial_read()
1766 bbio->bio.bi_iter.bi_sector = stripe->logical >> SECTOR_SHIFT; in scrub_submit_initial_read()
1773 ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff); in scrub_submit_initial_read()
1775 ASSERT(ret == fs_info->sectorsize); in scrub_submit_initial_read()
1777 atomic_inc(&stripe->pending_io); in scrub_submit_initial_read()
1780 * For dev-replace, either the user asks to avoid the source dev, or in scrub_submit_initial_read()
1781 * the device is missing, we try the next mirror instead. in scrub_submit_initial_read()
1783 if (sctx->is_dev_replace && in scrub_submit_initial_read()
1784 (fs_info->dev_replace.cont_reading_from_srcdev_mode == in scrub_submit_initial_read()
1786 !stripe->dev->bdev)) { in scrub_submit_initial_read()
1787 int num_copies = btrfs_num_copies(fs_info, stripe->bg->start, in scrub_submit_initial_read()
1788 stripe->bg->length); in scrub_submit_initial_read()
1790 mirror = calc_next_mirror(mirror, num_copies); in scrub_submit_initial_read()
1792 btrfs_submit_bbio(bbio, mirror); in scrub_submit_initial_read()
1799 for_each_set_bit(i, &stripe->error_bitmap, stripe->nr_sectors) { in stripe_has_metadata_error()
1800 if (stripe->sectors[i].is_metadata) { in stripe_has_metadata_error()
1801 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; in stripe_has_metadata_error()
1805 stripe->logical, in stripe_has_metadata_error()
1806 stripe->logical + (i << fs_info->sectorsize_bits)); in stripe_has_metadata_error()
1822 scrub_throttle_dev_io(sctx, sctx->stripes[0].dev, in submit_initial_group_read()
1826 struct scrub_stripe *stripe = &sctx->stripes[first_slot + i]; in submit_initial_group_read()
1829 ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state)); in submit_initial_group_read()
1837 struct btrfs_fs_info *fs_info = sctx->fs_info; in flush_scrub_stripes()
1839 const int nr_stripes = sctx->cur_stripe; in flush_scrub_stripes()
1845 ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &sctx->stripes[0].state)); in flush_scrub_stripes()
1851 submit_initial_group_read(sctx, first_slot, nr_stripes - first_slot); in flush_scrub_stripes()
1855 stripe = &sctx->stripes[i]; in flush_scrub_stripes()
1857 wait_event(stripe->repair_wait, in flush_scrub_stripes()
1858 test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state)); in flush_scrub_stripes()
1861 /* Submit for dev-replace. */ in flush_scrub_stripes()
1862 if (sctx->is_dev_replace) { in flush_scrub_stripes()
1864 * For dev-replace, if we know there is something wrong with in flush_scrub_stripes()
1868 if (stripe_has_metadata_error(&sctx->stripes[i])) { in flush_scrub_stripes()
1869 ret = -EIO; in flush_scrub_stripes()
1876 stripe = &sctx->stripes[i]; in flush_scrub_stripes()
1878 ASSERT(stripe->dev == fs_info->dev_replace.srcdev); in flush_scrub_stripes()
1880 bitmap_andnot(&good, &stripe->extent_sector_bitmap, in flush_scrub_stripes()
1881 &stripe->error_bitmap, stripe->nr_sectors); in flush_scrub_stripes()
1888 stripe = &sctx->stripes[i]; in flush_scrub_stripes()
1891 spin_lock(&sctx->stat_lock); in flush_scrub_stripes()
1892 sctx->stat.last_physical = stripe->physical + stripe_length(stripe); in flush_scrub_stripes()
1893 spin_unlock(&sctx->stat_lock); in flush_scrub_stripes()
1897 sctx->cur_stripe = 0; in flush_scrub_stripes()
1903 complete(bio->bi_private); in raid56_scrub_wait_endio()
1918 ASSERT(sctx->cur_stripe < SCRUB_TOTAL_STRIPES); in queue_scrub_stripe()
1923 stripe = &sctx->stripes[sctx->cur_stripe]; in queue_scrub_stripe()
1925 ret = scrub_find_fill_first_stripe(bg, &sctx->extent_path, in queue_scrub_stripe()
1926 &sctx->csum_path, dev, physical, in queue_scrub_stripe()
1931 *found_logical_ret = stripe->logical; in queue_scrub_stripe()
1932 sctx->cur_stripe++; in queue_scrub_stripe()
1935 if (sctx->cur_stripe % SCRUB_STRIPES_PER_GROUP == 0) { in queue_scrub_stripe()
1936 const int first_slot = sctx->cur_stripe - SCRUB_STRIPES_PER_GROUP; in queue_scrub_stripe()
1942 if (sctx->cur_stripe == SCRUB_TOTAL_STRIPES) in queue_scrub_stripe()
1954 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_raid56_parity_stripe()
1967 ASSERT(sctx->raid56_data_stripes); in scrub_raid56_parity_stripe()
1970 * For data stripe search, we cannot reuse the same extent/csum paths, in scrub_raid56_parity_stripe()
1971 * as the data stripe bytenr may be smaller than the previous extent. Thus in scrub_raid56_parity_stripe()
1984 stripe = &sctx->raid56_data_stripes[i]; in scrub_raid56_parity_stripe()
1985 rot = div_u64(full_stripe_start - bg->start, in scrub_raid56_parity_stripe()
1987 stripe_index = (i + rot) % map->num_stripes; in scrub_raid56_parity_stripe()
1988 physical = map->stripes[stripe_index].physical + in scrub_raid56_parity_stripe()
1992 set_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state); in scrub_raid56_parity_stripe()
1994 map->stripes[stripe_index].dev, physical, 1, in scrub_raid56_parity_stripe()
2000 * No extent in this data stripe, need to manually mark them in scrub_raid56_parity_stripe()
2004 stripe->logical = full_stripe_start + in scrub_raid56_parity_stripe()
2006 stripe->dev = map->stripes[stripe_index].dev; in scrub_raid56_parity_stripe()
2007 stripe->mirror_num = 1; in scrub_raid56_parity_stripe()
2008 set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state); in scrub_raid56_parity_stripe()
2012 /* Check if all data stripes are empty. */ in scrub_raid56_parity_stripe()
2014 stripe = &sctx->raid56_data_stripes[i]; in scrub_raid56_parity_stripe()
2015 if (!bitmap_empty(&stripe->extent_sector_bitmap, stripe->nr_sectors)) { in scrub_raid56_parity_stripe()
2026 stripe = &sctx->raid56_data_stripes[i]; in scrub_raid56_parity_stripe()
2030 stripe = &sctx->raid56_data_stripes[i]; in scrub_raid56_parity_stripe()
2032 wait_event(stripe->repair_wait, in scrub_raid56_parity_stripe()
2033 test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state)); in scrub_raid56_parity_stripe()
2036 ASSERT(!btrfs_is_zoned(sctx->fs_info)); in scrub_raid56_parity_stripe()
2039 * Now all data stripes are properly verified. Check if we have any in scrub_raid56_parity_stripe()
2048 stripe = &sctx->raid56_data_stripes[i]; in scrub_raid56_parity_stripe()
2052 * As we may hit an empty data stripe while its device is missing. in scrub_raid56_parity_stripe()
2054 bitmap_and(&error, &stripe->error_bitmap, in scrub_raid56_parity_stripe()
2055 &stripe->extent_sector_bitmap, stripe->nr_sectors); in scrub_raid56_parity_stripe()
2056 if (!bitmap_empty(&error, stripe->nr_sectors)) { in scrub_raid56_parity_stripe()
2058 "unrepaired sectors detected, full stripe %llu data stripe %u errors %*pbl", in scrub_raid56_parity_stripe()
2059 full_stripe_start, i, stripe->nr_sectors, in scrub_raid56_parity_stripe()
2061 ret = -EIO; in scrub_raid56_parity_stripe()
2065 &stripe->extent_sector_bitmap, stripe->nr_sectors); in scrub_raid56_parity_stripe()
2070 bio->bi_iter.bi_sector = full_stripe_start >> SECTOR_SHIFT; in scrub_raid56_parity_stripe()
2071 bio->bi_private = &io_done; in scrub_raid56_parity_stripe()
2072 bio->bi_end_io = raid56_scrub_wait_endio; in scrub_raid56_parity_stripe()
2083 BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits); in scrub_raid56_parity_stripe()
2086 ret = -ENOMEM; in scrub_raid56_parity_stripe()
2092 stripe = &sctx->raid56_data_stripes[i]; in scrub_raid56_parity_stripe()
2094 raid56_parity_cache_data_pages(rbio, stripe->pages, in scrub_raid56_parity_stripe()
2099 ret = blk_status_to_errno(bio->bi_status); in scrub_raid56_parity_stripe()
2110 * Scrub one range which can only have a simple mirror-based profile.
2123 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_simple_mirror()
2129 ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length); in scrub_simple_mirror()
2134 u64 cur_physical = physical + cur_logical - logical_start; in scrub_simple_mirror()
2137 if (atomic_read(&fs_info->scrub_cancel_req) || in scrub_simple_mirror()
2138 atomic_read(&sctx->cancel_req)) { in scrub_simple_mirror()
2139 ret = -ECANCELED; in scrub_simple_mirror()
2143 if (atomic_read(&fs_info->scrub_pause_req)) { in scrub_simple_mirror()
2148 spin_lock(&bg->lock); in scrub_simple_mirror()
2149 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) { in scrub_simple_mirror()
2150 spin_unlock(&bg->lock); in scrub_simple_mirror()
2154 spin_unlock(&bg->lock); in scrub_simple_mirror()
2157 cur_logical, logical_end - cur_logical, in scrub_simple_mirror()
2161 spin_lock(&sctx->stat_lock); in scrub_simple_mirror()
2162 sctx->stat.last_physical = physical + logical_length; in scrub_simple_mirror()
2163 spin_unlock(&sctx->stat_lock); in scrub_simple_mirror()
2183 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 | in simple_stripe_full_stripe_len()
2186 return btrfs_stripe_nr_to_offset(map->num_stripes / map->sub_stripes); in simple_stripe_full_stripe_len()
2194 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 | in simple_stripe_get_logical()
2196 ASSERT(stripe_index < map->num_stripes); in simple_stripe_get_logical()
2199 * (stripe_index / sub_stripes) gives how many data stripes we need to in simple_stripe_get_logical()
2202 return btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes) + in simple_stripe_get_logical()
2203 bg->start; in simple_stripe_get_logical()
2206 /* Get the mirror number for the stripe */
2209 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 | in simple_stripe_mirror_num()
2211 ASSERT(stripe_index < map->num_stripes); in simple_stripe_mirror_num()
2214 return stripe_index % map->sub_stripes + 1; in simple_stripe_mirror_num()
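Taken together, the two RAID0/RAID10 helpers say: consecutive stripe_index values within one sub_stripes group are copies of the same logical 64K stripe, and moving past a group advances the logical offset by one BTRFS_STRIPE_LEN. A runnable example for RAID10 over four devices (bg->start taken as 0 and BTRFS_STRIPE_LEN as 64K for the demo):

        #include <stdio.h>

        #define ASSUMED_STRIPE_LEN (64 * 1024)  /* BTRFS_STRIPE_LEN, assumed 64K */

        int main(void)
        {
                const int num_stripes = 4;      /* RAID10 across 4 devices */
                const int sub_stripes = 2;      /* each stripe mirrored once */

                for (int stripe_index = 0; stripe_index < num_stripes; stripe_index++) {
                        /* btrfs_stripe_nr_to_offset(stripe_index / sub_stripes) + bg->start */
                        unsigned long long logical = (unsigned long long)
                                (stripe_index / sub_stripes) * ASSUMED_STRIPE_LEN;
                        const int mirror_num = stripe_index % sub_stripes + 1;

                        printf("stripe %d -> logical +%lluK, mirror %d\n",
                               stripe_index, logical / 1024, mirror_num);
                }
                return 0;
        }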
2225 const u64 orig_physical = map->stripes[stripe_index].physical; in scrub_simple_stripe()
2231 while (cur_logical < bg->start + bg->length) { in scrub_simple_stripe()
2256 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_stripe()
2257 const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK; in scrub_stripe()
2258 const u64 chunk_logical = bg->start; in scrub_stripe()
2261 u64 physical = map->stripes[stripe_index].physical; in scrub_stripe()
2273 ASSERT(sctx->extent_path.nodes[0] == NULL); in scrub_stripe()
2277 if (sctx->is_dev_replace && in scrub_stripe()
2278 btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) { in scrub_stripe()
2279 mutex_lock(&sctx->wr_lock); in scrub_stripe()
2280 sctx->write_pointer = physical; in scrub_stripe()
2281 mutex_unlock(&sctx->wr_lock); in scrub_stripe()
2284 /* Prepare the extra data stripes used by RAID56. */ in scrub_stripe()
2286 ASSERT(sctx->raid56_data_stripes == NULL); in scrub_stripe()
2288 sctx->raid56_data_stripes = kcalloc(nr_data_stripes(map), in scrub_stripe()
2291 if (!sctx->raid56_data_stripes) { in scrub_stripe()
2292 ret = -ENOMEM; in scrub_stripe()
2297 &sctx->raid56_data_stripes[i]); in scrub_stripe()
2300 sctx->raid56_data_stripes[i].bg = bg; in scrub_stripe()
2301 sctx->raid56_data_stripes[i].sctx = sctx; in scrub_stripe()
2321 ret = scrub_simple_mirror(sctx, bg, bg->start, bg->length, in scrub_stripe()
2322 scrub_dev, map->stripes[stripe_index].physical, in scrub_stripe()
2329 offset = btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes); in scrub_stripe()
2334 ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK); in scrub_stripe()
2359 spin_lock(&sctx->stat_lock); in scrub_stripe()
2360 sctx->stat.last_physical = min(physical + BTRFS_STRIPE_LEN, in scrub_stripe()
2362 spin_unlock(&sctx->stat_lock); in scrub_stripe()
2369 * Now we're at a data stripe, scrub each extent in the range. in scrub_stripe()
2371 * At this stage, if we ignore the repair part, inside each data in scrub_stripe()
2383 spin_lock(&sctx->stat_lock); in scrub_stripe()
2384 sctx->stat.last_physical = physical; in scrub_stripe()
2385 spin_unlock(&sctx->stat_lock); in scrub_stripe()
2391 btrfs_release_path(&sctx->extent_path); in scrub_stripe()
2392 btrfs_release_path(&sctx->csum_path); in scrub_stripe()
2394 if (sctx->raid56_data_stripes) { in scrub_stripe()
2396 release_scrub_stripe(&sctx->raid56_data_stripes[i]); in scrub_stripe()
2397 kfree(sctx->raid56_data_stripes); in scrub_stripe()
2398 sctx->raid56_data_stripes = NULL; in scrub_stripe()
2401 if (sctx->is_dev_replace && ret >= 0) { in scrub_stripe()
2406 map->stripes[stripe_index].physical, in scrub_stripe()
2421 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_chunk()
2426 map = btrfs_find_chunk_map(fs_info, bg->start, bg->length); in scrub_chunk()
2432 spin_lock(&bg->lock); in scrub_chunk()
2433 if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) in scrub_chunk()
2434 ret = -EINVAL; in scrub_chunk()
2435 spin_unlock(&bg->lock); in scrub_chunk()
2439 if (map->start != bg->start) in scrub_chunk()
2441 if (map->chunk_len < dev_extent_len) in scrub_chunk()
2444 for (i = 0; i < map->num_stripes; ++i) { in scrub_chunk()
2445 if (map->stripes[i].dev->bdev == scrub_dev->bdev && in scrub_chunk()
2446 map->stripes[i].physical == dev_offset) { in scrub_chunk()
2461 struct btrfs_fs_info *fs_info = cache->fs_info; in finish_extent_writes_for_zoned()
2479 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_enumerate_chunks()
2480 struct btrfs_root *root = fs_info->dev_root; in scrub_enumerate_chunks()
2489 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; in scrub_enumerate_chunks()
2493 return -ENOMEM; in scrub_enumerate_chunks()
2495 path->reada = READA_FORWARD; in scrub_enumerate_chunks()
2496 path->search_commit_root = 1; in scrub_enumerate_chunks()
2497 path->skip_locking = 1; in scrub_enumerate_chunks()
2499 key.objectid = scrub_dev->devid; in scrub_enumerate_chunks()
2510 if (path->slots[0] >= in scrub_enumerate_chunks()
2511 btrfs_header_nritems(path->nodes[0])) { in scrub_enumerate_chunks()
2524 l = path->nodes[0]; in scrub_enumerate_chunks()
2525 slot = path->slots[0]; in scrub_enumerate_chunks()
2529 if (found_key.objectid != scrub_dev->devid) in scrub_enumerate_chunks()
2560 ASSERT(cache->start <= chunk_offset); in scrub_enumerate_chunks()
2580 if (cache->start < chunk_offset) { in scrub_enumerate_chunks()
2585 if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) { in scrub_enumerate_chunks()
2586 if (!test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags)) { in scrub_enumerate_chunks()
2600 spin_lock(&cache->lock); in scrub_enumerate_chunks()
2601 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) { in scrub_enumerate_chunks()
2602 spin_unlock(&cache->lock); in scrub_enumerate_chunks()
2607 spin_unlock(&cache->lock); in scrub_enumerate_chunks()
2613 * -> btrfs_wait_for_commit() in scrub_enumerate_chunks()
2614 * -> btrfs_commit_transaction() in scrub_enumerate_chunks()
2615 * -> btrfs_scrub_pause() in scrub_enumerate_chunks()
2623 * -EFBIG from btrfs_finish_chunk_alloc() like: in scrub_enumerate_chunks()
2639 * - Write duplication in scrub_enumerate_chunks()
2640 * Contains latest data in scrub_enumerate_chunks()
2641 * - Scrub copy in scrub_enumerate_chunks()
2642 * Contains data from commit tree in scrub_enumerate_chunks()
2645 * be overwritten by scrub copy, causing data corruption. in scrub_enumerate_chunks()
2646 * So for dev-replace, it's not allowed to continue if a block in scrub_enumerate_chunks()
2649 ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace); in scrub_enumerate_chunks()
2650 if (!ret && sctx->is_dev_replace) { in scrub_enumerate_chunks()
2662 } else if (ret == -ENOSPC && !sctx->is_dev_replace && in scrub_enumerate_chunks()
2663 !(cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) { in scrub_enumerate_chunks()
2665 * btrfs_inc_block_group_ro returns -ENOSPC when it in scrub_enumerate_chunks()
2671 * For RAID56 chunks, we have to mark them read-only in scrub_enumerate_chunks()
2678 } else if (ret == -ETXTBSY) { in scrub_enumerate_chunks()
2681 cache->start); in scrub_enumerate_chunks()
2696 * finish before dev-replace. in scrub_enumerate_chunks()
2699 if (sctx->is_dev_replace) { in scrub_enumerate_chunks()
2705 down_write(&dev_replace->rwsem); in scrub_enumerate_chunks()
2706 dev_replace->cursor_right = found_key.offset + dev_extent_len; in scrub_enumerate_chunks()
2707 dev_replace->cursor_left = found_key.offset; in scrub_enumerate_chunks()
2708 dev_replace->item_needs_writeback = 1; in scrub_enumerate_chunks()
2709 up_write(&dev_replace->rwsem); in scrub_enumerate_chunks()
2713 if (sctx->is_dev_replace && in scrub_enumerate_chunks()
2714 !btrfs_finish_block_group_to_copy(dev_replace->srcdev, in scrub_enumerate_chunks()
2718 down_write(&dev_replace->rwsem); in scrub_enumerate_chunks()
2719 dev_replace->cursor_left = dev_replace->cursor_right; in scrub_enumerate_chunks()
2720 dev_replace->item_needs_writeback = 1; in scrub_enumerate_chunks()
2721 up_write(&dev_replace->rwsem); in scrub_enumerate_chunks()
2733 spin_lock(&cache->lock); in scrub_enumerate_chunks()
2734 if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags) && in scrub_enumerate_chunks()
2735 !cache->ro && cache->reserved == 0 && cache->used == 0) { in scrub_enumerate_chunks()
2736 spin_unlock(&cache->lock); in scrub_enumerate_chunks()
2738 btrfs_discard_queue_work(&fs_info->discard_ctl, in scrub_enumerate_chunks()
2743 spin_unlock(&cache->lock); in scrub_enumerate_chunks()
2750 if (sctx->is_dev_replace && in scrub_enumerate_chunks()
2751 atomic64_read(&dev_replace->num_write_errors) > 0) { in scrub_enumerate_chunks()
2752 ret = -EIO; in scrub_enumerate_chunks()
2755 if (sctx->stat.malloc_errors > 0) { in scrub_enumerate_chunks()
2756 ret = -ENOMEM; in scrub_enumerate_chunks()
2772 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_one_super()
2778 bio_init(&bio, dev->bdev, &bvec, 1, REQ_OP_READ); in scrub_one_super()
2790 physical, dev->devid); in scrub_one_super()
2791 return -EIO; in scrub_one_super()
2796 physical, dev->devid, in scrub_one_super()
2798 return -EUCLEAN; in scrub_one_super()
2801 return btrfs_validate_super(fs_info, sb, -1); in scrub_one_super()
2812 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_supers()
2815 return -EROFS; in scrub_supers()
2819 spin_lock(&sctx->stat_lock); in scrub_supers()
2820 sctx->stat.malloc_errors++; in scrub_supers()
2821 spin_unlock(&sctx->stat_lock); in scrub_supers()
2822 return -ENOMEM; in scrub_supers()
2826 if (scrub_dev->fs_devices != fs_info->fs_devices) in scrub_supers()
2827 gen = scrub_dev->generation; in scrub_supers()
2833 if (ret == -ENOENT) in scrub_supers()
2837 spin_lock(&sctx->stat_lock); in scrub_supers()
2838 sctx->stat.super_errors++; in scrub_supers()
2839 spin_unlock(&sctx->stat_lock); in scrub_supers()
2844 scrub_dev->commit_total_bytes) in scrub_supers()
2851 spin_lock(&sctx->stat_lock); in scrub_supers()
2852 sctx->stat.super_errors++; in scrub_supers()
2853 spin_unlock(&sctx->stat_lock); in scrub_supers()
2862 if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt, in scrub_workers_put()
2863 &fs_info->scrub_lock)) { in scrub_workers_put()
2864 struct workqueue_struct *scrub_workers = fs_info->scrub_workers; in scrub_workers_put()
2866 fs_info->scrub_workers = NULL; in scrub_workers_put()
2867 mutex_unlock(&fs_info->scrub_lock); in scrub_workers_put()
2875 * Get a reference count on fs_info->scrub_workers. Start workers if necessary
2881 int max_active = fs_info->thread_pool_size; in scrub_workers_get()
2882 int ret = -ENOMEM; in scrub_workers_get()
2884 if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt)) in scrub_workers_get()
2887 scrub_workers = alloc_workqueue("btrfs-scrub", flags, max_active); in scrub_workers_get()
2889 return -ENOMEM; in scrub_workers_get()
2891 mutex_lock(&fs_info->scrub_lock); in scrub_workers_get()
2892 if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) { in scrub_workers_get()
2893 ASSERT(fs_info->scrub_workers == NULL); in scrub_workers_get()
2894 fs_info->scrub_workers = scrub_workers; in scrub_workers_get()
2895 refcount_set(&fs_info->scrub_workers_refcnt, 1); in scrub_workers_get()
2896 mutex_unlock(&fs_info->scrub_lock); in scrub_workers_get()
2900 refcount_inc(&fs_info->scrub_workers_refcnt); in scrub_workers_get()
2901 mutex_unlock(&fs_info->scrub_lock); in scrub_workers_get()
2921 return -EAGAIN; in btrfs_scrub_dev()
2924 ASSERT(fs_info->nodesize <= BTRFS_STRIPE_LEN); in btrfs_scrub_dev()
2931 ASSERT(fs_info->nodesize <= in btrfs_scrub_dev()
2932 SCRUB_MAX_SECTORS_PER_BLOCK << fs_info->sectorsize_bits); in btrfs_scrub_dev()
2943 mutex_lock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
2944 dev = btrfs_find_device(fs_info->fs_devices, &args); in btrfs_scrub_dev()
2945 if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) && in btrfs_scrub_dev()
2947 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
2948 ret = -ENODEV; in btrfs_scrub_dev()
2953 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) { in btrfs_scrub_dev()
2954 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
2958 ret = -EROFS; in btrfs_scrub_dev()
2962 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_dev()
2963 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || in btrfs_scrub_dev()
2964 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) { in btrfs_scrub_dev()
2965 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_dev()
2966 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
2967 ret = -EIO; in btrfs_scrub_dev()
2971 down_read(&fs_info->dev_replace.rwsem); in btrfs_scrub_dev()
2972 if (dev->scrub_ctx || in btrfs_scrub_dev()
2974 btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) { in btrfs_scrub_dev()
2975 up_read(&fs_info->dev_replace.rwsem); in btrfs_scrub_dev()
2976 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_dev()
2977 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
2978 ret = -EINPROGRESS; in btrfs_scrub_dev()
2981 up_read(&fs_info->dev_replace.rwsem); in btrfs_scrub_dev()
2983 sctx->readonly = readonly; in btrfs_scrub_dev()
2984 dev->scrub_ctx = sctx; in btrfs_scrub_dev()
2985 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
2992 atomic_inc(&fs_info->scrubs_running); in btrfs_scrub_dev()
2993 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_dev()
3002 * before incrementing fs_info->scrubs_running). in btrfs_scrub_dev()
3008 spin_lock(&sctx->stat_lock); in btrfs_scrub_dev()
3009 old_super_errors = sctx->stat.super_errors; in btrfs_scrub_dev()
3010 spin_unlock(&sctx->stat_lock); in btrfs_scrub_dev()
3017 mutex_lock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
3019 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
3021 spin_lock(&sctx->stat_lock); in btrfs_scrub_dev()
3027 if (sctx->stat.super_errors > old_super_errors && !sctx->readonly) in btrfs_scrub_dev()
3029 spin_unlock(&sctx->stat_lock); in btrfs_scrub_dev()
3036 atomic_dec(&fs_info->scrubs_running); in btrfs_scrub_dev()
3037 wake_up(&fs_info->scrub_pause_wait); in btrfs_scrub_dev()
3040 memcpy(progress, &sctx->stat, sizeof(*progress)); in btrfs_scrub_dev()
3046 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_dev()
3047 dev->scrub_ctx = NULL; in btrfs_scrub_dev()
3048 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_dev()
3060 trans = btrfs_start_transaction(fs_info->tree_root, 0); in btrfs_scrub_dev()
3083 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_pause()
3084 atomic_inc(&fs_info->scrub_pause_req); in btrfs_scrub_pause()
3085 while (atomic_read(&fs_info->scrubs_paused) != in btrfs_scrub_pause()
3086 atomic_read(&fs_info->scrubs_running)) { in btrfs_scrub_pause()
3087 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_pause()
3088 wait_event(fs_info->scrub_pause_wait, in btrfs_scrub_pause()
3089 atomic_read(&fs_info->scrubs_paused) == in btrfs_scrub_pause()
3090 atomic_read(&fs_info->scrubs_running)); in btrfs_scrub_pause()
3091 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_pause()
3093 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_pause()
3098 atomic_dec(&fs_info->scrub_pause_req); in btrfs_scrub_continue()
3099 wake_up(&fs_info->scrub_pause_wait); in btrfs_scrub_continue()
3104 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_cancel()
3105 if (!atomic_read(&fs_info->scrubs_running)) { in btrfs_scrub_cancel()
3106 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel()
3107 return -ENOTCONN; in btrfs_scrub_cancel()
3110 atomic_inc(&fs_info->scrub_cancel_req); in btrfs_scrub_cancel()
3111 while (atomic_read(&fs_info->scrubs_running)) { in btrfs_scrub_cancel()
3112 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel()
3113 wait_event(fs_info->scrub_pause_wait, in btrfs_scrub_cancel()
3114 atomic_read(&fs_info->scrubs_running) == 0); in btrfs_scrub_cancel()
3115 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_cancel()
3117 atomic_dec(&fs_info->scrub_cancel_req); in btrfs_scrub_cancel()
3118 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel()
3125 struct btrfs_fs_info *fs_info = dev->fs_info; in btrfs_scrub_cancel_dev()
3128 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_cancel_dev()
3129 sctx = dev->scrub_ctx; in btrfs_scrub_cancel_dev()
3131 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel_dev()
3132 return -ENOTCONN; in btrfs_scrub_cancel_dev()
3134 atomic_inc(&sctx->cancel_req); in btrfs_scrub_cancel_dev()
3135 while (dev->scrub_ctx) { in btrfs_scrub_cancel_dev()
3136 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel_dev()
3137 wait_event(fs_info->scrub_pause_wait, in btrfs_scrub_cancel_dev()
3138 dev->scrub_ctx == NULL); in btrfs_scrub_cancel_dev()
3139 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_cancel_dev()
3141 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel_dev()
3153 mutex_lock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_progress()
3154 dev = btrfs_find_device(fs_info->fs_devices, &args); in btrfs_scrub_progress()
3156 sctx = dev->scrub_ctx; in btrfs_scrub_progress()
3158 memcpy(progress, &sctx->stat, sizeof(*progress)); in btrfs_scrub_progress()
3159 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_progress()
3161 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV; in btrfs_scrub_progress()