Lines Matching +full:data +full:- +full:mirror

1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
9 #include "dm-bio-record.h"
17 #include <linux/device-mapper.h>
18 #include <linux/dm-io.h>
19 #include <linux/dm-dirty-log.h>
20 #include <linux/dm-kcopyd.h>
21 #include <linux/dm-region-hash.h>
33 #define errors_handled(p) ((p)->features & DM_RAID1_HANDLE_ERRORS)
34 #define keep_log(p) ((p)->features & DM_RAID1_KEEP_LOG)
39 *---------------------------------------------------------------
40 * Mirror set structures.
41 *---------------------------------------------------------------
50 struct mirror {
81 atomic_t default_mirror; /* Default mirror */
91 struct mirror mirror[];
101 queue_work(ms->kmirrord_wq, &ms->kmirrord_work); in wakeup_mirrord()
108 clear_bit(0, &ms->timer_pending); in delayed_wake_fn()
114 if (test_and_set_bit(0, &ms->timer_pending)) in delayed_wake()
117 ms->timer.expires = jiffies + HZ / 5; in delayed_wake()
118 add_timer(&ms->timer); in delayed_wake()
132 bl = (rw == WRITE) ? &ms->writes : &ms->reads; in queue_bio()
133 spin_lock_irqsave(&ms->lock, flags); in queue_bio()
134 should_wake = !(bl->head); in queue_bio()
136 spin_unlock_irqrestore(&ms->lock, flags); in queue_bio()
152 struct mirror *m;
153 /* if details->bi_bdev == NULL, details were not saved */
159 * Every mirror should look like this one.
164 * This is yucky. We squirrel the mirror struct away inside
168 static struct mirror *bio_get_m(struct bio *bio) in bio_get_m()
170 return (struct mirror *) bio->bi_next; in bio_get_m()
173 static void bio_set_m(struct bio *bio, struct mirror *m) in bio_set_m()
175 bio->bi_next = (struct bio *) m; in bio_set_m()
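
The comment above refers to stashing the mirror pointer in bio->bi_next while the bio is held by dm_io(): read_async_bio() records the leg before submission and read_callback() recovers it on completion (both appear further down in this listing). A minimal sketch of that round trip; the body below is illustrative, not the file's exact code.

/*
 * Sketch: the submitter calls bio_set_m(bio, m) just before dm_io(), and the
 * completion callback retrieves the leg.  This is safe only because the bio
 * is never passed to lower layers while bi_next is borrowed.
 */
static void read_callback_sketch(unsigned long error, void *context)
{
	struct bio *bio = context;
	struct mirror *m = bio_get_m(bio);	/* leg recorded at submission */

	bio_set_m(bio, NULL);			/* give bi_next back before completion */
	if (!error) {
		bio_endio(bio);
		return;
	}
	fail_mirror(m, DM_RAID1_READ_ERROR);	/* mark the leg, then retry or fail the I/O */
}
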
178 static struct mirror *get_default_mirror(struct mirror_set *ms) in get_default_mirror()
180 return &ms->mirror[atomic_read(&ms->default_mirror)]; in get_default_mirror()
183 static void set_default_mirror(struct mirror *m) in set_default_mirror()
185 struct mirror_set *ms = m->ms; in set_default_mirror()
186 struct mirror *m0 = &(ms->mirror[0]); in set_default_mirror()
188 atomic_set(&ms->default_mirror, m - m0); in set_default_mirror()
191 static struct mirror *get_valid_mirror(struct mirror_set *ms) in get_valid_mirror()
193 struct mirror *m; in get_valid_mirror()
195 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++) in get_valid_mirror()
196 if (!atomic_read(&m->error_count)) in get_valid_mirror()
203 * @m: mirror device to fail
212 * only if the mirror is in-sync.
216 static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type) in fail_mirror()
218 struct mirror_set *ms = m->ms; in fail_mirror()
219 struct mirror *new; in fail_mirror()
221 ms->leg_failure = 1; in fail_mirror()
228 atomic_inc(&m->error_count); in fail_mirror()
230 if (test_and_set_bit(error_type, &m->error_type)) in fail_mirror()
239 if (!ms->in_sync && !keep_log(ms)) { in fail_mirror()
242 * than to risk returning corrupt data. in fail_mirror()
244 DMERR("Primary mirror (%s) failed while out-of-sync: Reads may fail.", in fail_mirror()
245 m->dev->name); in fail_mirror()
253 DMWARN("All sides of mirror have failed."); in fail_mirror()
256 queue_work(dm_raid1_wq, &ms->trigger_event); in fail_mirror()
261 struct mirror_set *ms = ti->private; in mirror_flush()
266 struct mirror *m; in mirror_flush()
271 .client = ms->io_client, in mirror_flush()
274 for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) { in mirror_flush()
275 io[i].bdev = m->dev->bdev; in mirror_flush()
280 error_bits = -1; in mirror_flush()
281 dm_io(&io_req, ms->nr_mirrors, io, &error_bits, IOPRIO_DEFAULT); in mirror_flush()
283 for (i = 0; i < ms->nr_mirrors; i++) in mirror_flush()
285 fail_mirror(ms->mirror + i, in mirror_flush()
287 return -EIO; in mirror_flush()
294 *---------------------------------------------------------------
297 * When a mirror is first activated we may find that some regions
298 * are in the no-sync state. We have to recover these by
299 * recopying from the default mirror to all the others.
300 *---------------------------------------------------------------
310 /* Read error means the failure of the default mirror. */ in recovery_complete()
311 DMERR_LIMIT("Unable to read primary mirror during recovery"); in recovery_complete()
319 * Bits correspond to devices (excluding default mirror). in recovery_complete()
320 * The default mirror cannot change during recovery. in recovery_complete()
322 for (m = 0; m < ms->nr_mirrors; m++) { in recovery_complete()
323 if (&ms->mirror[m] == get_default_mirror(ms)) in recovery_complete()
326 fail_mirror(ms->mirror + m, in recovery_complete()
339 struct mirror *m; in recover()
342 sector_t region_size = dm_rh_get_region_size(ms->rh); in recover()
346 from.bdev = m->dev->bdev; in recover()
347 from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key); in recover()
348 if (key == (ms->nr_regions - 1)) { in recover()
353 from.count = ms->ti->len & (region_size - 1); in recover()
360 for (i = 0, dest = to; i < ms->nr_mirrors; i++) { in recover()
361 if (&ms->mirror[i] == get_default_mirror(ms)) in recover()
364 m = ms->mirror + i; in recover()
365 dest->bdev = m->dev->bdev; in recover()
366 dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key); in recover()
367 dest->count = from.count; in recover()
375 dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, in recover()
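
To make the copy-extent arithmetic in recover() concrete, here is a small stand-alone model in plain C (all values illustrative): the region size is a power of two in sectors, so the final, possibly partial region covers ti->len & (region_size - 1) sectors, and each region is copied from the default mirror to the remaining nr_mirrors - 1 legs.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t region_size = 1024;	/* sectors per region (power of two) */
	uint64_t ti_len = 10000;	/* target length in sectors */
	uint64_t nr_regions = (ti_len + region_size - 1) / region_size;	/* 10 */
	unsigned int nr_mirrors = 3;

	/* Every region but the last copies region_size sectors; the last one
	 * copies only the remainder (falling back to a full region when the
	 * length is an exact multiple). */
	uint64_t last_count = ti_len & (region_size - 1);	/* 10000 & 1023 = 784 */
	if (!last_count)
		last_count = region_size;

	printf("region %llu: copy %llu sectors to %u destination legs\n",
	       (unsigned long long)(nr_regions - 1),
	       (unsigned long long)last_count, nr_mirrors - 1);
	return 0;
}
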
383 ms->leg_failure = 0; in reset_ms_flags()
384 for (m = 0; m < ms->nr_mirrors; m++) { in reset_ms_flags()
385 atomic_set(&(ms->mirror[m].error_count), 0); in reset_ms_flags()
386 ms->mirror[m].error_type = 0; in reset_ms_flags()
393 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); in do_recovery()
398 dm_rh_recovery_prepare(ms->rh); in do_recovery()
403 while ((reg = dm_rh_recovery_start(ms->rh))) in do_recovery()
409 if (!ms->in_sync && in do_recovery()
410 (log->type->get_sync_count(log) == ms->nr_regions)) { in do_recovery()
412 dm_table_event(ms->ti->table); in do_recovery()
413 ms->in_sync = 1; in do_recovery()
419 *---------------------------------------------------------------
421 *---------------------------------------------------------------
423 static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector) in choose_mirror()
425 struct mirror *m = get_default_mirror(ms); in choose_mirror()
428 if (likely(!atomic_read(&m->error_count))) in choose_mirror()
431 if (m-- == ms->mirror) in choose_mirror()
432 m += ms->nr_mirrors; in choose_mirror()
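
The matched lines above omit the loop around the wrap-around step. A stand-alone model of the selection in plain C, assuming a single backwards pass that gives up once it is back at the default leg; the names choose_mirror_model, error_count[] and NR_MIRRORS are illustrative.

#include <stdio.h>

#define NR_MIRRORS 3

static int error_count[NR_MIRRORS] = { 1, 0, 0 };	/* leg 0 (the default) has failed */

/* Walk backwards from the default leg, wrapping around, until a healthy
 * leg is found or the walk returns to its starting point. */
static int choose_mirror_model(int default_mirror)
{
	int m = default_mirror;

	do {
		if (!error_count[m])
			return m;
		if (m-- == 0)
			m += NR_MIRRORS;
	} while (m != default_mirror);

	return -1;	/* every leg has errors */
}

int main(void)
{
	printf("chosen leg: %d\n", choose_mirror_model(0));	/* prints 2 */
	return 0;
}
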
438 static int default_ok(struct mirror *m) in default_ok()
440 struct mirror *default_mirror = get_default_mirror(m->ms); in default_ok()
442 return !atomic_read(&default_mirror->error_count); in default_ok()
447 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); in mirror_available()
448 region_t region = dm_rh_bio_to_region(ms->rh, bio); in mirror_available()
450 if (log->type->in_sync(log, region, 0)) in mirror_available()
451 return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0; in mirror_available()
457 * remap a buffer to a particular mirror.
459 static sector_t map_sector(struct mirror *m, struct bio *bio) in map_sector()
461 if (unlikely(!bio->bi_iter.bi_size)) in map_sector()
463 return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector); in map_sector()
466 static void map_bio(struct mirror *m, struct bio *bio) in map_bio()
468 bio_set_dev(bio, m->dev->bdev); in map_bio()
469 bio->bi_iter.bi_sector = map_sector(m, bio); in map_bio()
472 static void map_region(struct dm_io_region *io, struct mirror *m, in map_region()
475 io->bdev = m->dev->bdev; in map_region()
476 io->sector = map_sector(m, bio); in map_region()
477 io->count = bio_sectors(bio); in map_region()
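
map_sector() adds the leg's offset to dm_target_offset(ti, sector), which in device-mapper.h is simply the bio sector minus ti->begin (the bi_size check above short-circuits empty bios such as flushes, whose sector is irrelevant). A worked example with illustrative numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ti_begin = 2048;	/* sector where the target starts in the mapped device */
	uint64_t m_offset = 384;	/* leg's start offset on its underlying device */
	uint64_t bi_sector = 5120;	/* bio's sector within the mapped device */

	/* dm_target_offset(ti, s) == s - ti->begin; map_sector() adds m->offset. */
	uint64_t mapped = m_offset + (bi_sector - ti_begin);

	printf("remapped sector: %llu\n", (unsigned long long)mapped);	/* 3456 */
	return 0;
}
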
486 spin_lock_irq(&ms->lock); in hold_bio()
488 if (atomic_read(&ms->suspend)) { in hold_bio()
489 spin_unlock_irq(&ms->lock); in hold_bio()
494 if (dm_noflush_suspending(ms->ti)) in hold_bio()
495 bio->bi_status = BLK_STS_DM_REQUEUE; in hold_bio()
497 bio->bi_status = BLK_STS_IOERR; in hold_bio()
506 bio_list_add(&ms->holds, bio); in hold_bio()
507 spin_unlock_irq(&ms->lock); in hold_bio()
511 *---------------------------------------------------------------
513 *---------------------------------------------------------------
518 struct mirror *m; in read_callback()
530 if (likely(default_ok(m)) || mirror_available(m->ms, bio)) { in read_callback()
531 DMWARN_LIMIT("Read failure on mirror device %s. Trying alternative device.", in read_callback()
532 m->dev->name); in read_callback()
533 queue_bio(m->ms, bio, bio_data_dir(bio)); in read_callback()
537 DMERR_LIMIT("Read failure on mirror device %s. Failing I/O.", in read_callback()
538 m->dev->name); in read_callback()
543 static void read_async_bio(struct mirror *m, struct bio *bio) in read_async_bio()
552 .client = m->ms->io_client, in read_async_bio()
563 int state = dm_rh_get_state(ms->rh, region, may_block); in region_in_sync()
571 struct mirror *m; in do_reads()
574 region = dm_rh_bio_to_region(ms->rh, bio); in do_reads()
581 m = choose_mirror(ms, bio->bi_iter.bi_sector); in do_reads()
582 else if (m && atomic_read(&m->error_count)) in do_reads()
593 *---------------------------------------------------------------------
601 * NOSYNC: increment pending, just write to the default mirror
602 *---------------------------------------------------------------------
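
Only the NOSYNC line of the comment matched above; the overall policy is that writes to in-sync regions go to every leg, writes to NOSYNC regions go only to the default leg, and writes to regions being recovered are delayed. A hypothetical helper sketching the classification step that do_writes() performs with dm_rh_get_state() (the DM_RH_* states are the ones defined in dm-region-hash.h; the helper name is illustrative):

static struct bio_list *classify_write(struct mirror_set *ms, region_t region,
				       struct bio_list *sync,
				       struct bio_list *nosync,
				       struct bio_list *recover)
{
	switch (dm_rh_get_state(ms->rh, region, 1)) {
	case DM_RH_CLEAN:
	case DM_RH_DIRTY:
		return sync;		/* in sync: write to all mirrors */
	case DM_RH_NOSYNC:
		return nosync;		/* write to the default mirror only */
	case DM_RH_RECOVERING:
	default:
		return recover;		/* dm_rh_delay() until recovery completes */
	}
}
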
612 ms = bio_get_m(bio)->ms; in write_callback()
631 bio->bi_status = BLK_STS_NOTSUPP; in write_callback()
636 for (i = 0; i < ms->nr_mirrors; i++) in write_callback()
638 fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR); in write_callback()
645 spin_lock_irqsave(&ms->lock, flags); in write_callback()
646 if (!ms->failures.head) in write_callback()
648 bio_list_add(&ms->failures, bio); in write_callback()
649 spin_unlock_irqrestore(&ms->lock, flags); in write_callback()
658 struct mirror *m; in do_write()
659 blk_opf_t op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH | REQ_ATOMIC); in do_write()
666 .client = ms->io_client, in do_write()
675 for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) in do_write()
679 * Use default mirror because we only need it to retrieve the reference in do_write()
680 * to the mirror set in write_callback(). in do_write()
684 BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL, IOPRIO_DEFAULT)); in do_write()
693 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); in do_writes()
696 if (!writes->head) in do_writes()
708 if ((bio->bi_opf & REQ_PREFLUSH) || in do_writes()
714 region = dm_rh_bio_to_region(ms->rh, bio); in do_writes()
716 if (log->type->is_remote_recovering && in do_writes()
717 log->type->is_remote_recovering(log, region)) { in do_writes()
722 state = dm_rh_get_state(ms->rh, region, 1); in do_writes()
746 spin_lock_irq(&ms->lock); in do_writes()
747 bio_list_merge(&ms->writes, &requeue); in do_writes()
748 spin_unlock_irq(&ms->lock); in do_writes()
757 dm_rh_inc_pending(ms->rh, &sync); in do_writes()
758 dm_rh_inc_pending(ms->rh, &nosync); in do_writes()
765 ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure; in do_writes()
770 if (unlikely(ms->log_failure) && errors_handled(ms)) { in do_writes()
771 spin_lock_irq(&ms->lock); in do_writes()
772 bio_list_merge(&ms->failures, &sync); in do_writes()
773 spin_unlock_irq(&ms->lock); in do_writes()
780 dm_rh_delay(ms->rh, bio); in do_writes()
783 if (unlikely(ms->leg_failure) && errors_handled(ms) && !keep_log(ms)) { in do_writes()
784 spin_lock_irq(&ms->lock); in do_writes()
785 bio_list_add(&ms->failures, bio); in do_writes()
786 spin_unlock_irq(&ms->lock); in do_writes()
799 if (likely(!failures->head)) in do_failures()
809 * to reconfigure the mirror, at which point the core in do_failures()
820 if (!ms->log_failure) { in do_failures()
821 ms->in_sync = 0; in do_failures()
822 dm_rh_mark_nosync(ms->rh, bio); in do_failures()
838 if (unlikely(!get_valid_mirror(ms) || (keep_log(ms) && ms->log_failure))) in do_failures()
852 dm_table_event(ms->ti->table); in trigger_event()
856 *---------------------------------------------------------------
858 *---------------------------------------------------------------
867 spin_lock_irqsave(&ms->lock, flags); in do_mirror()
868 reads = ms->reads; in do_mirror()
869 writes = ms->writes; in do_mirror()
870 failures = ms->failures; in do_mirror()
871 bio_list_init(&ms->reads); in do_mirror()
872 bio_list_init(&ms->writes); in do_mirror()
873 bio_list_init(&ms->failures); in do_mirror()
874 spin_unlock_irqrestore(&ms->lock, flags); in do_mirror()
876 dm_rh_update_states(ms->rh, errors_handled(ms)); in do_mirror()
884 *---------------------------------------------------------------
886 *---------------------------------------------------------------
894 kzalloc(struct_size(ms, mirror, nr_mirrors), GFP_KERNEL); in alloc_context()
897 ti->error = "Cannot allocate mirror context"; in alloc_context()
901 spin_lock_init(&ms->lock); in alloc_context()
902 bio_list_init(&ms->reads); in alloc_context()
903 bio_list_init(&ms->writes); in alloc_context()
904 bio_list_init(&ms->failures); in alloc_context()
905 bio_list_init(&ms->holds); in alloc_context()
907 ms->ti = ti; in alloc_context()
908 ms->nr_mirrors = nr_mirrors; in alloc_context()
909 ms->nr_regions = dm_sector_div_up(ti->len, region_size); in alloc_context()
910 ms->in_sync = 0; in alloc_context()
911 ms->log_failure = 0; in alloc_context()
912 ms->leg_failure = 0; in alloc_context()
913 atomic_set(&ms->suspend, 0); in alloc_context()
914 atomic_set(&ms->default_mirror, DEFAULT_MIRROR); in alloc_context()
916 ms->io_client = dm_io_client_create(); in alloc_context()
917 if (IS_ERR(ms->io_client)) { in alloc_context()
918 ti->error = "Error creating dm_io client"; in alloc_context()
923 ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord, in alloc_context()
925 ms->ti->begin, MAX_RECOVERY, in alloc_context()
926 dl, region_size, ms->nr_regions); in alloc_context()
927 if (IS_ERR(ms->rh)) { in alloc_context()
928 ti->error = "Error creating dirty region hash"; in alloc_context()
929 dm_io_client_destroy(ms->io_client); in alloc_context()
940 while (m--) in free_context()
941 dm_put_device(ti, ms->mirror[m].dev); in free_context()
943 dm_io_client_destroy(ms->io_client); in free_context()
944 dm_region_hash_destroy(ms->rh); in free_context()
949 unsigned int mirror, char **argv) in get_mirror()
957 ti->error = "Invalid offset"; in get_mirror()
958 return -EINVAL; in get_mirror()
961 ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), in get_mirror()
962 &ms->mirror[mirror].dev); in get_mirror()
964 ti->error = "Device lookup failure"; in get_mirror()
968 ms->mirror[mirror].ms = ms; in get_mirror()
969 atomic_set(&(ms->mirror[mirror].error_count), 0); in get_mirror()
970 ms->mirror[mirror].error_type = 0; in get_mirror()
971 ms->mirror[mirror].offset = offset; in get_mirror()
988 ti->error = "Insufficient mirror log arguments"; in create_dirty_log()
993 ti->error = "Invalid mirror log argument count"; in create_dirty_log()
1000 ti->error = "Insufficient mirror log arguments"; in create_dirty_log()
1007 ti->error = "Error creating mirror dirty log"; in create_dirty_log()
1018 struct dm_target *ti = ms->ti; in parse_features()
1028 ti->error = "Invalid number of features"; in parse_features()
1029 return -EINVAL; in parse_features()
1032 argc--; in parse_features()
1037 ti->error = "Not enough arguments to support feature count"; in parse_features()
1038 return -EINVAL; in parse_features()
1043 ms->features |= DM_RAID1_HANDLE_ERRORS; in parse_features()
1045 ms->features |= DM_RAID1_KEEP_LOG; in parse_features()
1047 ti->error = "Unrecognised feature requested"; in parse_features()
1048 return -EINVAL; in parse_features()
1051 argc--; in parse_features()
1056 ti->error = "keep_log feature requires the handle_errors feature"; in parse_features()
1057 return -EINVAL; in parse_features()
1064 * Construct a mirror mapping:
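
The constructor arguments that mirror_ctr() walks through (a dirty-log specification, the number of mirrors, a device/offset pair per leg, and an optional feature list) can be pictured as the argument vector below. Everything in it is illustrative only: the device names, region size and counts are not taken from this file, and per parse_features() the keep_log feature could additionally be listed, but only together with handle_errors.

/* Illustrative argv layout for the "mirror" target, in the order consumed
 * by create_dirty_log(), get_mirror() and parse_features(). */
static const char *example_mirror_args[] = {
	"core", "2", "1024", "nosync",	/* log_type #log_params <log_params> */
	"2",				/* #mirrors */
	"/dev/sdb1", "0",		/* first leg: device offset */
	"/dev/sdc1", "0",		/* second leg: device offset */
	"1", "handle_errors",		/* optional: #features <feature>... */
};
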
1085 return -EINVAL; in mirror_ctr()
1088 argc -= args_used; in mirror_ctr()
1092 ti->error = "Invalid number of mirrors"; in mirror_ctr()
1094 return -EINVAL; in mirror_ctr()
1097 argv++, argc--; in mirror_ctr()
1100 ti->error = "Too few mirror arguments"; in mirror_ctr()
1102 return -EINVAL; in mirror_ctr()
1105 ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl); in mirror_ctr()
1108 return -ENOMEM; in mirror_ctr()
1111 /* Get the mirror parameter sets */ in mirror_ctr()
1119 argc -= 2; in mirror_ctr()
1122 ti->private = ms; in mirror_ctr()
1124 r = dm_set_target_max_io_len(ti, dm_rh_get_region_size(ms->rh)); in mirror_ctr()
1128 ti->num_flush_bios = 1; in mirror_ctr()
1129 ti->num_discard_bios = 1; in mirror_ctr()
1130 ti->per_io_data_size = sizeof(struct dm_raid1_bio_record); in mirror_ctr()
1132 ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0); in mirror_ctr()
1133 if (!ms->kmirrord_wq) { in mirror_ctr()
1135 r = -ENOMEM; in mirror_ctr()
1138 INIT_WORK(&ms->kmirrord_work, do_mirror); in mirror_ctr()
1139 timer_setup(&ms->timer, delayed_wake_fn, 0); in mirror_ctr()
1140 ms->timer_pending = 0; in mirror_ctr()
1141 INIT_WORK(&ms->trigger_event, trigger_event); in mirror_ctr()
1148 argc -= args_used; in mirror_ctr()
1151 * Any read-balancing addition depends on the in mirror_ctr()
1160 ti->error = "Too many mirror arguments"; in mirror_ctr()
1161 r = -EINVAL; in mirror_ctr()
1165 ms->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle); in mirror_ctr()
1166 if (IS_ERR(ms->kcopyd_client)) { in mirror_ctr()
1167 r = PTR_ERR(ms->kcopyd_client); in mirror_ctr()
1175 destroy_workqueue(ms->kmirrord_wq); in mirror_ctr()
1177 free_context(ms, ti, ms->nr_mirrors); in mirror_ctr()
1183 struct mirror_set *ms = ti->private; in mirror_dtr()
1185 del_timer_sync(&ms->timer); in mirror_dtr()
1186 flush_workqueue(ms->kmirrord_wq); in mirror_dtr()
1187 flush_work(&ms->trigger_event); in mirror_dtr()
1188 dm_kcopyd_client_destroy(ms->kcopyd_client); in mirror_dtr()
1189 destroy_workqueue(ms->kmirrord_wq); in mirror_dtr()
1190 free_context(ms, ti, ms->nr_mirrors); in mirror_dtr()
1194 * Mirror mapping function
1199 struct mirror *m; in mirror_map()
1200 struct mirror_set *ms = ti->private; in mirror_map()
1201 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); in mirror_map()
1205 bio_record->details.bi_bdev = NULL; in mirror_map()
1209 bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio); in mirror_map()
1214 r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0); in mirror_map()
1215 if (r < 0 && r != -EWOULDBLOCK) in mirror_map()
1219 * If region is not in-sync queue the bio. in mirror_map()
1221 if (!r || (r == -EWOULDBLOCK)) { in mirror_map()
1222 if (bio->bi_opf & REQ_RAHEAD) in mirror_map()
1230 * The region is in-sync and we can perform reads directly. in mirror_map()
1233 m = choose_mirror(ms, bio->bi_iter.bi_sector); in mirror_map()
1237 dm_bio_record(&bio_record->details, bio); in mirror_map()
1238 bio_record->m = m; in mirror_map()
1249 struct mirror_set *ms = ti->private; in mirror_end_io()
1250 struct mirror *m = NULL; in mirror_end_io()
1259 if (!(bio->bi_opf & REQ_PREFLUSH) && in mirror_end_io()
1261 dm_rh_dec(ms->rh, bio_record->write_region); in mirror_end_io()
1268 if (bio->bi_opf & REQ_RAHEAD) in mirror_end_io()
1272 if (!bio_record->details.bi_bdev) { in mirror_end_io()
1276 * mirror in-sync. in mirror_end_io()
1278 DMERR_LIMIT("Mirror read failed."); in mirror_end_io()
1282 m = bio_record->m; in mirror_end_io()
1284 DMERR("Mirror read failed from %s. Trying alternative device.", in mirror_end_io()
1285 m->dev->name); in mirror_end_io()
1291 * mirror. in mirror_end_io()
1294 bd = &bio_record->details; in mirror_end_io()
1297 bio_record->details.bi_bdev = NULL; in mirror_end_io()
1298 bio->bi_status = 0; in mirror_end_io()
1307 bio_record->details.bi_bdev = NULL; in mirror_end_io()
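
Several of the fragments above belong to mirror_end_io()'s read-retry path: mirror_map() snapshots each read with dm_bio_record() (the dm-bio-record.h helper), and if the read later fails while another in-sync leg is available, the bio is rewound and queued again. A condensed, hypothetical helper showing that sequence (the helper name is illustrative; the individual statements mirror the fragments above):

static int retry_read_sketch(struct mirror_set *ms, struct bio *bio,
			     struct dm_raid1_bio_record *bio_record)
{
	dm_bio_restore(&bio_record->details, bio);	/* undo the remap and any partial advance */
	bio_record->details.bi_bdev = NULL;		/* the saved details are now consumed */
	bio->bi_status = 0;				/* clear the error before retrying */

	queue_bio(ms, bio, bio_data_dir(bio));		/* hand the bio back to the kmirrord worker */
	return DM_ENDIO_INCOMPLETE;			/* tell the core the bio is not finished */
}
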
1314 struct mirror_set *ms = ti->private; in mirror_presuspend()
1315 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); in mirror_presuspend()
1320 atomic_set(&ms->suspend, 1); in mirror_presuspend()
1325 * a chance to be added in the hold list because ms->suspend in mirror_presuspend()
1328 spin_lock_irq(&ms->lock); in mirror_presuspend()
1329 holds = ms->holds; in mirror_presuspend()
1330 bio_list_init(&ms->holds); in mirror_presuspend()
1331 spin_unlock_irq(&ms->lock); in mirror_presuspend()
1340 dm_rh_stop_recovery(ms->rh); in mirror_presuspend()
1343 !dm_rh_recovery_in_flight(ms->rh)); in mirror_presuspend()
1345 if (log->type->presuspend && log->type->presuspend(log)) in mirror_presuspend()
1355 flush_workqueue(ms->kmirrord_wq); in mirror_presuspend()
1360 struct mirror_set *ms = ti->private; in mirror_postsuspend()
1361 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); in mirror_postsuspend()
1363 if (log->type->postsuspend && log->type->postsuspend(log)) in mirror_postsuspend()
1370 struct mirror_set *ms = ti->private; in mirror_resume()
1371 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); in mirror_resume()
1373 atomic_set(&ms->suspend, 0); in mirror_resume()
1374 if (log->type->resume && log->type->resume(log)) in mirror_resume()
1377 dm_rh_start_recovery(ms->rh); in mirror_resume()
1382 * @m: mirror device/leg we want the status of
1386 * A => Alive - No failures
1387 * D => Dead - A write failure occurred leaving mirror out-of-sync
1388 * S => Sync - A synchronization failure occurred, mirror out-of-sync
1389 * R => Read - A read failure occurred, mirror data unaffected
1393 static char device_status_char(struct mirror *m) in device_status_char()
1395 if (!atomic_read(&(m->error_count))) in device_status_char()
1398 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' : in device_status_char()
1399 (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' : in device_status_char()
1400 (test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' : in device_status_char()
1401 (test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U'; in device_status_char()
1410 struct mirror_set *ms = ti->private; in mirror_status()
1411 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); in mirror_status()
1416 DMEMIT("%d ", ms->nr_mirrors); in mirror_status()
1417 for (m = 0; m < ms->nr_mirrors; m++) { in mirror_status()
1418 DMEMIT("%s ", ms->mirror[m].dev->name); in mirror_status()
1419 buffer[m] = device_status_char(&(ms->mirror[m])); in mirror_status()
1424 (unsigned long long)log->type->get_sync_count(log), in mirror_status()
1425 (unsigned long long)ms->nr_regions, buffer); in mirror_status()
1427 sz += log->type->status(log, type, result+sz, maxlen-sz); in mirror_status()
1432 sz = log->type->status(log, type, result, maxlen); in mirror_status()
1434 DMEMIT("%d", ms->nr_mirrors); in mirror_status()
1435 for (m = 0; m < ms->nr_mirrors; m++) in mirror_status()
1436 DMEMIT(" %s %llu", ms->mirror[m].dev->name, in mirror_status()
1437 (unsigned long long)ms->mirror[m].offset); in mirror_status()
1452 DMEMIT_TARGET_NAME_VERSION(ti->type); in mirror_status()
1453 DMEMIT(",nr_mirrors=%d", ms->nr_mirrors); in mirror_status()
1454 for (m = 0; m < ms->nr_mirrors; m++) { in mirror_status()
1455 DMEMIT(",mirror_device_%d=%s", m, ms->mirror[m].dev->name); in mirror_status()
1457 m, device_status_char(&(ms->mirror[m]))); in mirror_status()
1464 sz += log->type->status(log, type, result+sz, maxlen-sz); in mirror_status()
1471 iterate_devices_callout_fn fn, void *data) in mirror_iterate_devices()
1473 struct mirror_set *ms = ti->private; in mirror_iterate_devices()
1477 for (i = 0; !ret && i < ms->nr_mirrors; i++) in mirror_iterate_devices()
1478 ret = fn(ti, ms->mirror[i].dev, in mirror_iterate_devices()
1479 ms->mirror[i].offset, ti->len, data); in mirror_iterate_devices()
1485 .name = "mirror",
1507 return -ENOMEM; in dm_mirror_init()
1529 MODULE_DESCRIPTION(DM_NAME " mirror target");