// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2018-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_bit.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "scrub/scrub.h"
#include "scrub/bitmap.h"

#include <linux/interval_tree_generic.h>

/* u64 bitmap */

struct xbitmap64_node {
	struct rb_node	bn_rbnode;

	/* First set bit of this interval and subtree. */
	uint64_t	bn_start;

	/* Last set bit of this interval. */
	uint64_t	bn_last;

	/* Last set bit of this subtree.  Do not touch this. */
	uint64_t	__bn_subtree_last;
};
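
/*
 * Each node describes one run of contiguous set bits, [bn_start, bn_last]
 * inclusive.  Because the set and clear helpers below merge and trim
 * neighboring nodes, the tree only ever contains disjoint, non-adjacent runs;
 * a bitmap with bits 5-9 and 20-24 set is stored as two nodes, not as ten
 * individual bits.
 */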

/* Define our own interval tree type with uint64_t parameters. */

#define START(node) ((node)->bn_start)
#define LAST(node)  ((node)->bn_last)

/*
 * These functions are defined by the INTERVAL_TREE_DEFINE macro, but we'll
 * forward-declare them anyway for clarity.
 */
static inline __maybe_unused void
xbitmap64_tree_insert(struct xbitmap64_node *node, struct rb_root_cached *root);

static inline __maybe_unused void
xbitmap64_tree_remove(struct xbitmap64_node *node, struct rb_root_cached *root);

static inline __maybe_unused struct xbitmap64_node *
xbitmap64_tree_iter_first(struct rb_root_cached *root, uint64_t start,
			  uint64_t last);

static inline __maybe_unused struct xbitmap64_node *
xbitmap64_tree_iter_next(struct xbitmap64_node *node, uint64_t start,
			 uint64_t last);

INTERVAL_TREE_DEFINE(struct xbitmap64_node, bn_rbnode, uint64_t,
		__bn_subtree_last, START, LAST, static inline __maybe_unused,
		xbitmap64_tree)

/* Iterate each interval of a bitmap.  Do not change the bitmap. */
#define for_each_xbitmap64_extent(bn, bitmap) \
	for ((bn) = rb_entry_safe(rb_first(&(bitmap)->xb_root.rb_root), \
				   struct xbitmap64_node, bn_rbnode); \
	     (bn) != NULL; \
	     (bn) = rb_entry_safe(rb_next(&(bn)->bn_rbnode), \
				   struct xbitmap64_node, bn_rbnode))

/* Clear a range of this bitmap. */
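/*
 * Each interval that overlaps [start, start + len) is trimmed against the
 * clearing range: a run spanning the whole range is split in two, a run
 * overlapping only the left or right edge is shortened, and a run lying
 * entirely inside the range is freed.  For example, clearing bits 10-19 from
 * a lone run of 5-24 leaves the runs 5-9 and 20-24.
 */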
int
xbitmap64_clear(
	struct xbitmap64	*bitmap,
	uint64_t		start,
	uint64_t		len)
{
	struct xbitmap64_node	*bn;
	struct xbitmap64_node	*new_bn;
	uint64_t		last = start + len - 1;

	while ((bn = xbitmap64_tree_iter_first(&bitmap->xb_root, start, last))) {
		if (bn->bn_start < start && bn->bn_last > last) {
			uint64_t	old_last = bn->bn_last;

			/* overlaps with the entire clearing range */
			xbitmap64_tree_remove(bn, &bitmap->xb_root);
			bn->bn_last = start - 1;
			xbitmap64_tree_insert(bn, &bitmap->xb_root);

			/* add an extent */
			new_bn = kmalloc(sizeof(struct xbitmap64_node),
					XCHK_GFP_FLAGS);
			if (!new_bn)
				return -ENOMEM;
			new_bn->bn_start = last + 1;
			new_bn->bn_last = old_last;
			xbitmap64_tree_insert(new_bn, &bitmap->xb_root);
		} else if (bn->bn_start < start) {
			/* overlaps with the left side of the clearing range */
			xbitmap64_tree_remove(bn, &bitmap->xb_root);
			bn->bn_last = start - 1;
			xbitmap64_tree_insert(bn, &bitmap->xb_root);
		} else if (bn->bn_last > last) {
			/* overlaps with the right side of the clearing range */
			xbitmap64_tree_remove(bn, &bitmap->xb_root);
			bn->bn_start = last + 1;
			xbitmap64_tree_insert(bn, &bitmap->xb_root);
			break;
		} else {
			/* in the middle of the clearing range */
			xbitmap64_tree_remove(bn, &bitmap->xb_root);
			kfree(bn);
		}
	}

	return 0;
}

/* Set a range of this bitmap. */
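/*
 * Setting a range clears any partial overlaps first, then looks for runs
 * ending at start - 1 or beginning at last + 1 so that the new bits merge
 * into their neighbors rather than leaving adjacent nodes behind.  For
 * example, setting bits 10-14 while 5-9 and 15-19 are already set collapses
 * all three pieces into a single 5-19 run.
 */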
int
xbitmap64_set(
	struct xbitmap64	*bitmap,
	uint64_t		start,
	uint64_t		len)
{
	struct xbitmap64_node	*left;
	struct xbitmap64_node	*right;
	uint64_t		last = start + len - 1;
	int			error;

	/* Is this whole range already set? */
	left = xbitmap64_tree_iter_first(&bitmap->xb_root, start, last);
	if (left && left->bn_start <= start && left->bn_last >= last)
		return 0;

	/* Clear out everything in the range we want to set. */
	error = xbitmap64_clear(bitmap, start, len);
	if (error)
		return error;

	/* Do we have a left-adjacent extent? */
	left = xbitmap64_tree_iter_first(&bitmap->xb_root, start - 1, start - 1);
	ASSERT(!left || left->bn_last + 1 == start);

	/* Do we have a right-adjacent extent? */
	right = xbitmap64_tree_iter_first(&bitmap->xb_root, last + 1, last + 1);
	ASSERT(!right || right->bn_start == last + 1);

	if (left && right) {
		/* combine left and right adjacent extent */
		xbitmap64_tree_remove(left, &bitmap->xb_root);
		xbitmap64_tree_remove(right, &bitmap->xb_root);
		left->bn_last = right->bn_last;
		xbitmap64_tree_insert(left, &bitmap->xb_root);
		kfree(right);
	} else if (left) {
		/* combine with left extent */
		xbitmap64_tree_remove(left, &bitmap->xb_root);
		left->bn_last = last;
		xbitmap64_tree_insert(left, &bitmap->xb_root);
	} else if (right) {
		/* combine with right extent */
		xbitmap64_tree_remove(right, &bitmap->xb_root);
		right->bn_start = start;
		xbitmap64_tree_insert(right, &bitmap->xb_root);
	} else {
		/* add an extent */
		left = kmalloc(sizeof(struct xbitmap64_node), XCHK_GFP_FLAGS);
		if (!left)
			return -ENOMEM;
		left->bn_start = start;
		left->bn_last = last;
		xbitmap64_tree_insert(left, &bitmap->xb_root);
	}

	return 0;
}

/* Free everything related to this bitmap. */
void
xbitmap64_destroy(
	struct xbitmap64	*bitmap)
{
	struct xbitmap64_node	*bn;

	while ((bn = xbitmap64_tree_iter_first(&bitmap->xb_root, 0, -1ULL))) {
		xbitmap64_tree_remove(bn, &bitmap->xb_root);
		kfree(bn);
	}
}

/* Initialize an empty bitmap. */
void
xbitmap64_init(
	struct xbitmap64	*bitmap)
{
	bitmap->xb_root = RB_ROOT_CACHED;
}

/*
 * Remove all the blocks mentioned in @sub from the extents in @bitmap.
 *
 * The intent is that callers will iterate the rmapbt for all of its records
 * for a given owner to generate @bitmap; and iterate all the blocks of the
 * metadata structures that are not being rebuilt and have the same rmapbt
 * owner to generate @sub.  This routine subtracts all the extents
 * mentioned in sub from all the extents linked in @bitmap, which leaves
 * @bitmap as the list of blocks that are not accounted for, which we assume
 * are the dead blocks of the old metadata structure.  The blocks mentioned in
 * @bitmap can be reaped.
 *
 * This is the logical equivalent of bitmap &= ~sub.
 */
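/*
 * For example, if @bitmap holds the run 100-119 and @sub holds 104-107,
 * @bitmap is left holding the runs 100-103 and 108-119.
 */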
int
xbitmap64_disunion(
	struct xbitmap64	*bitmap,
	struct xbitmap64	*sub)
{
	struct xbitmap64_node	*bn;
	int			error;

	if (xbitmap64_empty(bitmap) || xbitmap64_empty(sub))
		return 0;

	for_each_xbitmap64_extent(bn, sub) {
		error = xbitmap64_clear(bitmap, bn->bn_start,
				bn->bn_last - bn->bn_start + 1);
		if (error)
			return error;
	}

	return 0;
}

/* How many bits are set in this bitmap? */
uint64_t
xbitmap64_hweight(
	struct xbitmap64	*bitmap)
{
	struct xbitmap64_node	*bn;
	uint64_t		ret = 0;

	for_each_xbitmap64_extent(bn, bitmap)
		ret += bn->bn_last - bn->bn_start + 1;

	return ret;
}

/* Call a function for every run of set bits in this bitmap. */
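/*
 * The callback receives the first bit of each run, the length of the run,
 * and @priv; a nonzero return value stops the walk and is returned to the
 * caller.
 */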
int
xbitmap64_walk(
	struct xbitmap64	*bitmap,
	xbitmap64_walk_fn	fn,
	void			*priv)
{
	struct xbitmap64_node	*bn;
	int			error = 0;

	for_each_xbitmap64_extent(bn, bitmap) {
		error = fn(bn->bn_start, bn->bn_last - bn->bn_start + 1, priv);
		if (error)
			break;
	}

	return error;
}

/* Does this bitmap have no bits set at all? */
bool
xbitmap64_empty(
	struct xbitmap64	*bitmap)
{
	return bitmap->xb_root.rb_root.rb_node == NULL;
}

/* Is the start of the range set or clear?  And for how long? */
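/*
 * Returns true if the bit at @start is set, shrinking *len if the set run
 * ends before @start + *len; returns false if the bit is clear, shrinking
 * *len to the distance to the next set bit when one lies inside the queried
 * range.  For example, with only bits 5-9 set, a query at start 7 with
 * *len 10 returns true and sets *len to 3, while a query at start 2 with
 * *len 10 returns false and also sets *len to 3.
 */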
bool
xbitmap64_test(
	struct xbitmap64	*bitmap,
	uint64_t		start,
	uint64_t		*len)
{
	struct xbitmap64_node	*bn;
	uint64_t		last = start + *len - 1;

	bn = xbitmap64_tree_iter_first(&bitmap->xb_root, start, last);
	if (!bn)
		return false;
	if (bn->bn_start <= start) {
		if (bn->bn_last < last)
			*len = bn->bn_last - start + 1;
		return true;
	}
	*len = bn->bn_start - start;
	return false;
}

/* u32 bitmap */
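
/*
 * This is a uint32_t clone of the interval-tree bitmap above; the START and
 * LAST accessors defined for the 64-bit flavor are reused because they only
 * dereference bn_start and bn_last.
 */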

struct xbitmap32_node {
	struct rb_node	bn_rbnode;

	/* First set bit of this interval and subtree. */
	uint32_t	bn_start;

	/* Last set bit of this interval. */
	uint32_t	bn_last;

	/* Last set bit of this subtree.  Do not touch this. */
	uint32_t	__bn_subtree_last;
};

/* Define our own interval tree type with uint32_t parameters. */

/*
 * These functions are defined by the INTERVAL_TREE_DEFINE macro, but we'll
 * forward-declare them anyway for clarity.
 */
static inline __maybe_unused void
xbitmap32_tree_insert(struct xbitmap32_node *node, struct rb_root_cached *root);

static inline __maybe_unused void
xbitmap32_tree_remove(struct xbitmap32_node *node, struct rb_root_cached *root);

static inline __maybe_unused struct xbitmap32_node *
xbitmap32_tree_iter_first(struct rb_root_cached *root, uint32_t start,
			  uint32_t last);

static inline __maybe_unused struct xbitmap32_node *
xbitmap32_tree_iter_next(struct xbitmap32_node *node, uint32_t start,
			 uint32_t last);

INTERVAL_TREE_DEFINE(struct xbitmap32_node, bn_rbnode, uint32_t,
		__bn_subtree_last, START, LAST, static inline __maybe_unused,
		xbitmap32_tree)

/* Iterate each interval of a bitmap.  Do not change the bitmap. */
#define for_each_xbitmap32_extent(bn, bitmap) \
	for ((bn) = rb_entry_safe(rb_first(&(bitmap)->xb_root.rb_root), \
				   struct xbitmap32_node, bn_rbnode); \
	     (bn) != NULL; \
	     (bn) = rb_entry_safe(rb_next(&(bn)->bn_rbnode), \
				   struct xbitmap32_node, bn_rbnode))

/* Clear a range of this bitmap. */
int
xbitmap32_clear(
	struct xbitmap32	*bitmap,
	uint32_t		start,
	uint32_t		len)
{
	struct xbitmap32_node	*bn;
	struct xbitmap32_node	*new_bn;
	uint32_t		last = start + len - 1;

	while ((bn = xbitmap32_tree_iter_first(&bitmap->xb_root, start, last))) {
		if (bn->bn_start < start && bn->bn_last > last) {
			uint32_t	old_last = bn->bn_last;

			/* overlaps with the entire clearing range */
			xbitmap32_tree_remove(bn, &bitmap->xb_root);
			bn->bn_last = start - 1;
			xbitmap32_tree_insert(bn, &bitmap->xb_root);

			/* add an extent */
			new_bn = kmalloc(sizeof(struct xbitmap32_node),
					XCHK_GFP_FLAGS);
			if (!new_bn)
				return -ENOMEM;
			new_bn->bn_start = last + 1;
			new_bn->bn_last = old_last;
			xbitmap32_tree_insert(new_bn, &bitmap->xb_root);
		} else if (bn->bn_start < start) {
			/* overlaps with the left side of the clearing range */
			xbitmap32_tree_remove(bn, &bitmap->xb_root);
			bn->bn_last = start - 1;
			xbitmap32_tree_insert(bn, &bitmap->xb_root);
		} else if (bn->bn_last > last) {
			/* overlaps with the right side of the clearing range */
			xbitmap32_tree_remove(bn, &bitmap->xb_root);
			bn->bn_start = last + 1;
			xbitmap32_tree_insert(bn, &bitmap->xb_root);
			break;
		} else {
			/* in the middle of the clearing range */
			xbitmap32_tree_remove(bn, &bitmap->xb_root);
			kfree(bn);
		}
	}

	return 0;
}

/* Set a range of this bitmap. */
int
xbitmap32_set(
	struct xbitmap32	*bitmap,
	uint32_t		start,
	uint32_t		len)
{
	struct xbitmap32_node	*left;
	struct xbitmap32_node	*right;
	uint32_t		last = start + len - 1;
	int			error;

	/* Is this whole range already set? */
	left = xbitmap32_tree_iter_first(&bitmap->xb_root, start, last);
	if (left && left->bn_start <= start && left->bn_last >= last)
		return 0;

	/* Clear out everything in the range we want to set. */
	error = xbitmap32_clear(bitmap, start, len);
	if (error)
		return error;

	/* Do we have a left-adjacent extent? */
	left = xbitmap32_tree_iter_first(&bitmap->xb_root, start - 1, start - 1);
	ASSERT(!left || left->bn_last + 1 == start);

	/* Do we have a right-adjacent extent? */
	right = xbitmap32_tree_iter_first(&bitmap->xb_root, last + 1, last + 1);
	ASSERT(!right || right->bn_start == last + 1);

	if (left && right) {
		/* combine left and right adjacent extent */
		xbitmap32_tree_remove(left, &bitmap->xb_root);
		xbitmap32_tree_remove(right, &bitmap->xb_root);
		left->bn_last = right->bn_last;
		xbitmap32_tree_insert(left, &bitmap->xb_root);
		kfree(right);
	} else if (left) {
		/* combine with left extent */
		xbitmap32_tree_remove(left, &bitmap->xb_root);
		left->bn_last = last;
		xbitmap32_tree_insert(left, &bitmap->xb_root);
	} else if (right) {
		/* combine with right extent */
		xbitmap32_tree_remove(right, &bitmap->xb_root);
		right->bn_start = start;
		xbitmap32_tree_insert(right, &bitmap->xb_root);
	} else {
		/* add an extent */
		left = kmalloc(sizeof(struct xbitmap32_node), XCHK_GFP_FLAGS);
		if (!left)
			return -ENOMEM;
		left->bn_start = start;
		left->bn_last = last;
		xbitmap32_tree_insert(left, &bitmap->xb_root);
	}

	return 0;
}

/* Free everything related to this bitmap. */
void
xbitmap32_destroy(
	struct xbitmap32	*bitmap)
{
	struct xbitmap32_node	*bn;

	while ((bn = xbitmap32_tree_iter_first(&bitmap->xb_root, 0, -1U))) {
		xbitmap32_tree_remove(bn, &bitmap->xb_root);
		kfree(bn);
	}
}

/* Initialize an empty bitmap. */
void
xbitmap32_init(
	struct xbitmap32	*bitmap)
{
	bitmap->xb_root = RB_ROOT_CACHED;
}

/*
 * Remove all the blocks mentioned in @sub from the extents in @bitmap.
 *
 * The intent is that callers will iterate the rmapbt for all of its records
 * for a given owner to generate @bitmap; and iterate all the blocks of the
 * metadata structures that are not being rebuilt and have the same rmapbt
 * owner to generate @sub.  This routine subtracts all the extents
 * mentioned in sub from all the extents linked in @bitmap, which leaves
 * @bitmap as the list of blocks that are not accounted for, which we assume
 * are the dead blocks of the old metadata structure.  The blocks mentioned in
 * @bitmap can be reaped.
 *
 * This is the logical equivalent of bitmap &= ~sub.
 */
int
xbitmap32_disunion(
	struct xbitmap32	*bitmap,
	struct xbitmap32	*sub)
{
	struct xbitmap32_node	*bn;
	int			error;

	if (xbitmap32_empty(bitmap) || xbitmap32_empty(sub))
		return 0;

	for_each_xbitmap32_extent(bn, sub) {
		error = xbitmap32_clear(bitmap, bn->bn_start,
				bn->bn_last - bn->bn_start + 1);
		if (error)
			return error;
	}

	return 0;
}

/* How many bits are set in this bitmap? */
uint32_t
xbitmap32_hweight(
	struct xbitmap32	*bitmap)
{
	struct xbitmap32_node	*bn;
	uint32_t		ret = 0;

	for_each_xbitmap32_extent(bn, bitmap)
		ret += bn->bn_last - bn->bn_start + 1;

	return ret;
}

/* Call a function for every run of set bits in this bitmap. */
int
xbitmap32_walk(
	struct xbitmap32	*bitmap,
	xbitmap32_walk_fn	fn,
	void			*priv)
{
	struct xbitmap32_node	*bn;
	int			error = 0;

	for_each_xbitmap32_extent(bn, bitmap) {
		error = fn(bn->bn_start, bn->bn_last - bn->bn_start + 1, priv);
		if (error)
			break;
	}

	return error;
}

/* Does this bitmap have no bits set at all? */
bool
xbitmap32_empty(
	struct xbitmap32	*bitmap)
{
	return bitmap->xb_root.rb_root.rb_node == NULL;
}

/* Is the start of the range set or clear?  And for how long? */
bool
xbitmap32_test(
	struct xbitmap32	*bitmap,
	uint32_t		start,
	uint32_t		*len)
{
	struct xbitmap32_node	*bn;
	uint32_t		last = start + *len - 1;

	bn = xbitmap32_tree_iter_first(&bitmap->xb_root, start, last);
	if (!bn)
		return false;
	if (bn->bn_start <= start) {
		if (bn->bn_last < last)
			*len = bn->bn_last - start + 1;
		return true;
	}
	*len = bn->bn_start - start;
	return false;
}

/* Count the number of set regions in this bitmap. */
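/*
 * Each tree node covers one maximal run of set bits, so this is simply the
 * number of nodes in the tree.
 */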
uint32_t
xbitmap32_count_set_regions(
	struct xbitmap32	*bitmap)
{
	struct xbitmap32_node	*bn;
	uint32_t		nr = 0;

	for_each_xbitmap32_extent(bn, bitmap)
		nr++;

	return nr;
}