1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 #include <stdbool.h>
4 #include <stdio.h>
5 #include <stdlib.h>
6 
7 #include "generated/bit-length.h"
8 
9 #include "maple-shared.h"
10 #include "vma_internal.h"
11 
12 /* Include this so its header guard is set. */
13 #include "../../../mm/vma.h"
14 
15 static bool fail_prealloc;
16 
17 /* Then override vma_iter_prealloc() so we can choose to fail it. */
18 #define vma_iter_prealloc(vmi, vma)					\
19 	(fail_prealloc ? -ENOMEM : mas_preallocate(&(vmi)->mas, (vma), GFP_KERNEL))
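
/*
 * Because this macro is defined before mm/vma.c is included below, the
 * imported implementation picks up the stub, letting individual tests force
 * an -ENOMEM result simply by setting fail_prealloc.
 */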
20 
21 #define CONFIG_DEFAULT_MMAP_MIN_ADDR 65536
22 
23 unsigned long mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
24 unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
25 unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
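
/*
 * Stand-ins for kernel globals referenced by vma.c: a 64 KiB minimum mmap
 * address and a 256-page stack guard gap.
 */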
26 
27 /*
28  * Directly import the VMA implementation here. Our vma_internal.h wrapper
29  * provides userland-equivalent functionality for everything vma.c uses.
30  */
31 #include "../../../mm/vma.c"
32 
33 const struct vm_operations_struct vma_dummy_vm_ops;
34 static struct anon_vma dummy_anon_vma;
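
/*
 * dummy_anon_vma stands in for a real anon_vma; the userland stubs record
 * clone and unlink operations in its was_cloned/was_unlinked flags so that
 * tests can observe anon_vma duplication.
 */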
35 
36 #define ASSERT_TRUE(_expr)						\
37 	do {								\
38 		if (!(_expr)) {						\
39 			fprintf(stderr,					\
40 				"Assert FAILED at %s:%d:%s(): %s is FALSE.\n", \
41 				__FILE__, __LINE__, __FUNCTION__, #_expr); \
42 			return false;					\
43 		}							\
44 	} while (0)
45 #define ASSERT_FALSE(_expr) ASSERT_TRUE(!(_expr))
46 #define ASSERT_EQ(_val1, _val2) ASSERT_TRUE((_val1) == (_val2))
47 #define ASSERT_NE(_val1, _val2) ASSERT_TRUE((_val1) != (_val2))
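
/*
 * These helpers assume they are invoked from a test function returning bool:
 * on failure they log the location and return false, which the TEST() runner
 * in main() counts as a test failure.
 */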
48 
49 static struct task_struct __current;
50 
51 struct task_struct *get_current(void)
52 {
53 	return &__current;
54 }
55 
56 unsigned long rlimit(unsigned int limit)
57 {
58 	return (unsigned long)-1;
59 }
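
/* Report every rlimit as effectively unlimited so limit checks never fail. */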
60 
61 /* Helper function to simply allocate a VMA. */
62 static struct vm_area_struct *alloc_vma(struct mm_struct *mm,
63 					unsigned long start,
64 					unsigned long end,
65 					pgoff_t pgoff,
66 					vm_flags_t flags)
67 {
68 	struct vm_area_struct *ret = vm_area_alloc(mm);
69 
70 	if (ret == NULL)
71 		return NULL;
72 
73 	ret->vm_start = start;
74 	ret->vm_end = end;
75 	ret->vm_pgoff = pgoff;
76 	ret->__vm_flags = flags;
77 
78 	return ret;
79 }
80 
81 /* Helper function to allocate a VMA and link it to the tree. */
82 static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
83 						 unsigned long start,
84 						 unsigned long end,
85 						 pgoff_t pgoff,
86 						 vm_flags_t flags)
87 {
88 	struct vm_area_struct *vma = alloc_vma(mm, start, end, pgoff, flags);
89 
90 	if (vma == NULL)
91 		return NULL;
92 
93 	if (vma_link(mm, vma)) {
94 		vm_area_free(vma);
95 		return NULL;
96 	}
97 
98 	/*
99 	 * Reset this counter which we use to track whether writes have
100 	 * begun. Linking to the tree will have caused this to be incremented,
101 	 * which means we will get a false positive otherwise.
102 	 */
103 	vma->vm_lock_seq = UINT_MAX;
104 
105 	return vma;
106 }
107 
108 /* Helper function which provides a wrapper around a merge new VMA operation. */
109 static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg)
110 {
111 	/*
112 	 * For convenience, obtain the prev and next VMAs, which the new VMA
113 	 * operation requires, and reposition the iterator on the new range.
114 	 */
115 	vmg->next = vma_next(vmg->vmi);
116 	vmg->prev = vma_prev(vmg->vmi);
117 	vma_iter_next_range(vmg->vmi);
118 
119 	return vma_merge_new_range(vmg);
120 }
121 
122 /*
123  * Helper function which provides a wrapper around a merge existing VMA
124  * operation.
125  */
126 static struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg)
127 {
128 	return vma_merge_existing_range(vmg);
129 }
130 
131 /*
132  * Helper function which provides a wrapper around the expansion of an existing
133  * VMA.
134  */
135 static int expand_existing(struct vma_merge_struct *vmg)
136 {
137 	return vma_expand(vmg);
138 }
139 
140 /*
141  * Helper function to reset the merge state and the associated VMA iterator
142  * to a specified new range.
143  */
144 static void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
145 			  unsigned long end, pgoff_t pgoff, vm_flags_t flags)
146 {
147 	vma_iter_set(vmg->vmi, start);
148 
149 	vmg->prev = NULL;
150 	vmg->next = NULL;
151 	vmg->vma = NULL;
152 
153 	vmg->start = start;
154 	vmg->end = end;
155 	vmg->pgoff = pgoff;
156 	vmg->flags = flags;
157 }
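
/*
 * Note: the tests below use page-aligned ranges in units of 0x1000 (assuming
 * the 4 KiB PAGE_SIZE provided by the userland headers), with pgoff chosen as
 * start >> PAGE_SHIFT so that offset checks in the merge logic line up.
 */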
158 
159 /*
160  * Helper function to try to merge a new VMA.
161  *
162  * Update vmg and the iterator for it and try to merge, otherwise allocate a new
163  * VMA, link it to the maple tree and return it.
164  */
165 static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm,
166 						struct vma_merge_struct *vmg,
167 						unsigned long start, unsigned long end,
168 						pgoff_t pgoff, vm_flags_t flags,
169 						bool *was_merged)
170 {
171 	struct vm_area_struct *merged;
172 
173 	vmg_set_range(vmg, start, end, pgoff, flags);
174 
175 	merged = merge_new(vmg);
176 	if (merged) {
177 		*was_merged = true;
178 		ASSERT_EQ(vmg->state, VMA_MERGE_SUCCESS);
179 		return merged;
180 	}
181 
182 	*was_merged = false;
183 
184 	ASSERT_EQ(vmg->state, VMA_MERGE_NOMERGE);
185 
186 	return alloc_and_link_vma(mm, start, end, pgoff, flags);
187 }
188 
189 /*
190  * Helper function to reset the dummy anon_vma to indicate it has not been
191  * duplicated.
192  */
193 static void reset_dummy_anon_vma(void)
194 {
195 	dummy_anon_vma.was_cloned = false;
196 	dummy_anon_vma.was_unlinked = false;
197 }
198 
199 /*
200  * Helper function to remove all VMAs and destroy the maple tree associated with
201  * a virtual address space. Returns the number of VMAs that were in the tree.
202  */
203 static int cleanup_mm(struct mm_struct *mm, struct vma_iterator *vmi)
204 {
205 	struct vm_area_struct *vma;
206 	int count = 0;
207 
208 	fail_prealloc = false;
209 	reset_dummy_anon_vma();
210 
211 	vma_iter_set(vmi, 0);
212 	for_each_vma(*vmi, vma) {
213 		vm_area_free(vma);
214 		count++;
215 	}
216 
217 	mtree_destroy(&mm->mm_mt);
218 	mm->map_count = 0;
219 	return count;
220 }
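
/*
 * Note that cleanup_mm() also resets fail_prealloc and the dummy anon_vma
 * flags, so each test that uses it starts from a clean slate.
 */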
221 
222 /* Helper function to determine if VMA has had vma_start_write() performed. */
223 static bool vma_write_started(struct vm_area_struct *vma)
224 {
225 	int seq = vma->vm_lock_seq;
226 
227 	/* We reset after each check. */
228 	vma->vm_lock_seq = UINT_MAX;
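	/* UINT_MAX reads as -1 once converted back to a signed int below. */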
229 
230 	/* The vma_start_write() stub simply increments this value. */
231 	return seq > -1;
232 }
233 
234 /* Helper function providing a dummy vm_ops->close() method. */
235 static void dummy_close(struct vm_area_struct *)
236 {
237 }
238 
239 static bool test_simple_merge(void)
240 {
241 	struct vm_area_struct *vma;
242 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
243 	struct mm_struct mm = {};
244 	struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, flags);
245 	struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, flags);
246 	VMA_ITERATOR(vmi, &mm, 0x1000);
247 	struct vma_merge_struct vmg = {
248 		.mm = &mm,
249 		.vmi = &vmi,
250 		.start = 0x1000,
251 		.end = 0x2000,
252 		.flags = flags,
253 		.pgoff = 1,
254 	};
255 
256 	ASSERT_FALSE(vma_link(&mm, vma_left));
257 	ASSERT_FALSE(vma_link(&mm, vma_right));
258 
259 	vma = merge_new(&vmg);
260 	ASSERT_NE(vma, NULL);
261 
262 	ASSERT_EQ(vma->vm_start, 0);
263 	ASSERT_EQ(vma->vm_end, 0x3000);
264 	ASSERT_EQ(vma->vm_pgoff, 0);
265 	ASSERT_EQ(vma->vm_flags, flags);
266 
267 	vm_area_free(vma);
268 	mtree_destroy(&mm.mm_mt);
269 
270 	return true;
271 }
272 
273 static bool test_simple_modify(void)
274 {
275 	struct vm_area_struct *vma;
276 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
277 	struct mm_struct mm = {};
278 	struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
279 	VMA_ITERATOR(vmi, &mm, 0x1000);
280 
281 	ASSERT_FALSE(vma_link(&mm, init_vma));
282 
283 	/*
284 	 * The flags will not be changed; the vma_modify_flags() function
285 	 * performs the merge/split only.
286 	 */
287 	vma = vma_modify_flags(&vmi, init_vma, init_vma,
288 			       0x1000, 0x2000, VM_READ | VM_MAYREAD);
289 	ASSERT_NE(vma, NULL);
290 	/* We modify the provided VMA, and on split allocate new VMAs. */
291 	ASSERT_EQ(vma, init_vma);
292 
293 	ASSERT_EQ(vma->vm_start, 0x1000);
294 	ASSERT_EQ(vma->vm_end, 0x2000);
295 	ASSERT_EQ(vma->vm_pgoff, 1);
296 
297 	/*
298 	 * Now walk through the three split VMAs and make sure they are as
299 	 * expected.
300 	 */
301 
302 	vma_iter_set(&vmi, 0);
303 	vma = vma_iter_load(&vmi);
304 
305 	ASSERT_EQ(vma->vm_start, 0);
306 	ASSERT_EQ(vma->vm_end, 0x1000);
307 	ASSERT_EQ(vma->vm_pgoff, 0);
308 
309 	vm_area_free(vma);
310 	vma_iter_clear(&vmi);
311 
312 	vma = vma_next(&vmi);
313 
314 	ASSERT_EQ(vma->vm_start, 0x1000);
315 	ASSERT_EQ(vma->vm_end, 0x2000);
316 	ASSERT_EQ(vma->vm_pgoff, 1);
317 
318 	vm_area_free(vma);
319 	vma_iter_clear(&vmi);
320 
321 	vma = vma_next(&vmi);
322 
323 	ASSERT_EQ(vma->vm_start, 0x2000);
324 	ASSERT_EQ(vma->vm_end, 0x3000);
325 	ASSERT_EQ(vma->vm_pgoff, 2);
326 
327 	vm_area_free(vma);
328 	mtree_destroy(&mm.mm_mt);
329 
330 	return true;
331 }
332 
333 static bool test_simple_expand(void)
334 {
335 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
336 	struct mm_struct mm = {};
337 	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, flags);
338 	VMA_ITERATOR(vmi, &mm, 0);
339 	struct vma_merge_struct vmg = {
340 		.vmi = &vmi,
341 		.vma = vma,
342 		.start = 0,
343 		.end = 0x3000,
344 		.pgoff = 0,
345 	};
346 
347 	ASSERT_FALSE(vma_link(&mm, vma));
348 
349 	ASSERT_FALSE(expand_existing(&vmg));
350 
351 	ASSERT_EQ(vma->vm_start, 0);
352 	ASSERT_EQ(vma->vm_end, 0x3000);
353 	ASSERT_EQ(vma->vm_pgoff, 0);
354 
355 	vm_area_free(vma);
356 	mtree_destroy(&mm.mm_mt);
357 
358 	return true;
359 }
360 
361 static bool test_simple_shrink(void)
362 {
363 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
364 	struct mm_struct mm = {};
365 	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
366 	VMA_ITERATOR(vmi, &mm, 0);
367 
368 	ASSERT_FALSE(vma_link(&mm, vma));
369 
370 	ASSERT_FALSE(vma_shrink(&vmi, vma, 0, 0x1000, 0));
371 
372 	ASSERT_EQ(vma->vm_start, 0);
373 	ASSERT_EQ(vma->vm_end, 0x1000);
374 	ASSERT_EQ(vma->vm_pgoff, 0);
375 
376 	vm_area_free(vma);
377 	mtree_destroy(&mm.mm_mt);
378 
379 	return true;
380 }
381 
382 static bool test_merge_new(void)
383 {
384 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
385 	struct mm_struct mm = {};
386 	VMA_ITERATOR(vmi, &mm, 0);
387 	struct vma_merge_struct vmg = {
388 		.mm = &mm,
389 		.vmi = &vmi,
390 	};
391 	struct anon_vma_chain dummy_anon_vma_chain_a = {
392 		.anon_vma = &dummy_anon_vma,
393 	};
394 	struct anon_vma_chain dummy_anon_vma_chain_b = {
395 		.anon_vma = &dummy_anon_vma,
396 	};
397 	struct anon_vma_chain dummy_anon_vma_chain_c = {
398 		.anon_vma = &dummy_anon_vma,
399 	};
400 	struct anon_vma_chain dummy_anon_vma_chain_d = {
401 		.anon_vma = &dummy_anon_vma,
402 	};
403 	const struct vm_operations_struct vm_ops = {
404 		.close = dummy_close,
405 	};
406 	int count;
407 	struct vm_area_struct *vma, *vma_a, *vma_b, *vma_c, *vma_d;
408 	bool merged;
409 
410 	/*
411 	 * 0123456789abc
412 	 * AA B       CC
413 	 */
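	/*
	 * The diagrams in this test sketch the address space in 0x1000 units:
	 * letters mark existing VMAs and '*' marks the range the next
	 * operation targets.
	 */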
414 	vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
415 	ASSERT_NE(vma_a, NULL);
416 	/* We give each VMA a single avc so we can test anon_vma duplication. */
417 	INIT_LIST_HEAD(&vma_a->anon_vma_chain);
418 	list_add(&dummy_anon_vma_chain_a.same_vma, &vma_a->anon_vma_chain);
419 
420 	vma_b = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
421 	ASSERT_NE(vma_b, NULL);
422 	INIT_LIST_HEAD(&vma_b->anon_vma_chain);
423 	list_add(&dummy_anon_vma_chain_b.same_vma, &vma_b->anon_vma_chain);
424 
425 	vma_c = alloc_and_link_vma(&mm, 0xb000, 0xc000, 0xb, flags);
426 	ASSERT_NE(vma_c, NULL);
427 	INIT_LIST_HEAD(&vma_c->anon_vma_chain);
428 	list_add(&dummy_anon_vma_chain_c.same_vma, &vma_c->anon_vma_chain);
429 
430 	/*
431 	 * NO merge.
432 	 *
433 	 * 0123456789abc
434 	 * AA B   **  CC
435 	 */
436 	vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, flags, &merged);
437 	ASSERT_NE(vma_d, NULL);
438 	INIT_LIST_HEAD(&vma_d->anon_vma_chain);
439 	list_add(&dummy_anon_vma_chain_d.same_vma, &vma_d->anon_vma_chain);
440 	ASSERT_FALSE(merged);
441 	ASSERT_EQ(mm.map_count, 4);
442 
443 	/*
444 	 * Merge BOTH sides.
445 	 *
446 	 * 0123456789abc
447 	 * AA*B   DD  CC
448 	 */
449 	vma_a->vm_ops = &vm_ops; /* This should have no impact. */
450 	vma_b->anon_vma = &dummy_anon_vma;
451 	vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, flags, &merged);
452 	ASSERT_EQ(vma, vma_a);
453 	/* Merge with A, delete B. */
454 	ASSERT_TRUE(merged);
455 	ASSERT_EQ(vma->vm_start, 0);
456 	ASSERT_EQ(vma->vm_end, 0x4000);
457 	ASSERT_EQ(vma->vm_pgoff, 0);
458 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
459 	ASSERT_TRUE(vma_write_started(vma));
460 	ASSERT_EQ(mm.map_count, 3);
461 
462 	/*
463 	 * Merge to PREVIOUS VMA.
464 	 *
465 	 * 0123456789abc
466 	 * AAAA*  DD  CC
467 	 */
468 	vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, flags, &merged);
469 	ASSERT_EQ(vma, vma_a);
470 	/* Extend A. */
471 	ASSERT_TRUE(merged);
472 	ASSERT_EQ(vma->vm_start, 0);
473 	ASSERT_EQ(vma->vm_end, 0x5000);
474 	ASSERT_EQ(vma->vm_pgoff, 0);
475 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
476 	ASSERT_TRUE(vma_write_started(vma));
477 	ASSERT_EQ(mm.map_count, 3);
478 
479 	/*
480 	 * Merge to NEXT VMA.
481 	 *
482 	 * 0123456789abc
483 	 * AAAAA *DD  CC
484 	 */
485 	vma_d->anon_vma = &dummy_anon_vma;
486 	vma_d->vm_ops = &vm_ops; /* This should have no impact. */
487 	vma = try_merge_new_vma(&mm, &vmg, 0x6000, 0x7000, 6, flags, &merged);
488 	ASSERT_EQ(vma, vma_d);
489 	/* Prepend. */
490 	ASSERT_TRUE(merged);
491 	ASSERT_EQ(vma->vm_start, 0x6000);
492 	ASSERT_EQ(vma->vm_end, 0x9000);
493 	ASSERT_EQ(vma->vm_pgoff, 6);
494 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
495 	ASSERT_TRUE(vma_write_started(vma));
496 	ASSERT_EQ(mm.map_count, 3);
497 
498 	/*
499 	 * Merge BOTH sides.
500 	 *
501 	 * 0123456789abc
502 	 * AAAAA*DDD  CC
503 	 */
504 	vma_d->vm_ops = NULL; /* This would otherwise degrade the merge. */
505 	vma = try_merge_new_vma(&mm, &vmg, 0x5000, 0x6000, 5, flags, &merged);
506 	ASSERT_EQ(vma, vma_a);
507 	/* Merge with A, delete D. */
508 	ASSERT_TRUE(merged);
509 	ASSERT_EQ(vma->vm_start, 0);
510 	ASSERT_EQ(vma->vm_end, 0x9000);
511 	ASSERT_EQ(vma->vm_pgoff, 0);
512 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
513 	ASSERT_TRUE(vma_write_started(vma));
514 	ASSERT_EQ(mm.map_count, 2);
515 
516 	/*
517 	 * Merge to NEXT VMA.
518 	 *
519 	 * 0123456789abc
520 	 * AAAAAAAAA *CC
521 	 */
522 	vma_c->anon_vma = &dummy_anon_vma;
523 	vma = try_merge_new_vma(&mm, &vmg, 0xa000, 0xb000, 0xa, flags, &merged);
524 	ASSERT_EQ(vma, vma_c);
525 	/* Prepend C. */
526 	ASSERT_TRUE(merged);
527 	ASSERT_EQ(vma->vm_start, 0xa000);
528 	ASSERT_EQ(vma->vm_end, 0xc000);
529 	ASSERT_EQ(vma->vm_pgoff, 0xa);
530 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
531 	ASSERT_TRUE(vma_write_started(vma));
532 	ASSERT_EQ(mm.map_count, 2);
533 
534 	/*
535 	 * Merge BOTH sides.
536 	 *
537 	 * 0123456789abc
538 	 * AAAAAAAAA*CCC
539 	 */
540 	vma = try_merge_new_vma(&mm, &vmg, 0x9000, 0xa000, 0x9, flags, &merged);
541 	ASSERT_EQ(vma, vma_a);
542 	/* Extend A and delete C. */
543 	ASSERT_TRUE(merged);
544 	ASSERT_EQ(vma->vm_start, 0);
545 	ASSERT_EQ(vma->vm_end, 0xc000);
546 	ASSERT_EQ(vma->vm_pgoff, 0);
547 	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
548 	ASSERT_TRUE(vma_write_started(vma));
549 	ASSERT_EQ(mm.map_count, 1);
550 
551 	/*
552 	 * Final state.
553 	 *
554 	 * 0123456789abc
555 	 * AAAAAAAAAAAAA
556 	 */
557 
558 	count = 0;
559 	vma_iter_set(&vmi, 0);
560 	for_each_vma(vmi, vma) {
561 		ASSERT_NE(vma, NULL);
562 		ASSERT_EQ(vma->vm_start, 0);
563 		ASSERT_EQ(vma->vm_end, 0xc000);
564 		ASSERT_EQ(vma->vm_pgoff, 0);
565 		ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
566 
567 		vm_area_free(vma);
568 		count++;
569 	}
570 
571 	/* Should only have one VMA left (though freed) after all is done. */
572 	ASSERT_EQ(count, 1);
573 
574 	mtree_destroy(&mm.mm_mt);
575 	return true;
576 }
577 
578 static bool test_vma_merge_special_flags(void)
579 {
580 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
581 	struct mm_struct mm = {};
582 	VMA_ITERATOR(vmi, &mm, 0);
583 	struct vma_merge_struct vmg = {
584 		.mm = &mm,
585 		.vmi = &vmi,
586 	};
587 	vm_flags_t special_flags[] = { VM_IO, VM_DONTEXPAND, VM_PFNMAP, VM_MIXEDMAP };
588 	vm_flags_t all_special_flags = 0;
589 	int i;
590 	struct vm_area_struct *vma_left, *vma;
591 
592 	/* Make sure there aren't new VM_SPECIAL flags. */
593 	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
594 		all_special_flags |= special_flags[i];
595 	}
596 	ASSERT_EQ(all_special_flags, VM_SPECIAL);
597 
598 	/*
599 	 * 01234
600 	 * AAA
601 	 */
602 	vma_left = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
603 	ASSERT_NE(vma_left, NULL);
604 
605 	/* 1. Set up new VMA with special flag that would otherwise merge. */
606 
607 	/*
608 	 * 01234
609 	 * AAA*
610 	 *
611 	 * This should merge if not for the VM_SPECIAL flag.
612 	 */
613 	vmg_set_range(&vmg, 0x3000, 0x4000, 3, flags);
614 	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
615 		vm_flags_t special_flag = special_flags[i];
616 
617 		vma_left->__vm_flags = flags | special_flag;
618 		vmg.flags = flags | special_flag;
619 		vma = merge_new(&vmg);
620 		ASSERT_EQ(vma, NULL);
621 		ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
622 	}
623 
624 	/* 2. Modify VMA with special flag that would otherwise merge. */
625 
626 	/*
627 	 * 01234
628 	 * AAAB
629 	 *
630 	 * Create a VMA to modify.
631 	 */
632 	vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
633 	ASSERT_NE(vma, NULL);
634 	vmg.vma = vma;
635 
636 	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
637 		vm_flags_t special_flag = special_flags[i];
638 
639 		vma_left->__vm_flags = flags | special_flag;
640 		vmg.flags = flags | special_flag;
641 		vma = merge_existing(&vmg);
642 		ASSERT_EQ(vma, NULL);
643 		ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
644 	}
645 
646 	cleanup_mm(&mm, &vmi);
647 	return true;
648 }
649 
650 static bool test_vma_merge_with_close(void)
651 {
652 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
653 	struct mm_struct mm = {};
654 	VMA_ITERATOR(vmi, &mm, 0);
655 	struct vma_merge_struct vmg = {
656 		.mm = &mm,
657 		.vmi = &vmi,
658 	};
659 	const struct vm_operations_struct vm_ops = {
660 		.close = dummy_close,
661 	};
662 	struct vm_area_struct *vma_prev, *vma_next, *vma;
663 
664 	/*
665 	 * When merging VMAs we are not permitted to remove any VMA that has a
666 	 * vm_ops->close() hook.
667 	 *
668 	 * Considering the two possible adjacent VMAs to which a VMA can be
669 	 * merged:
670 	 *
671 	 * [ prev ][ vma ][ next ]
672 	 *
673 	 * In no case will we need to delete prev. If the operation is
674 	 * mergeable, then prev will be extended with one or both of vma and
675 	 * next deleted.
676 	 *
677 	 * As a result, during initial mergeability checks, only
678 	 * can_vma_merge_before() (which implies the VMA being merged with is
679 	 * 'next' as shown above) bothers to check to see whether the next VMA
680 	 * has a vm_ops->close() callback that will need to be called when
681 	 * removed.
682 	 *
683 	 * If it does, then we cannot merge as the resources that the close()
684 	 * operation potentially clears down are tied only to the existing VMA
685 	 * range and we have no way of extending those to the newly merged one.
686 	 *
687 	 * We must consider two scenarios:
688 	 *
689 	 * A.
690 	 *
691 	 * vm_ops->close:     -       -    !NULL
692 	 *                 [ prev ][ vma ][ next ]
693 	 *
694 	 * Where prev may or may not be present/mergeable.
695 	 *
696 	 * This is picked up by a specific check in can_vma_merge_before().
697 	 *
698 	 * B.
699 	 *
700 	 * vm_ops->close:     -     !NULL
701 	 *                 [ prev ][ vma ]
702 	 *
703 	 * Where prev and vma are present and mergeable.
704 	 *
705 	 * This is picked up by a specific check in the modified VMA merge.
706 	 *
707 	 * IMPORTANT NOTE: We make the assumption that the following case:
708 	 *
709 	 *    -     !NULL   NULL
710 	 * [ prev ][ vma ][ next ]
711 	 *
712 	 * Cannot occur, because vma->vm_ops being the same implies the same
713 	 * vma->vm_file, and therefore this would mean that next->vm_ops->close
714 	 * would be set too, and thus scenario A would pick this up.
715 	 */
716 
717 	/*
718 	 * The only case of a new VMA merge that results in a VMA being deleted
719 	 * is one where both the previous and next VMAs are merged - in this
720 	 * instance the next VMA is deleted, and the previous VMA is extended.
721 	 *
722 	 * If we are unable to do so, we reduce the operation to simply
723 	 * extending the prev VMA and not merging next.
724 	 *
725 	 * 0123456789
726 	 * PPP**NNNN
727 	 *             ->
728 	 * 0123456789
729 	 * PPPPPPNNN
730 	 */
731 
732 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
733 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
734 	vma_next->vm_ops = &vm_ops;
735 
736 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
737 	ASSERT_EQ(merge_new(&vmg), vma_prev);
738 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
739 	ASSERT_EQ(vma_prev->vm_start, 0);
740 	ASSERT_EQ(vma_prev->vm_end, 0x5000);
741 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
742 
743 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
744 
745 	/*
746 	 * When modifying an existing VMA there are further cases where we
747 	 * delete VMAs.
748 	 *
749 	 *    <>
750 	 * 0123456789
751 	 * PPPVV
752 	 *
753 	 * In this instance, if vma has a close hook, the merge simply cannot
754 	 * proceed.
755 	 */
756 
757 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
758 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
759 	vma->vm_ops = &vm_ops;
760 
761 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
762 	vmg.prev = vma_prev;
763 	vmg.vma = vma;
764 
765 	/*
766 	 * The VMA being modified in a way that would otherwise merge should
767 	 * also fail.
768 	 */
769 	ASSERT_EQ(merge_existing(&vmg), NULL);
770 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
771 
772 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
773 
774 	/*
775 	 * This case is mirrored if merging with next.
776 	 *
777 	 *    <>
778 	 * 0123456789
779 	 *    VVNNNN
780 	 *
781 	 * In this instance, if vma has a close hook, the merge simply cannot
782 	 * proceed.
783 	 */
784 
785 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
786 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
787 	vma->vm_ops = &vm_ops;
788 
789 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
790 	vmg.vma = vma;
791 	ASSERT_EQ(merge_existing(&vmg), NULL);
792 	/*
793 	 * An earlier version of this check misreported the failure as out of
794 	 * memory, because the close() check was handled in the same way as an
795 	 * anon_vma duplication failure; it is now correctly reported as NOMERGE.
796 	 */
797 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
798 
799 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
800 
801 	/*
802 	 * Finally, we consider two variants of the case where we modify a VMA
803 	 * to merge with both the previous and next VMAs.
804 	 *
805 	 * The first variant is where vma has a close hook. In this instance, no
806 	 * merge can proceed.
807 	 *
808 	 *    <>
809 	 * 0123456789
810 	 * PPPVVNNNN
811 	 */
812 
813 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
814 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
815 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
816 	vma->vm_ops = &vm_ops;
817 
818 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
819 	vmg.prev = vma_prev;
820 	vmg.vma = vma;
821 
822 	ASSERT_EQ(merge_existing(&vmg), NULL);
823 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
824 
825 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);
826 
827 	/*
828 	 * The second variant is where next has a close hook. In this instance,
829 	 * we reduce the operation to a merge between prev and vma.
830 	 *
831 	 *    <>
832 	 * 0123456789
833 	 * PPPVVNNNN
834 	 *            ->
835 	 * 0123456789
836 	 * PPPPPNNNN
837 	 */
838 
839 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
840 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
841 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
842 	vma_next->vm_ops = &vm_ops;
843 
844 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
845 	vmg.prev = vma_prev;
846 	vmg.vma = vma;
847 
848 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
849 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
850 	ASSERT_EQ(vma_prev->vm_start, 0);
851 	ASSERT_EQ(vma_prev->vm_end, 0x5000);
852 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
853 
854 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
855 
856 	return true;
857 }
858 
859 static bool test_vma_merge_new_with_close(void)
860 {
861 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
862 	struct mm_struct mm = {};
863 	VMA_ITERATOR(vmi, &mm, 0);
864 	struct vma_merge_struct vmg = {
865 		.mm = &mm,
866 		.vmi = &vmi,
867 	};
868 	struct vm_area_struct *vma_prev = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
869 	struct vm_area_struct *vma_next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, flags);
870 	const struct vm_operations_struct vm_ops = {
871 		.close = dummy_close,
872 	};
873 	struct vm_area_struct *vma;
874 
875 	/*
876 	 * We should allow the partial merge of a proposed new VMA if the
877 	 * surrounding VMAs have vm_ops->close() hooks (but are otherwise
878 	 * compatible), e.g.:
879 	 *
880 	 *        New VMA
881 	 *    A  v-------v  B
882 	 * |-----|       |-----|
883 	 *  close         close
884 	 *
885 	 * Since the rule is to not DELETE a VMA with a close operation, this
886 	 * should be permitted, only rather than expanding A and deleting B, we
887 	 * should simply expand A and leave B intact, e.g.:
888 	 *
889 	 *        New VMA
890 	 *       A          B
891 	 * |------------||-----|
892 	 *  close         close
893 	 */
894 
895 	/* Have prev and next have a vm_ops->close() hook. */
896 	vma_prev->vm_ops = &vm_ops;
897 	vma_next->vm_ops = &vm_ops;
898 
899 	vmg_set_range(&vmg, 0x2000, 0x5000, 2, flags);
900 	vma = merge_new(&vmg);
901 	ASSERT_NE(vma, NULL);
902 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
903 	ASSERT_EQ(vma->vm_start, 0);
904 	ASSERT_EQ(vma->vm_end, 0x5000);
905 	ASSERT_EQ(vma->vm_pgoff, 0);
906 	ASSERT_EQ(vma->vm_ops, &vm_ops);
907 	ASSERT_TRUE(vma_write_started(vma));
908 	ASSERT_EQ(mm.map_count, 2);
909 
910 	cleanup_mm(&mm, &vmi);
911 	return true;
912 }
913 
914 static bool test_merge_existing(void)
915 {
916 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
917 	struct mm_struct mm = {};
918 	VMA_ITERATOR(vmi, &mm, 0);
919 	struct vm_area_struct *vma, *vma_prev, *vma_next;
920 	struct vma_merge_struct vmg = {
921 		.mm = &mm,
922 		.vmi = &vmi,
923 	};
924 	const struct vm_operations_struct vm_ops = {
925 		.close = dummy_close,
926 	};
927 
928 	/*
929 	 * Merge right case - partial span.
930 	 *
931 	 *    <->
932 	 * 0123456789
933 	 *   VVVVNNN
934 	 *            ->
935 	 * 0123456789
936 	 *   VNNNNNN
937 	 */
938 	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, flags);
939 	vma->vm_ops = &vm_ops; /* This should have no impact. */
940 	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
941 	vma_next->vm_ops = &vm_ops; /* This should have no impact. */
942 	vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags);
943 	vmg.vma = vma;
944 	vmg.prev = vma;
945 	vma->anon_vma = &dummy_anon_vma;
946 	ASSERT_EQ(merge_existing(&vmg), vma_next);
947 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
948 	ASSERT_EQ(vma_next->vm_start, 0x3000);
949 	ASSERT_EQ(vma_next->vm_end, 0x9000);
950 	ASSERT_EQ(vma_next->vm_pgoff, 3);
951 	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
952 	ASSERT_EQ(vma->vm_start, 0x2000);
953 	ASSERT_EQ(vma->vm_end, 0x3000);
954 	ASSERT_EQ(vma->vm_pgoff, 2);
955 	ASSERT_TRUE(vma_write_started(vma));
956 	ASSERT_TRUE(vma_write_started(vma_next));
957 	ASSERT_EQ(mm.map_count, 2);
958 
959 	/* Clear down and reset. */
960 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
961 
962 	/*
963 	 * Merge right case - full span.
964 	 *
965 	 *   <-->
966 	 * 0123456789
967 	 *   VVVVNNN
968 	 *            ->
969 	 * 0123456789
970 	 *   NNNNNNN
971 	 */
972 	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, flags);
973 	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
974 	vma_next->vm_ops = &vm_ops; /* This should have no impact. */
975 	vmg_set_range(&vmg, 0x2000, 0x6000, 2, flags);
976 	vmg.vma = vma;
977 	vma->anon_vma = &dummy_anon_vma;
978 	ASSERT_EQ(merge_existing(&vmg), vma_next);
979 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
980 	ASSERT_EQ(vma_next->vm_start, 0x2000);
981 	ASSERT_EQ(vma_next->vm_end, 0x9000);
982 	ASSERT_EQ(vma_next->vm_pgoff, 2);
983 	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
984 	ASSERT_TRUE(vma_write_started(vma_next));
985 	ASSERT_EQ(mm.map_count, 1);
986 
987 	/* Clear down and reset. We should have deleted vma. */
988 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
989 
990 	/*
991 	 * Merge left case - partial span.
992 	 *
993 	 *    <->
994 	 * 0123456789
995 	 * PPPVVVV
996 	 *            ->
997 	 * 0123456789
998 	 * PPPPPPV
999 	 */
1000 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1001 	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
1002 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
1003 	vma->vm_ops = &vm_ops; /* This should have no impact. */
1004 	vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags);
1005 	vmg.prev = vma_prev;
1006 	vmg.vma = vma;
1007 	vma->anon_vma = &dummy_anon_vma;
1008 
1009 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1010 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1011 	ASSERT_EQ(vma_prev->vm_start, 0);
1012 	ASSERT_EQ(vma_prev->vm_end, 0x6000);
1013 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1014 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1015 	ASSERT_EQ(vma->vm_start, 0x6000);
1016 	ASSERT_EQ(vma->vm_end, 0x7000);
1017 	ASSERT_EQ(vma->vm_pgoff, 6);
1018 	ASSERT_TRUE(vma_write_started(vma_prev));
1019 	ASSERT_TRUE(vma_write_started(vma));
1020 	ASSERT_EQ(mm.map_count, 2);
1021 
1022 	/* Clear down and reset. */
1023 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1024 
1025 	/*
1026 	 * Merge left case - full span.
1027 	 *
1028 	 *    <-->
1029 	 * 0123456789
1030 	 * PPPVVVV
1031 	 *            ->
1032 	 * 0123456789
1033 	 * PPPPPPP
1034 	 */
1035 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1036 	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
1037 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
1038 	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
1039 	vmg.prev = vma_prev;
1040 	vmg.vma = vma;
1041 	vma->anon_vma = &dummy_anon_vma;
1042 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1043 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1044 	ASSERT_EQ(vma_prev->vm_start, 0);
1045 	ASSERT_EQ(vma_prev->vm_end, 0x7000);
1046 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1047 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1048 	ASSERT_TRUE(vma_write_started(vma_prev));
1049 	ASSERT_EQ(mm.map_count, 1);
1050 
1051 	/* Clear down and reset. We should have deleted vma. */
1052 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
1053 
1054 	/*
1055 	 * Merge both case.
1056 	 *
1057 	 *    <-->
1058 	 * 0123456789
1059 	 * PPPVVVVNNN
1060 	 *             ->
1061 	 * 0123456789
1062 	 * PPPPPPPPPP
1063 	 */
1064 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1065 	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
1066 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
1067 	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
1068 	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
1069 	vmg.prev = vma_prev;
1070 	vmg.vma = vma;
1071 	vma->anon_vma = &dummy_anon_vma;
1072 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1073 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1074 	ASSERT_EQ(vma_prev->vm_start, 0);
1075 	ASSERT_EQ(vma_prev->vm_end, 0x9000);
1076 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1077 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1078 	ASSERT_TRUE(vma_write_started(vma_prev));
1079 	ASSERT_EQ(mm.map_count, 1);
1080 
1081 	/* Clear down and reset. We should have deleted prev and next. */
1082 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
1083 
1084 	/*
1085 	 * Non-merge ranges. The modified VMA merge operation assumes that the
1086 	 * caller always specifies ranges within the input VMA, so we need only
1087 	 * examine these cases.
1088 	 *
1089 	 *     -
1090 	 *      -
1091 	 *       -
1092 	 *     <->
1093 	 *     <>
1094 	 *      <>
1095 	 * 0123456789a
1096 	 * PPPVVVVVNNN
1097 	 */
1098 
1099 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1100 	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, flags);
1101 	vma_next = alloc_and_link_vma(&mm, 0x8000, 0xa000, 8, flags);
1102 
1103 	vmg_set_range(&vmg, 0x4000, 0x5000, 4, flags);
1104 	vmg.prev = vma;
1105 	vmg.vma = vma;
1106 	ASSERT_EQ(merge_existing(&vmg), NULL);
1107 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1108 
1109 	vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
1110 	vmg.prev = vma;
1111 	vmg.vma = vma;
1112 	ASSERT_EQ(merge_existing(&vmg), NULL);
1113 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1114 
1115 	vmg_set_range(&vmg, 0x6000, 0x7000, 6, flags);
1116 	vmg.prev = vma;
1117 	vmg.vma = vma;
1118 	ASSERT_EQ(merge_existing(&vmg), NULL);
1119 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1120 
1121 	vmg_set_range(&vmg, 0x4000, 0x7000, 4, flags);
1122 	vmg.prev = vma;
1123 	vmg.vma = vma;
1124 	ASSERT_EQ(merge_existing(&vmg), NULL);
1125 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1126 
1127 	vmg_set_range(&vmg, 0x4000, 0x6000, 4, flags);
1128 	vmg.prev = vma;
1129 	vmg.vma = vma;
1130 	ASSERT_EQ(merge_existing(&vmg), NULL);
1131 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1132 
1133 	vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
1134 	vmg.prev = vma;
1135 	vmg.vma = vma;
1136 	ASSERT_EQ(merge_existing(&vmg), NULL);
1137 	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
1138 
1139 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);
1140 
1141 	return true;
1142 }
1143 
1144 static bool test_anon_vma_non_mergeable(void)
1145 {
1146 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1147 	struct mm_struct mm = {};
1148 	VMA_ITERATOR(vmi, &mm, 0);
1149 	struct vm_area_struct *vma, *vma_prev, *vma_next;
1150 	struct vma_merge_struct vmg = {
1151 		.mm = &mm,
1152 		.vmi = &vmi,
1153 	};
1154 	struct anon_vma_chain dummy_anon_vma_chain1 = {
1155 		.anon_vma = &dummy_anon_vma,
1156 	};
1157 	struct anon_vma_chain dummy_anon_vma_chain2 = {
1158 		.anon_vma = &dummy_anon_vma,
1159 	};
1160 
1161 	/*
1162 	 * In the case of a modified VMA merge spanning both the left and right
1163 	 * VMAs, where prev and next have incompatible anon_vma objects, we fall
1164 	 * back to a merge of prev and vma:
1165 	 *
1166 	 *    <-->
1167 	 * 0123456789
1168 	 * PPPVVVVNNN
1169 	 *            ->
1170 	 * 0123456789
1171 	 * PPPPPPPNNN
1172 	 */
1173 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1174 	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
1175 	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
1176 
1177 	/*
1178 	 * Give both prev and next a single anon_vma_chain entry, so they will
1179 	 * merge with the NULL vmg->anon_vma.
1180 	 *
1181 	 * However, when prev is compared to next, the merge should fail.
1182 	 */
1183 
1184 	INIT_LIST_HEAD(&vma_prev->anon_vma_chain);
1185 	list_add(&dummy_anon_vma_chain1.same_vma, &vma_prev->anon_vma_chain);
1186 	ASSERT_TRUE(list_is_singular(&vma_prev->anon_vma_chain));
1187 	vma_prev->anon_vma = &dummy_anon_vma;
1188 	ASSERT_TRUE(is_mergeable_anon_vma(NULL, vma_prev->anon_vma, vma_prev));
1189 
1190 	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
1191 	list_add(&dummy_anon_vma_chain2.same_vma, &vma_next->anon_vma_chain);
1192 	ASSERT_TRUE(list_is_singular(&vma_next->anon_vma_chain));
1193 	vma_next->anon_vma = (struct anon_vma *)2;
1194 	ASSERT_TRUE(is_mergeable_anon_vma(NULL, vma_next->anon_vma, vma_next));
1195 
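	/*
	 * Note the faked anon_vma pointer value: all this test relies on is
	 * that prev's and next's anon_vma pointers compare unequal, which is
	 * what defeats the prev/next merge below.
	 */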
1196 	ASSERT_FALSE(is_mergeable_anon_vma(vma_prev->anon_vma, vma_next->anon_vma, NULL));
1197 
1198 	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
1199 	vmg.prev = vma_prev;
1200 	vmg.vma = vma;
1201 
1202 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1203 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1204 	ASSERT_EQ(vma_prev->vm_start, 0);
1205 	ASSERT_EQ(vma_prev->vm_end, 0x7000);
1206 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1207 	ASSERT_TRUE(vma_write_started(vma_prev));
1208 	ASSERT_FALSE(vma_write_started(vma_next));
1209 
1210 	/* Clear down and reset. */
1211 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1212 
1213 	/*
1214 	 * Now consider the new VMA case. This is equivalent, only adding a new
1215 	 * VMA in a gap between prev and next.
1216 	 *
1217 	 *    <-->
1218 	 * 0123456789
1219 	 * PPP****NNN
1220 	 *            ->
1221 	 * 0123456789
1222 	 * PPPPPPPNNN
1223 	 */
1224 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1225 	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
1226 
1227 	INIT_LIST_HEAD(&vma_prev->anon_vma_chain);
1228 	list_add(&dummy_anon_vma_chain1.same_vma, &vma_prev->anon_vma_chain);
1229 	vma_prev->anon_vma = (struct anon_vma *)1;
1230 
1231 	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
1232 	list_add(&dummy_anon_vma_chain2.same_vma, &vma_next->anon_vma_chain);
1233 	vma_next->anon_vma = (struct anon_vma *)2;
1234 
1235 	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
1236 	vmg.prev = vma_prev;
1237 
1238 	ASSERT_EQ(merge_new(&vmg), vma_prev);
1239 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1240 	ASSERT_EQ(vma_prev->vm_start, 0);
1241 	ASSERT_EQ(vma_prev->vm_end, 0x7000);
1242 	ASSERT_EQ(vma_prev->vm_pgoff, 0);
1243 	ASSERT_TRUE(vma_write_started(vma_prev));
1244 	ASSERT_FALSE(vma_write_started(vma_next));
1245 
1246 	/* Final cleanup. */
1247 	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
1248 
1249 	return true;
1250 }
1251 
1252 static bool test_dup_anon_vma(void)
1253 {
1254 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1255 	struct mm_struct mm = {};
1256 	VMA_ITERATOR(vmi, &mm, 0);
1257 	struct vma_merge_struct vmg = {
1258 		.mm = &mm,
1259 		.vmi = &vmi,
1260 	};
1261 	struct anon_vma_chain dummy_anon_vma_chain = {
1262 		.anon_vma = &dummy_anon_vma,
1263 	};
1264 	struct vm_area_struct *vma_prev, *vma_next, *vma;
1265 
1266 	reset_dummy_anon_vma();
1267 
1268 	/*
1269 	 * Expanding a VMA and deleting the next one duplicates next's anon_vma and
1270 	 * assigns it to the expanded VMA.
1271 	 *
1272 	 * This covers new VMA merging, as these operations amount to a VMA
1273 	 * expand.
1274 	 */
1275 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1276 	vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1277 	vma_next->anon_vma = &dummy_anon_vma;
1278 
1279 	vmg_set_range(&vmg, 0, 0x5000, 0, flags);
1280 	vmg.vma = vma_prev;
1281 	vmg.next = vma_next;
1282 
1283 	ASSERT_EQ(expand_existing(&vmg), 0);
1284 
1285 	/* Will have been cloned. */
1286 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1287 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1288 
1289 	/* Cleanup ready for next run. */
1290 	cleanup_mm(&mm, &vmi);
1291 
1292 	/*
1293 	 * next has anon_vma, we assign to prev.
1294 	 *
1295 	 *         |<----->|
1296 	 * |-------*********-------|
1297 	 *   prev     vma     next
1298 	 *  extend   delete  delete
1299 	 */
1300 
1301 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1302 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1303 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
1304 
1305 	/* Initialise avc so mergeability check passes. */
1306 	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
1307 	list_add(&dummy_anon_vma_chain.same_vma, &vma_next->anon_vma_chain);
1308 
1309 	vma_next->anon_vma = &dummy_anon_vma;
1310 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1311 	vmg.prev = vma_prev;
1312 	vmg.vma = vma;
1313 
1314 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1315 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1316 
1317 	ASSERT_EQ(vma_prev->vm_start, 0);
1318 	ASSERT_EQ(vma_prev->vm_end, 0x8000);
1319 
1320 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1321 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1322 
1323 	cleanup_mm(&mm, &vmi);
1324 
1325 	/*
1326 	 * vma has anon_vma, we assign to prev.
1327 	 *
1328 	 *         |<----->|
1329 	 * |-------*********-------|
1330 	 *   prev     vma     next
1331 	 *  extend   delete  delete
1332 	 */
1333 
1334 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1335 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1336 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
1337 
1338 	vma->anon_vma = &dummy_anon_vma;
1339 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1340 	vmg.prev = vma_prev;
1341 	vmg.vma = vma;
1342 
1343 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1344 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1345 
1346 	ASSERT_EQ(vma_prev->vm_start, 0);
1347 	ASSERT_EQ(vma_prev->vm_end, 0x8000);
1348 
1349 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1350 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1351 
1352 	cleanup_mm(&mm, &vmi);
1353 
1354 	/*
1355 	 * vma has anon_vma, we assign to prev.
1356 	 *
1357 	 *         |<----->|
1358 	 * |-------*************
1359 	 *   prev       vma
1360 	 *  extend shrink/delete
1361 	 */
1362 
1363 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1364 	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, flags);
1365 
1366 	vma->anon_vma = &dummy_anon_vma;
1367 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1368 	vmg.prev = vma_prev;
1369 	vmg.vma = vma;
1370 
1371 	ASSERT_EQ(merge_existing(&vmg), vma_prev);
1372 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1373 
1374 	ASSERT_EQ(vma_prev->vm_start, 0);
1375 	ASSERT_EQ(vma_prev->vm_end, 0x5000);
1376 
1377 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1378 	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
1379 
1380 	cleanup_mm(&mm, &vmi);
1381 
1382 	/*
1383 	 * vma has anon_vma, we assign to next.
1384 	 *
1385 	 *     |<----->|
1386 	 * *************-------|
1387 	 *      vma       next
1388 	 * shrink/delete extend
1389 	 */
1390 
1391 	vma = alloc_and_link_vma(&mm, 0, 0x5000, 0, flags);
1392 	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
1393 
1394 	vma->anon_vma = &dummy_anon_vma;
1395 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1396 	vmg.prev = vma;
1397 	vmg.vma = vma;
1398 
1399 	ASSERT_EQ(merge_existing(&vmg), vma_next);
1400 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1401 
1402 	ASSERT_EQ(vma_next->vm_start, 0x3000);
1403 	ASSERT_EQ(vma_next->vm_end, 0x8000);
1404 
1405 	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
1406 	ASSERT_TRUE(vma_next->anon_vma->was_cloned);
1407 
1408 	cleanup_mm(&mm, &vmi);
1409 	return true;
1410 }
1411 
1412 static bool test_vmi_prealloc_fail(void)
1413 {
1414 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1415 	struct mm_struct mm = {};
1416 	VMA_ITERATOR(vmi, &mm, 0);
1417 	struct vma_merge_struct vmg = {
1418 		.mm = &mm,
1419 		.vmi = &vmi,
1420 	};
1421 	struct vm_area_struct *vma_prev, *vma;
1422 
1423 	/*
1424 	 * We are merging vma into prev, with vma possessing an anon_vma, which
1425 	 * will be duplicated. We cause the vmi preallocation to fail and assert
1426 	 * the duplicated anon_vma is unlinked.
1427 	 */
1428 
1429 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1430 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1431 	vma->anon_vma = &dummy_anon_vma;
1432 
1433 	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
1434 	vmg.prev = vma_prev;
1435 	vmg.vma = vma;
1436 
1437 	fail_prealloc = true;
1438 
1439 	/* This will cause the merge to fail. */
1440 	ASSERT_EQ(merge_existing(&vmg), NULL);
1441 	ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);
1442 	/* We will already have assigned the anon_vma. */
1443 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1444 	/* And it was both cloned and unlinked. */
1445 	ASSERT_TRUE(dummy_anon_vma.was_cloned);
1446 	ASSERT_TRUE(dummy_anon_vma.was_unlinked);
1447 
1448 	cleanup_mm(&mm, &vmi); /* Resets fail_prealloc too. */
1449 
1450 	/*
1451 	 * We repeat the same operation for expanding a VMA, which is what new
1452 	 * VMA merging ultimately uses too. This asserts that unlinking is
1453 	 * performed in this case too.
1454 	 */
1455 
1456 	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
1457 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1458 	vma->anon_vma = &dummy_anon_vma;
1459 
1460 	vmg_set_range(&vmg, 0, 0x5000, 3, flags);
1461 	vmg.vma = vma_prev;
1462 	vmg.next = vma;
1463 
1464 	fail_prealloc = true;
1465 	ASSERT_EQ(expand_existing(&vmg), -ENOMEM);
1466 	ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);
1467 
1468 	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
1469 	ASSERT_TRUE(dummy_anon_vma.was_cloned);
1470 	ASSERT_TRUE(dummy_anon_vma.was_unlinked);
1471 
1472 	cleanup_mm(&mm, &vmi);
1473 	return true;
1474 }
1475 
1476 static bool test_merge_extend(void)
1477 {
1478 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1479 	struct mm_struct mm = {};
1480 	VMA_ITERATOR(vmi, &mm, 0x1000);
1481 	struct vm_area_struct *vma;
1482 
1483 	vma = alloc_and_link_vma(&mm, 0, 0x1000, 0, flags);
1484 	alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
1485 
1486 	/*
1487 	 * Extend a VMA into the gap between itself and the following VMA.
1488 	 * This should result in a merge.
1489 	 *
1490 	 * <->
1491 	 * *  *
1492 	 *
1493 	 */
1494 
1495 	ASSERT_EQ(vma_merge_extend(&vmi, vma, 0x2000), vma);
1496 	ASSERT_EQ(vma->vm_start, 0);
1497 	ASSERT_EQ(vma->vm_end, 0x4000);
1498 	ASSERT_EQ(vma->vm_pgoff, 0);
1499 	ASSERT_TRUE(vma_write_started(vma));
1500 	ASSERT_EQ(mm.map_count, 1);
1501 
1502 	cleanup_mm(&mm, &vmi);
1503 	return true;
1504 }
1505 
1506 static bool test_copy_vma(void)
1507 {
1508 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1509 	struct mm_struct mm = {};
1510 	bool need_locks = false;
1511 	VMA_ITERATOR(vmi, &mm, 0);
1512 	struct vm_area_struct *vma, *vma_new, *vma_next;
1513 
1514 	/* Move backwards and do not merge. */
1515 
1516 	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1517 	vma_new = copy_vma(&vma, 0, 0x2000, 0, &need_locks);
1518 
1519 	ASSERT_NE(vma_new, vma);
1520 	ASSERT_EQ(vma_new->vm_start, 0);
1521 	ASSERT_EQ(vma_new->vm_end, 0x2000);
1522 	ASSERT_EQ(vma_new->vm_pgoff, 0);
1523 
1524 	cleanup_mm(&mm, &vmi);
1525 
1526 	/* Move a VMA into position next to another and merge the two. */
1527 
1528 	vma = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
1529 	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x8000, 6, flags);
1530 	vma_new = copy_vma(&vma, 0x4000, 0x2000, 4, &need_locks);
1531 
1532 	ASSERT_EQ(vma_new, vma_next);
1533 
1534 	cleanup_mm(&mm, &vmi);
1535 	return true;
1536 }
1537 
1538 static bool test_expand_only_mode(void)
1539 {
1540 	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
1541 	struct mm_struct mm = {};
1542 	VMA_ITERATOR(vmi, &mm, 0);
1543 	struct vm_area_struct *vma_prev, *vma;
1544 	VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, flags, 5);
1545 
1546 	/*
1547 	 * Place a VMA prior to the one we're expanding so we assert that we do
1548 	 * not erroneously try to traverse to the previous VMA even though we
1549 	 * have, through the use of VMG_FLAG_JUST_EXPAND, indicated we do not
1550 	 * need to do so.
1551 	 */
1552 	alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
1553 
1554 	/*
1555 	 * We will be positioned at the prev VMA, but looking to expand to
1556 	 * 0x9000.
1557 	 */
1558 	vma_iter_set(&vmi, 0x3000);
1559 	vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
1560 	vmg.prev = vma_prev;
1561 	vmg.merge_flags = VMG_FLAG_JUST_EXPAND;
1562 
1563 	vma = vma_merge_new_range(&vmg);
1564 	ASSERT_NE(vma, NULL);
1565 	ASSERT_EQ(vma, vma_prev);
1566 	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
1567 	ASSERT_EQ(vma->vm_start, 0x3000);
1568 	ASSERT_EQ(vma->vm_end, 0x9000);
1569 	ASSERT_EQ(vma->vm_pgoff, 3);
1570 	ASSERT_TRUE(vma_write_started(vma));
1571 	ASSERT_EQ(vma_iter_addr(&vmi), 0x3000);
1572 
1573 	cleanup_mm(&mm, &vmi);
1574 	return true;
1575 }
1576 
1577 static bool test_mmap_region_basic(void)
1578 {
1579 	struct mm_struct mm = {};
1580 	unsigned long addr;
1581 	struct vm_area_struct *vma;
1582 	VMA_ITERATOR(vmi, &mm, 0);
1583 
1584 	current->mm = &mm;
1585 
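	/*
	 * __mmap_region() is invoked with no backing file and no userfaultfd
	 * unmap list; each pgoff is the mapping address >> PAGE_SHIFT (e.g.
	 * 0x300 for 0x300000) so adjacent mappings remain mergeable.
	 */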
1586 	/* Map at 0x300000, length 0x3000. */
1587 	addr = __mmap_region(NULL, 0x300000, 0x3000,
1588 			     VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
1589 			     0x300, NULL);
1590 	ASSERT_EQ(addr, 0x300000);
1591 
1592 	/* Map at 0x250000, length 0x3000. */
1593 	addr = __mmap_region(NULL, 0x250000, 0x3000,
1594 			     VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
1595 			     0x250, NULL);
1596 	ASSERT_EQ(addr, 0x250000);
1597 
1598 	/* Map at 0x303000, merging to 0x300000 of length 0x6000. */
1599 	addr = __mmap_region(NULL, 0x303000, 0x3000,
1600 			     VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
1601 			     0x303, NULL);
1602 	ASSERT_EQ(addr, 0x303000);
1603 
1604 	/* Map at 0x24d000, merging to 0x250000 of length 0x6000. */
1605 	addr = __mmap_region(NULL, 0x24d000, 0x3000,
1606 			     VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
1607 			     0x24d, NULL);
1608 	ASSERT_EQ(addr, 0x24d000);
1609 
1610 	ASSERT_EQ(mm.map_count, 2);
1611 
1612 	for_each_vma(vmi, vma) {
1613 		if (vma->vm_start == 0x300000) {
1614 			ASSERT_EQ(vma->vm_end, 0x306000);
1615 			ASSERT_EQ(vma->vm_pgoff, 0x300);
1616 		} else if (vma->vm_start == 0x24d000) {
1617 			ASSERT_EQ(vma->vm_end, 0x253000);
1618 			ASSERT_EQ(vma->vm_pgoff, 0x24d);
1619 		} else {
1620 			ASSERT_FALSE(true);
1621 		}
1622 	}
1623 
1624 	cleanup_mm(&mm, &vmi);
1625 	return true;
1626 }
1627 
1628 int main(void)
1629 {
1630 	int num_tests = 0, num_fail = 0;
1631 
1632 	maple_tree_init();
1633 
1634 #define TEST(name)							\
1635 	do {								\
1636 		num_tests++;						\
1637 		if (!test_##name()) {					\
1638 			num_fail++;					\
1639 			fprintf(stderr, "Test " #name " FAILED\n");	\
1640 		}							\
1641 	} while (0)
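
	/*
	 * To add a new test, define a static bool test_<name>(void) above that
	 * returns true on success, then register it with TEST(<name>) below.
	 */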
1642 
1643 	/* Very simple tests to kick the tyres. */
1644 	TEST(simple_merge);
1645 	TEST(simple_modify);
1646 	TEST(simple_expand);
1647 	TEST(simple_shrink);
1648 
1649 	TEST(merge_new);
1650 	TEST(vma_merge_special_flags);
1651 	TEST(vma_merge_with_close);
1652 	TEST(vma_merge_new_with_close);
1653 	TEST(merge_existing);
1654 	TEST(anon_vma_non_mergeable);
1655 	TEST(dup_anon_vma);
1656 	TEST(vmi_prealloc_fail);
1657 	TEST(merge_extend);
1658 	TEST(copy_vma);
1659 	TEST(expand_only_mode);
1660 
1661 	TEST(mmap_region_basic);
1662 
1663 #undef TEST
1664 
1665 	printf("%d tests run, %d passed, %d failed.\n",
1666 	       num_tests, num_tests - num_fail, num_fail);
1667 
1668 	return num_fail == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
1669 }
1670