Lines matching refs:mr (every line that references the identifier mr; the numbers are the source file's own line numbers, so gaps between them are lines with no mr reference)

In io_free_region() (mr is an argument):
  90  void io_free_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr)
  92          if (mr->pages) {
  93                  long nr_refs = mr->nr_pages;
  95                  if (mr->flags & IO_REGION_F_SINGLE_REF)
  98                  if (mr->flags & IO_REGION_F_USER_PROVIDED)
  99                          unpin_user_pages(mr->pages, nr_refs);
 101                          release_pages(mr->pages, nr_refs);
 103                  kvfree(mr->pages);
 105          if ((mr->flags & IO_REGION_F_VMAP) && mr->ptr)
 106                  vunmap(mr->ptr);
 107          if (mr->nr_pages && ctx->user)
 108                  __io_unaccount_mem(ctx->user, mr->nr_pages);
 110          memset(mr, 0, sizeof(*mr));
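
Read together, these hits cover the whole teardown path: drop the page references, free the page array, vunmap() any stitched-together kernel mapping, un-account the pages against the user, and zero the struct so the region reads as unset. The search hides lines that do not mention mr; a sketch of the reference-drop branch with those gaps filled in (the nr_refs = 1 body and the else are assumptions inferred from the flags tested above):

        if (mr->pages) {
                long nr_refs = mr->nr_pages;

                /* Assumed gap: a compound allocation holds a single ref. */
                if (mr->flags & IO_REGION_F_SINGLE_REF)
                        nr_refs = 1;

                if (mr->flags & IO_REGION_F_USER_PROVIDED)
                        unpin_user_pages(mr->pages, nr_refs);
                else    /* assumed: kernel-allocated pages take this path */
                        release_pages(mr->pages, nr_refs);

                kvfree(mr->pages);
        }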

In io_region_init_ptr() (mr is an argument):
 113  static int io_region_init_ptr(struct io_mapped_region *mr)
 118          if (io_check_coalesce_buffer(mr->pages, mr->nr_pages, &ifd)) {
 120                  mr->ptr = page_address(mr->pages[0]);
 124          ptr = vmap(mr->pages, mr->nr_pages, VM_MAP, PAGE_KERNEL);
 128          mr->ptr = ptr;
 129          mr->flags |= IO_REGION_F_VMAP;
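
Two ways to get a kernel address for the region: when io_check_coalesce_buffer() says the pages form one physically contiguous unit, page_address() of the first page suffices; otherwise the pages are stitched together with vmap(), and IO_REGION_F_VMAP records that a vunmap() is owed at free time. A sketch of the fallback with the elided failure check filled in (the -ENOMEM return is an assumption):

        ptr = vmap(mr->pages, mr->nr_pages, VM_MAP, PAGE_KERNEL);
        if (!ptr)
                return -ENOMEM;         /* assumed gap before line 128 */

        mr->ptr = ptr;
        mr->flags |= IO_REGION_F_VMAP;  /* io_free_region() will vunmap() */
        return 0;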

In io_region_pin_pages() (mr is an argument):
 134                                 struct io_mapped_region *mr,
 137          unsigned long size = mr->nr_pages << PAGE_SHIFT;
 144          if (WARN_ON_ONCE(nr_pages != mr->nr_pages))
 147          mr->pages = pages;
 148          mr->flags |= IO_REGION_F_USER_PROVIDED;
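
The pinning call itself is invisible here because its line does not mention mr: between lines 137 and 144 something must fill pages and nr_pages from the userspace address in reg. A hedged sketch, assuming io_uring's io_pin_pages() helper does that job:

        /* Assumption: the elided lines pin the user buffer, e.g. */
        pages = io_pin_pages(reg->user_addr, size, &nr_pages);
        if (IS_ERR(pages))
                return PTR_ERR(pages);
        if (WARN_ON_ONCE(nr_pages != mr->nr_pages))
                return -EFAULT;         /* assumed error code */

        mr->pages = pages;
        mr->flags |= IO_REGION_F_USER_PROVIDED; /* freed via unpin_user_pages() */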

In io_region_allocate_pages() (mr is an argument):
 153                                 struct io_mapped_region *mr,
 158          unsigned long size = mr->nr_pages << PAGE_SHIFT;
 163          pages = kvmalloc_array(mr->nr_pages, sizeof(*pages), gfp);
 167          p = io_mem_alloc_compound(pages, mr->nr_pages, size, gfp);
 169                  mr->flags |= IO_REGION_F_SINGLE_REF;
 174                                                    mr->nr_pages, pages);
 175          if (nr_allocated != mr->nr_pages) {
 183          mr->pages = pages;
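
Allocation tries the cheap path first: io_mem_alloc_compound() returns one physically contiguous compound page, so only a single reference needs dropping later, which is what IO_REGION_F_SINGLE_REF records for io_free_region(). The continuation at line 174 points at a bulk-allocator fallback; a sketch assuming alloc_pages_bulk_array_node() as the call split across lines 173-174, with a partial-failure cleanup in the elided lines:

        /* Assumption: lines 173-175 bulk-allocate and check the count. */
        nr_allocated = alloc_pages_bulk_array_node(gfp, NUMA_NO_NODE,
                                                   mr->nr_pages, pages);
        if (nr_allocated != mr->nr_pages) {
                /* assumed cleanup for a partial allocation */
                if (nr_allocated)
                        release_pages(pages, nr_allocated);
                kvfree(pages);
                return -ENOMEM;
        }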

In io_create_region() (mr is an argument):
 187  int io_create_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr,
 194          if (WARN_ON_ONCE(mr->pages || mr->ptr || mr->nr_pages))
 218          mr->nr_pages = nr_pages;
 221                  ret = io_region_pin_pages(ctx, mr, reg);
 223                  ret = io_region_allocate_pages(ctx, mr, reg, mmap_offset);
 227          ret = io_region_init_ptr(mr);
 232          io_free_region(ctx, mr);
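
This is the constructor that strings the helpers together: refuse a region that already looks populated (line 194), record the page count, then either pin user memory or allocate kernel pages, and finally derive the kernel pointer. Line 232 is the unwind: because every helper leaves mr self-describing, io_free_region() can clean up whichever steps completed. A sketch of the skeleton (the reg->flags test and the label name are assumptions):

        /* Assumed skeleton; the flag test and label are guesses. */
        if (reg->flags & IORING_MEM_REGION_TYPE_USER)
                ret = io_region_pin_pages(ctx, mr, reg);
        else
                ret = io_region_allocate_pages(ctx, mr, reg, mmap_offset);
        if (ret)
                goto out_free;

        ret = io_region_init_ptr(mr);
        if (ret)
                goto out_free;
        return 0;
out_free:
        io_free_region(ctx, mr);
        return ret;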

In io_create_region_mmap_safe() (mr is an argument):
 236  int io_create_region_mmap_safe(struct io_ring_ctx *ctx, struct io_mapped_region *mr,
 243          memcpy(&tmp_mr, mr, sizeof(tmp_mr));
 253          memcpy(mr, &tmp_mr, sizeof(tmp_mr));
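
The two memcpy() calls bracket the point of this wrapper: the region is built in a local tmp_mr and only copied into the caller-visible mr once fully constructed, so a racing mmap() never sees a half-initialized region. Presumably the elided lines run io_create_region() on the temporary and serialize publication against the mmap path; a sketch under that assumption (the lock name is a guess):

        struct io_mapped_region tmp_mr;
        int ret;

        memcpy(&tmp_mr, mr, sizeof(tmp_mr));
        ret = io_create_region(ctx, &tmp_mr, reg, mmap_offset);
        if (ret)
                return ret;

        /* Assumption: publish under the lock the mmap path takes. */
        guard(mutex)(&ctx->mmap_lock);
        memcpy(mr, &tmp_mr, sizeof(tmp_mr));
        return 0;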

In io_region_validate_mmap() (mr is an argument):
 279                                 struct io_mapped_region *mr)
 283          if (!io_region_is_set(mr))
 285          if (mr->flags & IO_REGION_F_USER_PROVIDED)
 288          return io_region_get_ptr(mr);
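
Two rejections before the pointer is handed out: an unset region cannot be mapped, and a user-provided region is the user's own memory already, so mapping it back through the ring makes no sense. The elided returns are presumably error pointers; a sketch assuming -EINVAL for both:

        if (!io_region_is_set(mr))
                return ERR_PTR(-EINVAL);        /* assumed error value */
        if (mr->flags & IO_REGION_F_USER_PROVIDED)
                return ERR_PTR(-EINVAL);        /* assumed: no re-mmap */

        return io_region_get_ptr(mr);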

In io_region_mmap() (mr is an argument):
 306                            struct io_mapped_region *mr,
 310          unsigned long nr_pages = min(mr->nr_pages, max_pages);
 313          return vm_insert_pages(vma, vma->vm_start, mr->pages, &nr_pages);
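
The mapping itself is one vm_insert_pages() call, with the page count clamped so a caller-supplied max_pages can expose only a prefix of the region. A sketch of the body; the vma flag tweak standing in for the elided lines 311-312 is an assumption (mappings like this usually forbid growth via mremap()):

        unsigned long nr_pages = min(mr->nr_pages, max_pages);

        /* Assumption: an elided line pins the vma's size, e.g. */
        vm_flags_set(vma, VM_DONTEXPAND);
        return vm_insert_pages(vma, vma->vm_start, mr->pages, &nr_pages);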