Lines matching "mapping" (mm/readahead.c)

139 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)  in file_ra_state_init()  argument
141 ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages; in file_ra_state_init()
148 const struct address_space_operations *aops = rac->mapping->a_ops; in read_pages()
210 struct address_space *mapping = ractl->mapping; in page_cache_ra_unbounded() local
212 gfp_t gfp_mask = readahead_gfp_mask(mapping); in page_cache_ra_unbounded()
214 unsigned int min_nrpages = mapping_min_folio_nrpages(mapping); in page_cache_ra_unbounded()
223 * filesystems already specify __GFP_NOFS in their mapping's in page_cache_ra_unbounded()
228 filemap_invalidate_lock_shared(mapping); in page_cache_ra_unbounded()
229 index = mapping_align_index(mapping, index); in page_cache_ra_unbounded()
252 struct folio *folio = xa_load(&mapping->i_pages, index + i); in page_cache_ra_unbounded()
271 mapping_min_folio_order(mapping)); in page_cache_ra_unbounded()
275 ret = filemap_add_folio(mapping, folio, index + i, gfp_mask); in page_cache_ra_unbounded()
298 filemap_invalidate_unlock_shared(mapping); in page_cache_ra_unbounded()
312 struct inode *inode = ractl->mapping->host; in do_page_cache_ra()
337 struct address_space *mapping = ractl->mapping; in force_page_cache_ra() local
339 struct backing_dev_info *bdi = inode_to_bdi(mapping->host); in force_page_cache_ra()
342 if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead)) in force_page_cache_ra()
448 err = filemap_add_folio(ractl->mapping, folio, index, gfp); in ra_alloc_folio()
462 struct address_space *mapping = ractl->mapping; in page_cache_ra_order() local
465 unsigned int min_order = mapping_min_folio_order(mapping); in page_cache_ra_order()
466 pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT; in page_cache_ra_order()
470 gfp_t gfp = readahead_gfp_mask(mapping); in page_cache_ra_order()
471 unsigned int min_ra_size = max(4, mapping_min_folio_nrpages(mapping)); in page_cache_ra_order()
477 if (!mapping_large_folio_support(mapping) || ra->size < min_ra_size) in page_cache_ra_order()
482 if (new_order < mapping_max_folio_order(mapping)) in page_cache_ra_order()
485 new_order = min(mapping_max_folio_order(mapping), new_order); in page_cache_ra_order()
491 filemap_invalidate_lock_shared(mapping); in page_cache_ra_order()
497 ractl->_index = mapping_align_index(mapping, index); in page_cache_ra_order()
516 filemap_invalidate_unlock_shared(mapping); in page_cache_ra_order()
539 struct backing_dev_info *bdi = inode_to_bdi(ractl->mapping->host); in ractl_max_pages()
599 miss = page_cache_prev_miss(ractl->mapping, index - 1, max_pages); in page_cache_sync_ra()
674 start = page_cache_next_miss(ractl->mapping, index + 1, max_pages); in page_cache_async_ra()
744 struct address_space *mapping = ractl->mapping; in readahead_expand() local
747 gfp_t gfp_mask = readahead_gfp_mask(mapping); in readahead_expand()
748 unsigned long min_nrpages = mapping_min_folio_nrpages(mapping); in readahead_expand()
749 unsigned int min_order = mapping_min_folio_order(mapping); in readahead_expand()
761 struct folio *folio = xa_load(&mapping->i_pages, index); in readahead_expand()
770 index = mapping_align_index(mapping, index); in readahead_expand()
771 if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) { in readahead_expand()
790 struct folio *folio = xa_load(&mapping->i_pages, index); in readahead_expand()
799 index = mapping_align_index(mapping, index); in readahead_expand()
800 if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) { in readahead_expand()
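All of these hits reduce to one pattern: take the address_space from the readahead_control (ractl->mapping) or from the file's inode, ask the mapping for its GFP mask and minimum folio order, align the starting index, and insert freshly allocated folios while holding the invalidate lock. The helper below is a hypothetical sketch of that pattern, not code from the file being searched; it reuses only the calls visible in the matches, plus filemap_alloc_folio() and folio_put() for the allocation step, and the bdi->ra_pages cap is only loosely modelled on force_page_cache_ra().

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>

/*
 * Hypothetical sketch: how readahead code derives per-mapping limits
 * before inserting folios into the page cache.
 */
static void example_ra_prepare(struct readahead_control *ractl,
                               unsigned long nr_to_read)
{
        struct address_space *mapping = ractl->mapping;
        gfp_t gfp_mask = readahead_gfp_mask(mapping);
        unsigned int min_order = mapping_min_folio_order(mapping);
        unsigned long nr, i;

        /* Rough cap from the backing device, read as in the matches above. */
        nr_to_read = min(nr_to_read, inode_to_bdi(mapping->host)->ra_pages);

        /* Folio insertion races with invalidation; hold the shared lock. */
        filemap_invalidate_lock_shared(mapping);
        ractl->_index = mapping_align_index(mapping, ractl->_index);

        nr = mapping_min_folio_nrpages(mapping);
        for (i = 0; i < nr_to_read; i += nr) {
                struct folio *folio = filemap_alloc_folio(gfp_mask, min_order);

                if (!folio)
                        break;
                if (filemap_add_folio(mapping, folio, ractl->_index + i,
                                      gfp_mask) < 0) {
                        folio_put(folio);
                        break;
                }
        }
        filemap_invalidate_unlock_shared(mapping);
}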