/*
 *	linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for try_to_free_buffers */

#include <asm/mman.h>

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_rwsem		(truncate_pagecache)
 *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
 *      ->swap_lock		(exclusive_swap_page, others)
 *        ->mapping->tree_lock
 *
 *  ->i_mutex
 *    ->i_mmap_rwsem		(truncate->unmap_mapping_range)
 *
 *  ->mmap_sem
 *    ->i_mmap_rwsem
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_sem
 *    ->lock_page		(access_process_vm)
 *
 *  ->i_mutex			(generic_perform_write)
 *    ->mmap_sem		(fault_in_pages_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock			(fs/fs-writeback.c)
 *    ->mapping->tree_lock	(__sync_single_inode)
 *
 *  ->i_mmap_rwsem
 *    ->anon_vma.lock		(vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock		(try_to_unmap_one)
 *    ->private_lock		(try_to_unmap_one)
 *    ->tree_lock		(try_to_unmap_one)
 *    ->zone_lru_lock(zone)	(follow_page->mark_page_accessed)
 *    ->zone_lru_lock(zone)	(check_pte_range->isolate_lru_page)
 *    ->private_lock		(page_remove_rmap->set_page_dirty)
 *    ->tree_lock		(page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock		(page_remove_rmap->set_page_dirty)
 *    ->inode->i_lock		(page_remove_rmap->set_page_dirty)
 *    ->memcg->move_lock	(page_remove_rmap->lock_page_memcg)
 *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
 *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
 *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
 *
 * ->i_mmap_rwsem
 *   ->tasklist_lock            (memory_failure, collect_procs_ao)
 */

static int page_cache_tree_insert(struct address_space *mapping,
				  struct page *page, void **shadowp)
{
	struct radix_tree_node *node;
	void **slot;
	int error;

	error = __radix_tree_create(&mapping->page_tree, page->index, 0,
				    &node, &slot);
	if (error)
		return error;
	if (*slot) {
		void *p;

		p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
		if (!radix_tree_exceptional_entry(p))
			return -EEXIST;

		mapping->nrexceptional--;
		if (!dax_mapping(mapping)) {
			if (shadowp)
				*shadowp = p;
		} else {
			/* DAX can replace empty locked entry with a hole */
			WARN_ON_ONCE(p !=
				dax_radix_locked_entry(0, RADIX_DAX_EMPTY));
			/* Wakeup waiters for exceptional entry lock */
			dax_wake_mapping_entry_waiter(mapping, page->index, p,
						      true);
		}
	}
	__radix_tree_replace(&mapping->page_tree, node, slot, page,
			     workingset_update_node, mapping);
	mapping->nrpages++;
	return 0;
}

static void page_cache_tree_delete(struct address_space *mapping,
				   struct page *page, void *shadow)
{
	int i, nr;

	/* hugetlb pages are represented by one entry in the radix tree */
	nr = PageHuge(page) ? 1 : hpage_nr_pages(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(nr != 1 && shadow, page);

	for (i = 0; i < nr; i++) {
		struct radix_tree_node *node;
		void **slot;

		__radix_tree_lookup(&mapping->page_tree, page->index + i,
				    &node, &slot);

		VM_BUG_ON_PAGE(!node && nr != 1, page);

		radix_tree_clear_tags(&mapping->page_tree, node, slot);
		__radix_tree_replace(&mapping->page_tree, node, slot, shadow,
				     workingset_update_node, mapping);
	}

	if (shadow) {
		mapping->nrexceptional += nr;
		/*
		 * Make sure the nrexceptional update is committed before
		 * the nrpages update so that final truncate racing
		 * with reclaim does not see both counters 0 at the
		 * same time and miss a shadow entry.
		 */
		smp_wmb();
	}
	mapping->nrpages -= nr;
}
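
/*
 * A minimal read-side sketch pairing with the smp_wmb() above. This is
 * an illustration only: the real check lives in the truncate path (see
 * mm/truncate.c), and the function name here is hypothetical, not a
 * kernel API.
 */
static inline bool example_mapping_empty(struct address_space *mapping)
{
	if (mapping->nrpages)
		return false;
	/* Pairs with smp_wmb() in page_cache_tree_delete() */
	smp_rmb();
	return mapping->nrexceptional == 0;
}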

/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the mapping's tree_lock.
 */
void __delete_from_page_cache(struct page *page, void *shadow)
{
	struct address_space *mapping = page->mapping;
	int nr = hpage_nr_pages(page);

	trace_mm_filemap_delete_from_page_cache(page);
	/*
	 * If we're uptodate, flush out into the cleancache, otherwise
	 * invalidate any existing cleancache entries.  We can't leave
	 * stale data around in the cleancache once our page is gone.
	 */
	if (PageUptodate(page) && PageMappedToDisk(page))
		cleancache_put_page(page);
	else
		cleancache_invalidate_page(mapping, page);

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_mapped(page), page);
	if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) {
		int mapcount;

		pr_alert("BUG: Bad page cache in process %s  pfn:%05lx\n",
			 current->comm, page_to_pfn(page));
		dump_page(page, "still mapped when deleted");
		dump_stack();
		add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);

		mapcount = page_mapcount(page);
		if (mapping_exiting(mapping) &&
		    page_count(page) >= mapcount + 2) {
			/*
			 * All vmas have already been torn down, so it's
			 * a good bet that actually the page is unmapped,
			 * and we'd prefer not to leak it: if we're wrong,
			 * some other bad page check should catch it later.
			 */
			page_mapcount_reset(page);
			page_ref_sub(page, mapcount);
		}
	}

	page_cache_tree_delete(mapping, page, shadow);

	page->mapping = NULL;
	/* Leave page->index set: truncation lookup relies upon it */

	/* hugetlb pages do not participate in page cache accounting. */
	if (!PageHuge(page))
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
	if (PageSwapBacked(page)) {
		__mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr);
		if (PageTransHuge(page))
			__dec_node_page_state(page, NR_SHMEM_THPS);
	} else {
		VM_BUG_ON_PAGE(PageTransHuge(page) && !PageHuge(page), page);
	}

	/*
	 * At this point the page must either have been written back or
	 * cleaned by truncate; a dirty page here signals a bug and loss
	 * of unwritten data.
	 *
	 * The check below fixes dirty accounting after removing the page
	 * entirely, but leaves PageDirty set: that has no effect on a
	 * truncated page, and the flag will be cleared anyway before the
	 * page is returned to the buddy allocator.
	 */
	if (WARN_ON_ONCE(PageDirty(page)))
		account_page_cleaned(page, mapping, inode_to_wb(mapping->host));
}

/**
 * delete_from_page_cache - delete page from page cache
 * @page: the page which the kernel is trying to remove from page cache
 *
 * This must be called only on pages that have been verified to be in the
 * page cache and locked.  It will never put the page into the free list;
 * the caller must hold a reference on the page.
 */
void delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long flags;
	void (*freepage)(struct page *);

	BUG_ON(!PageLocked(page));

	freepage = mapping->a_ops->freepage;

	spin_lock_irqsave(&mapping->tree_lock, flags);
	__delete_from_page_cache(page, NULL);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);

	if (freepage)
		freepage(page);

	if (PageTransHuge(page) && !PageHuge(page)) {
		page_ref_sub(page, HPAGE_PMD_NR);
		VM_BUG_ON_PAGE(page_count(page) <= 0, page);
	} else {
		put_page(page);
	}
}
EXPORT_SYMBOL(delete_from_page_cache);

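/*
 * Illustrative sketch of a caller honouring the rules above; this
 * helper is hypothetical and not part of filemap.c. The caller is
 * assumed to already hold its own reference on the page.
 */
static void __maybe_unused example_remove_from_cache(struct page *page)
{
	lock_page(page);
	/* Only pages still owned by a mapping may be deleted. */
	if (page->mapping)
		delete_from_page_cache(page);
	unlock_page(page);
}
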
int filemap_check_errors(struct address_space *mapping)
{
	int ret = 0;
	/* Check for outstanding write errors */
	if (test_bit(AS_ENOSPC, &mapping->flags) &&
	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_bit(AS_EIO, &mapping->flags) &&
	    test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}
EXPORT_SYMBOL(filemap_check_errors);

/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
			       loff_t end, int sync_mode)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = LONG_MAX,
		.range_start = start,
		.range_end = end,
	};

	if (!mapping_cap_writeback_dirty(mapping))
		return 0;

	wbc_attach_fdatawrite_inode(&wbc, mapping->host);
	ret = do_writepages(mapping, &wbc);
	wbc_detach_inode(&wbc);
	return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
				       int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
			     loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);
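
/*
 * Illustrative sketch (not part of filemap.c): kicking off
 * data-integrity writeback for the byte range a write just dirtied.
 * The helper name and calling context are hypothetical.
 */
static int __maybe_unused example_kick_writeback(struct address_space *mapping,
						 loff_t pos, size_t count)
{
	/* The end offset is inclusive, hence the "- 1". */
	return filemap_fdatawrite_range(mapping, pos, pos + count - 1);
}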

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping:	target address_space
 *
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);

/**
 * filemap_range_has_page - check if a page exists in range.
 * @mapping:           address space within which to check
 * @start_byte:        offset in bytes where the range starts
 * @end_byte:          offset in bytes where the range ends (inclusive)
 *
 * Find at least one page in the range supplied; this is usually used to
 * check whether direct writes in the range will trigger writeback.
 */
bool filemap_range_has_page(struct address_space *mapping,
			    loff_t start_byte, loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_SHIFT;
	pgoff_t end = end_byte >> PAGE_SHIFT;
	struct pagevec pvec;
	bool ret;

	if (end_byte < start_byte)
		return false;

	if (mapping->nrpages == 0)
		return false;

	pagevec_init(&pvec, 0);
	if (!pagevec_lookup(&pvec, mapping, index, 1))
		return false;
	ret = (pvec.pages[0]->index <= end);
	pagevec_release(&pvec);
	return ret;
}
EXPORT_SYMBOL(filemap_range_has_page);
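
/*
 * Illustrative sketch (not part of filemap.c): a non-blocking direct-I/O
 * style check deciding whether cached pages overlap a pending direct
 * write. The helper name and the -EAGAIN policy are assumptions for the
 * example.
 */
static int __maybe_unused example_check_dio_overlap(struct address_space *mapping,
						    loff_t pos, size_t count)
{
	/* A nowait direct write must not block on pagecache writeback. */
	if (filemap_range_has_page(mapping, pos, pos + count - 1))
		return -EAGAIN;
	return 0;
}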

static int __filemap_fdatawait_range(struct address_space *mapping,
				     loff_t start_byte, loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_SHIFT;
	pgoff_t end = end_byte >> PAGE_SHIFT;
	struct pagevec pvec;
	int nr_pages;
	int ret = 0;

	if (end_byte < start_byte)
		goto out;

	pagevec_init(&pvec, 0);
	while ((index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (TestClearPageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
out:
	return ret;
}

/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them.  Check error status of
 * the address space and return it.
 *
 * Since the error status of the address space is cleared by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
			    loff_t end_byte)
{
	int ret, ret2;

	ret = __filemap_fdatawait_range(mapping, start_byte, end_byte);
	ret2 = filemap_check_errors(mapping);
	if (!ret)
		ret = ret2;

	return ret;
}
EXPORT_SYMBOL(filemap_fdatawait_range);

/**
 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.  Unlike filemap_fdatawait(), this function
 * does not clear error status of the address space.
 *
 * Use this function if callers don't handle errors themselves.  Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8)
 */
void filemap_fdatawait_keep_errors(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return;

	__filemap_fdatawait_range(mapping, 0, i_size - 1);
}
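
/*
 * Illustrative sketch (not part of filemap.c): a sync(2)-style flusher
 * waiting on one inode's writeback without consuming the per-mapping
 * error bits, so that a later fsync() on the same file still sees them.
 * The function name is hypothetical.
 */
static void __maybe_unused example_sync_one(struct inode *inode)
{
	/*
	 * Keep AS_EIO/AS_ENOSPC set so a subsequent fsync() can still
	 * report the failure to its caller.
	 */
	filemap_fdatawait_keep_errors(inode->i_mapping);
}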

/**
 * filemap_fdatawait - wait for all under-writeback pages to complete
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.  Check error status of the address space
 * and return it.
 *
 * Since the error status of the address space is cleared by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 */
int filemap_fdatawait(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return 0;

	return filemap_fdatawait_range(mapping, 0, i_size - 1);
}
EXPORT_SYMBOL(filemap_fdatawait);

int filemap_write_and_wait(struct address_space *mapping)
{
	int err = 0;

	if ((!dax_mapping(mapping) && mapping->nrpages) ||
	    (dax_mapping(mapping) && mapping->nrexceptional)) {
		err = filemap_fdatawrite(mapping);
		/*
		 * Even if the above returned an error, the pages may have
		 * been written partially (e.g. -ENOSPC), so we wait for it.
		 * But -EIO is a special case: it may indicate that the worst
		 * thing (e.g. a bug) happened, so we avoid waiting for it.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait(mapping);
			if (!err)
				err = err2;
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:	the address_space for the pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that @lend is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file
 * (lend = -1).
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0;

	if ((!dax_mapping(mapping) && mapping->nrpages) ||
	    (dax_mapping(mapping) && mapping->nrexceptional)) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO) {
			int err2 = filemap_fdatawait_range(mapping,
							   lstart, lend);
			if (!err)
				err = err2;
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);
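
/*
 * Illustrative sketch (not part of filemap.c): the typical shape of a
 * filesystem ->fsync() method built on filemap_write_and_wait_range().
 * The function name and the absence of journal/metadata handling are
 * assumptions for the example.
 */
static int __maybe_unused example_fsync(struct file *file, loff_t start,
					loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);

	/* Flush dirty pagecache in [start, end] and wait for writeback. */
	return filemap_write_and_wait_range(inode->i_mapping, start, end);
}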

/**
 * replace_page_cache_page - replace a pagecache page with a new one
 * @old:	page to be replaced
 * @new:	page to replace with
 * @gfp_mask:	allocation mode
 *
 * This function replaces a page in the pagecache with a new one.  On
 * success it acquires the pagecache reference for the new page and
 * drops it for the old page.  Both the old and new pages must be
 * locked.  This function does not add the new page to the LRU; the
 * caller must do that.
 *
 * The remove + add is atomic.  The only way this function can fail is
 * memory allocation failure.
 */
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON_PAGE(!PageLocked(old), old);
	VM_BUG_ON_PAGE(!PageLocked(new), new);
	VM_BUG_ON_PAGE(new->mapping, new);

	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (!error) {
		struct address_space *mapping = old->mapping;
		void (*freepage)(struct page *);
		unsigned long flags;

		pgoff_t offset = old->index;
		freepage = mapping->a_ops->freepage;

		get_page(new);
		new->mapping = mapping;
		new->index = offset;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		__delete_from_page_cache(old, NULL);
		error = page_cache_tree_insert(mapping, new, NULL);
		BUG_ON(error);

		/*
		 * hugetlb pages do not participate in page cache accounting.
		 */
		if (!PageHuge(new))
			__inc_node_page_state(new, NR_FILE_PAGES);
		if (PageSwapBacked(new))
			__inc_node_page_state(new, NR_SHMEM);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
		mem_cgroup_migrate(old, new);
		radix_tree_preload_end();
		if (freepage)
			freepage(old);
		put_page(old);
	}

	return error;
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);
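
/*
 * Illustrative sketch (not part of filemap.c): atomically swapping a
 * stale pagecache page for a freshly prepared copy, roughly in the
 * style of fuse's page-stealing path. The helper name is hypothetical,
 * and the caller is assumed to hold its own references on both pages.
 */
static int __maybe_unused example_swap_page(struct page *old, struct page *new)
{
	int err;

	lock_page(old);
	lock_page(new);
	err = replace_page_cache_page(old, new, GFP_KERNEL);
	if (!err)
		lru_cache_add_file(new);	/* replace skips the LRU */
	unlock_page(new);
	unlock_page(old);
	return err;
}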

static int __add_to_page_cache_locked(struct page *page,
				      struct address_space *mapping,
				      pgoff_t offset, gfp_t gfp_mask,
				      void **shadowp)
{
	int huge = PageHuge(page);
	struct mem_cgroup *memcg;
	int error;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapBacked(page), page);

	if (!huge) {
		error = mem_cgroup_try_charge(page, current->mm,
					      gfp_mask, &memcg, false);
		if (error)
			return error;
	}

	error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (error) {
		if (!huge)
			mem_cgroup_cancel_charge(page, memcg, false);
		return error;
	}

	get_page(page);
	page->mapping = mapping;
	page->index = offset;

	spin_lock_irq(&mapping->tree_lock);
	error = page_cache_tree_insert(mapping, page, shadowp);
	radix_tree_preload_end();
	if (unlikely(error))
		goto err_insert;

	/* hugetlb pages do not participate in page cache accounting. */
	if (!huge)
		__inc_node_page_state(page, NR_FILE_PAGES);
	spin_unlock_irq(&mapping->tree_lock);
	if (!huge)
		mem_cgroup_commit_charge(page, memcg, false, false);
	trace_mm_filemap_add_to_page_cache(page);
	return 0;
err_insert:
	page->mapping = NULL;
	/* Leave page->index set: truncation relies upon it */
	spin_unlock_irq(&mapping->tree_lock);
	if (!huge)
		mem_cgroup_cancel_charge(page, memcg, false);
	put_page(page);
	return error;
}

/**
 * add_to_page_cache_locked - add a locked page to the pagecache
 * @page:	page to add
 * @mapping:	the page's address_space
 * @offset:	page index
 * @gfp_mask:	page allocation mode
 *
 * This function is used to add a page to the pagecache. The page must be
 * locked.  This function does not add the page to the LRU; the caller must
 * do that.
 */
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
			     pgoff_t offset, gfp_t gfp_mask)
{
	return __add_to_page_cache_locked(page, mapping, offset,
					  gfp_mask, NULL);
}
EXPORT_SYMBOL(add_to_page_cache_locked);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
			  pgoff_t offset, gfp_t gfp_mask)
{
	void *shadow = NULL;
	int ret;

	__SetPageLocked(page);
	ret = __add_to_page_cache_locked(page, mapping, offset,
					 gfp_mask, &shadow);
	if (unlikely(ret))
		__ClearPageLocked(page);
	else {
		/*
		 * The page might have been evicted from cache only
		 * recently, in which case it should be activated like
		 * any other repeatedly accessed page.
		 * The exception is pages getting rewritten; evicting other
		 * data from the working set, only to cache data that will
		 * get overwritten with something else, is a waste of memory.
		 */
		if (!(gfp_mask & __GFP_WRITE) &&
		    shadow && workingset_refault(shadow)) {
			SetPageActive(page);
			workingset_activation(page);
		} else
			ClearPageActive(page);
		lru_cache_add(page);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
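
/*
 * Illustrative sketch (not part of filemap.c): the read-side pattern of
 * allocating a page and inserting it into the pagecache before issuing
 * ->readpage(). The function name is hypothetical; the calls are the
 * real APIs used by the read and readahead code elsewhere in mm/.
 */
static struct page *__maybe_unused example_grab_for_read(struct address_space *mapping,
							 pgoff_t index, gfp_t gfp)
{
	struct page *page = __page_cache_alloc(gfp);

	if (!page)
		return NULL;
	if (add_to_page_cache_lru(page, mapping, index, gfp)) {
		/* Lost a race or failed; drop our allocation reference. */
		put_page(page);
		return NULL;
	}
	/* On success the page is locked and on the LRU, ready for I/O. */
	return page;
}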

#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
{
	int n;
	struct page *page;

	if (cpuset_do_page_mem_spread()) {
		unsigned int cpuset_mems_cookie;
		do {
			cpuset_mems_cookie = read_mems_allowed_begin();
			n = cpuset_mem_spread_node();
			page = __alloc_pages_node(n, gfp, 0);
		} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));

		return page;
	}
	return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. Rather than one waitqueue per page,
 * a hash table of waitqueues is used: the bucket discipline is to keep
 * all waiters whose pages hash to a bucket on the same queue, to wake
 * them all when any of those pages becomes available, and to have each
 * woken context check whether its own page actually became available.
 * This saves space at the cost of "thundering herd" phenomena during
 * rare hash collisions.
 */
#define PAGE_WAIT_TABLE_BITS 8
#define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)
static wait_queue_head_t page_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;

static wait_queue_head_t *page_waitqueue(struct page *page)
{
	return &page_wait_table[hash_ptr(page, PAGE_WAIT_TABLE_BITS)];
}

void __init pagecache_init(void)
{
	int i;

	for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)
		init_waitqueue_head(&page_wait_table[i]);

	page_writeback_init();
}

struct wait_page_key {
	struct page *page;
	int bit_nr;
	int page_match;
};

struct wait_page_queue {
	struct page *page;
	int bit_nr;
	wait_queue_entry_t wait;
};

static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
{
	struct wait_page_key *key = arg;
	struct wait_page_queue *wait_page
		= container_of(wait, struct wait_page_queue, wait);

	if (wait_page->page != key->page)
		return 0;
	key->page_match = 1;

	if (wait_page->bit_nr != key->bit_nr)
		return 0;
	if (test_bit(key->bit_nr, &key->page->flags))
		return 0;

	return autoremove_wake_function(wait, mode, sync, key);
}

static void wake_up_page_bit(struct page *page, int bit_nr)
{
	wait_queue_head_t *q = page_waitqueue(page);
	struct wait_page_key key;
	unsigned long flags;

	key.page = page;
	key.bit_nr = bit_nr;
	key.page_match = 0;

	spin_lock_irqsave(&q->lock, flags);
	__wake_up_locked_key(q, TASK_NORMAL, &key);
	/*
	 * It is possible for other pages to have collided on the waitqueue
	 * hash, so in that case check for a page match. That prevents a
	 * long-term waiter on a colliding page from keeping PageWaiters
	 * set on this page indefinitely.
	 *
	 * It is still possible to miss a case here, when we woke page
	 * waiters and removed them from the waitqueue, but there are still
	 * other page waiters.
	 */
	if (!waitqueue_active(q) || !key.page_match) {
		ClearPageWaiters(page);
		/*
		 * It's possible to miss clearing Waiters here, when we woke
		 * our page waiters, but the hashed waitqueue has waiters for
		 * other pages on it.
		 *
		 * That's okay, it's a rare case. The next waker will clear it.
		 */
	}
	spin_unlock_irqrestore(&q->lock, flags);
}
| Nicholas Piggin | 74d81bf | 2017-02-22 15:44:41 -0800 | [diff] [blame] | 857 |  | 
|  | 858 | static void wake_up_page(struct page *page, int bit) | 
|  | 859 | { | 
|  | 860 | if (!PageWaiters(page)) | 
|  | 861 | return; | 
|  | 862 | wake_up_page_bit(page, bit); | 
|  | 863 | } | 
| Nicholas Piggin | 6290602 | 2016-12-25 13:00:30 +1000 | [diff] [blame] | 864 |  | 
|  | 865 | static inline int wait_on_page_bit_common(wait_queue_head_t *q, | 
|  | 866 | struct page *page, int bit_nr, int state, bool lock) | 
|  | 867 | { | 
|  | 868 | struct wait_page_queue wait_page; | 
| Ingo Molnar | ac6424b | 2017-06-20 12:06:13 +0200 | [diff] [blame] | 869 | wait_queue_entry_t *wait = &wait_page.wait; | 
| Nicholas Piggin | 6290602 | 2016-12-25 13:00:30 +1000 | [diff] [blame] | 870 | int ret = 0; | 
|  | 871 |  | 
|  | 872 | init_wait(wait); | 
|  | 873 | wait->func = wake_page_function; | 
|  | 874 | wait_page.page = page; | 
|  | 875 | wait_page.bit_nr = bit_nr; | 
|  | 876 |  | 
|  | 877 | for (;;) { | 
|  | 878 | spin_lock_irq(&q->lock); | 
|  | 879 |  | 
| Ingo Molnar | 2055da9 | 2017-06-20 12:06:46 +0200 | [diff] [blame] | 880 | if (likely(list_empty(&wait->entry))) { | 
| Nicholas Piggin | 6290602 | 2016-12-25 13:00:30 +1000 | [diff] [blame] | 881 | if (lock) | 
| Ingo Molnar | ac6424b | 2017-06-20 12:06:13 +0200 | [diff] [blame] | 882 | __add_wait_queue_entry_tail_exclusive(q, wait); | 
| Nicholas Piggin | 6290602 | 2016-12-25 13:00:30 +1000 | [diff] [blame] | 883 | else | 
|  | 884 | __add_wait_queue(q, wait); | 
|  | 885 | SetPageWaiters(page); | 
|  | 886 | } | 
|  | 887 |  | 
|  | 888 | set_current_state(state); | 
|  | 889 |  | 
|  | 890 | spin_unlock_irq(&q->lock); | 
|  | 891 |  | 
|  | 892 | if (likely(test_bit(bit_nr, &page->flags))) { | 
|  | 893 | io_schedule(); | 
|  | 894 | if (unlikely(signal_pending_state(state, current))) { | 
|  | 895 | ret = -EINTR; | 
|  | 896 | break; | 
|  | 897 | } | 
|  | 898 | } | 
|  | 899 |  | 
|  | 900 | if (lock) { | 
|  | 901 | if (!test_and_set_bit_lock(bit_nr, &page->flags)) | 
|  | 902 | break; | 
|  | 903 | } else { | 
|  | 904 | if (!test_bit(bit_nr, &page->flags)) | 
|  | 905 | break; | 
|  | 906 | } | 
|  | 907 | } | 
|  | 908 |  | 
|  | 909 | finish_wait(q, wait); | 
|  | 910 |  | 
|  | 911 | /* | 
|  | 912 | * A signal could leave PageWaiters set. Clearing it here if | 
|  | 913 | * !waitqueue_active would be possible (by open-coding finish_wait), | 
|  | 914 | * but it would still fail to catch it in the case of wait hash collision. We | 
|  | 915 | * already can fail to clear wait hash collision cases, so don't | 
|  | 916 | * bother with signals either. | 
|  | 917 | */ | 
|  | 918 |  | 
|  | 919 | return ret; | 
|  | 920 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 921 |  | 
| Harvey Harrison | 920c7a5 | 2008-02-04 22:29:26 -0800 | [diff] [blame] | 922 | void wait_on_page_bit(struct page *page, int bit_nr) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 923 | { | 
| Nicholas Piggin | 6290602 | 2016-12-25 13:00:30 +1000 | [diff] [blame] | 924 | wait_queue_head_t *q = page_waitqueue(page); | 
|  | 925 | wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, false); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 926 | } | 
|  | 927 | EXPORT_SYMBOL(wait_on_page_bit); | 
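|  |  |  | 
|  |  | /* | 
|  |  | * Usage sketch (illustrative, not part of this file): callers normally | 
|  |  | * reach wait_on_page_bit() through thin inline wrappers.  The helper | 
|  |  | * below mirrors the wait_on_page_writeback() wrapper found in | 
|  |  | * include/linux/pagemap.h around this era; it skips the hashed | 
|  |  | * waitqueue lookup entirely when PG_writeback is not set. | 
|  |  | */ | 
|  |  | static inline void example_wait_on_page_writeback(struct page *page) | 
|  |  | { | 
|  |  |         if (PageWriteback(page)) | 
|  |  |                 wait_on_page_bit(page, PG_writeback); | 
|  |  | } | 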
|  | 928 |  | 
| KOSAKI Motohiro | f62e00c | 2011-05-24 17:11:29 -0700 | [diff] [blame] | 929 | int wait_on_page_bit_killable(struct page *page, int bit_nr) | 
|  | 930 | { | 
| Nicholas Piggin | 6290602 | 2016-12-25 13:00:30 +1000 | [diff] [blame] | 931 | wait_queue_head_t *q = page_waitqueue(page); | 
|  | 932 | return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, false); | 
| KOSAKI Motohiro | f62e00c | 2011-05-24 17:11:29 -0700 | [diff] [blame] | 933 | } | 
|  | 934 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 935 | /** | 
| David Howells | 385e1ca5f | 2009-04-03 16:42:39 +0100 | [diff] [blame] | 936 | * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue | 
| Randy Dunlap | 697f619 | 2009-04-13 14:39:54 -0700 | [diff] [blame] | 937 | * @page: Page defining the wait queue of interest | 
|  | 938 | * @waiter: Waiter to add to the queue | 
| David Howells | 385e1ca5f | 2009-04-03 16:42:39 +0100 | [diff] [blame] | 939 | * | 
|  | 940 | * Add an arbitrary @waiter to the wait queue for the nominated @page. | 
|  | 941 | */ | 
| Ingo Molnar | ac6424b | 2017-06-20 12:06:13 +0200 | [diff] [blame] | 942 | void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter) | 
| David Howells | 385e1ca5f | 2009-04-03 16:42:39 +0100 | [diff] [blame] | 943 | { | 
|  | 944 | wait_queue_head_t *q = page_waitqueue(page); | 
|  | 945 | unsigned long flags; | 
|  | 946 |  | 
|  | 947 | spin_lock_irqsave(&q->lock, flags); | 
|  | 948 | __add_wait_queue(q, waiter); | 
| Nicholas Piggin | 6290602 | 2016-12-25 13:00:30 +1000 | [diff] [blame] | 949 | SetPageWaiters(page); | 
| David Howells | 385e1ca5f | 2009-04-03 16:42:39 +0100 | [diff] [blame] | 950 | spin_unlock_irqrestore(&q->lock, flags); | 
|  | 951 | } | 
|  | 952 | EXPORT_SYMBOL_GPL(add_page_wait_queue); | 
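|  |  |  | 
|  |  | /* | 
|  |  | * Usage sketch (hypothetical caller, names invented for illustration): | 
|  |  | * a subsystem can install its own wait entry with a custom wake | 
|  |  | * function to get a callback when a page bit is woken.  Because the | 
|  |  | * waitqueue is hashed, the wake function must re-check its own page | 
|  |  | * rather than trust the wakeup. | 
|  |  | */ | 
|  |  | static int example_page_monitor_wake(wait_queue_entry_t *wait, | 
|  |  |                                      unsigned mode, int sync, void *key) | 
|  |  | { | 
|  |  |         struct page *page = wait->private; | 
|  |  |  | 
|  |  |         /* Wakeups for other pages land on this hashed queue too. */ | 
|  |  |         if (test_bit(PG_locked, &page->flags)) | 
|  |  |                 return 0; | 
|  |  |         list_del_init(&wait->entry); | 
|  |  |         return 1; | 
|  |  | } | 
|  |  |  | 
|  |  | static void example_monitor_page(struct page *page, wait_queue_entry_t *wait) | 
|  |  | { | 
|  |  |         wait->flags = 0; | 
|  |  |         wait->private = page; | 
|  |  |         wait->func = example_page_monitor_wake; | 
|  |  |         INIT_LIST_HEAD(&wait->entry); | 
|  |  |         add_page_wait_queue(page, wait); | 
|  |  | } | 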
|  | 953 |  | 
| Linus Torvalds | b91e130 | 2016-12-27 11:40:38 -0800 | [diff] [blame] | 954 | #ifndef clear_bit_unlock_is_negative_byte | 
|  | 955 |  | 
|  | 956 | /* | 
|  | 957 | * PG_waiters is the high bit in the same byte as PG_locked. | 
|  | 958 | * | 
|  | 959 | * On x86 (and on many other architectures), we can clear PG_locked and | 
|  | 960 | * test the sign bit at the same time. But if the architecture does | 
|  | 961 | * not support that special operation, we just do this all by hand | 
|  | 962 | * instead. | 
|  | 963 | * | 
|  | 964 | * The read of PG_waiters has to be after (or concurrently with) PG_locked | 
|  | 965 | * being cleared, but a memory barrier should be unnecessary since it is | 
|  | 966 | * in the same byte as PG_locked. | 
|  | 967 | */ | 
|  | 968 | static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem) | 
|  | 969 | { | 
|  | 970 | clear_bit_unlock(nr, mem); | 
|  | 971 | /* smp_mb__after_atomic(); */ | 
| Olof Johansson | 98473f9 | 2016-12-29 14:16:07 -0800 | [diff] [blame] | 972 | return test_bit(PG_waiters, mem); | 
| Linus Torvalds | b91e130 | 2016-12-27 11:40:38 -0800 | [diff] [blame] | 973 | } | 
|  | 974 |  | 
|  | 975 | #endif | 
|  | 976 |  | 
| David Howells | 385e1ca5f | 2009-04-03 16:42:39 +0100 | [diff] [blame] | 977 | /** | 
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 978 | * unlock_page - unlock a locked page | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 979 | * @page: the page | 
|  | 980 | * | 
|  | 981 | * Unlocks the page and wakes up sleepers in wait_on_page_locked(). | 
|  | 982 | * Also wakes sleepers in wait_on_page_writeback() because the wakeup | 
| Masanari Iida | da3dae5 | 2014-09-09 01:27:23 +0900 | [diff] [blame] | 983 | * mechanism between PageLocked pages and PageWriteback pages is shared. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 984 | * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep. | 
|  | 985 | * | 
| Linus Torvalds | b91e130 | 2016-12-27 11:40:38 -0800 | [diff] [blame] | 986 | * Note that this depends on PG_waiters being the sign bit in the byte | 
|  | 987 | * that contains PG_locked - thus the BUILD_BUG_ON(). That allows us to | 
|  | 988 | * clear the PG_locked bit and test PG_waiters at the same time fairly | 
|  | 989 | * portably (architectures that do LL/SC can test any bit, while x86 can | 
|  | 990 | * test the sign bit). | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 991 | */ | 
| Harvey Harrison | 920c7a5 | 2008-02-04 22:29:26 -0800 | [diff] [blame] | 992 | void unlock_page(struct page *page) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 993 | { | 
| Linus Torvalds | b91e130 | 2016-12-27 11:40:38 -0800 | [diff] [blame] | 994 | BUILD_BUG_ON(PG_waiters != 7); | 
| Kirill A. Shutemov | 48c935a | 2016-01-15 16:51:24 -0800 | [diff] [blame] | 995 | page = compound_head(page); | 
| Sasha Levin | 309381fea | 2014-01-23 15:52:54 -0800 | [diff] [blame] | 996 | VM_BUG_ON_PAGE(!PageLocked(page), page); | 
| Linus Torvalds | b91e130 | 2016-12-27 11:40:38 -0800 | [diff] [blame] | 997 | if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags)) | 
|  | 998 | wake_up_page_bit(page, PG_locked); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 999 | } | 
|  | 1000 | EXPORT_SYMBOL(unlock_page); | 
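|  |  |  | 
|  |  | /* | 
|  |  | * Usage sketch (illustrative helper): the canonical pairing.  lock_page() | 
|  |  | * (an inline wrapper around __lock_page(), sketched further below) pins | 
|  |  | * the page state; unlock_page() releases it and wakes any waiters. | 
|  |  | */ | 
|  |  | static void example_with_page_locked(struct page *page) | 
|  |  | { | 
|  |  |         lock_page(page); | 
|  |  |         /* page->mapping and the page contents are stable here */ | 
|  |  |         unlock_page(page); | 
|  |  | } | 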
|  | 1001 |  | 
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 1002 | /** | 
|  | 1003 | * end_page_writeback - end writeback against a page | 
|  | 1004 | * @page: the page | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1005 | */ | 
|  | 1006 | void end_page_writeback(struct page *page) | 
|  | 1007 | { | 
| Mel Gorman | 888cf2d | 2014-06-04 16:10:34 -0700 | [diff] [blame] | 1008 | /* | 
|  | 1009 | * TestClearPageReclaim could be used here but it is an atomic | 
|  | 1010 | * operation and overkill in this particular case. Failing to | 
|  | 1011 | * shuffle a page marked for immediate reclaim is too mild to | 
|  | 1012 | * justify taking an atomic operation penalty at the end of | 
|  | 1013 | * every page writeback. | 
|  | 1014 | */ | 
|  | 1015 | if (PageReclaim(page)) { | 
|  | 1016 | ClearPageReclaim(page); | 
| Miklos Szeredi | ac6aadb | 2008-04-28 02:12:38 -0700 | [diff] [blame] | 1017 | rotate_reclaimable_page(page); | 
| Mel Gorman | 888cf2d | 2014-06-04 16:10:34 -0700 | [diff] [blame] | 1018 | } | 
| Miklos Szeredi | ac6aadb | 2008-04-28 02:12:38 -0700 | [diff] [blame] | 1019 |  | 
|  | 1020 | if (!test_clear_page_writeback(page)) | 
|  | 1021 | BUG(); | 
|  | 1022 |  | 
| Peter Zijlstra | 4e857c5 | 2014-03-17 18:06:10 +0100 | [diff] [blame] | 1023 | smp_mb__after_atomic(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1024 | wake_up_page(page, PG_writeback); | 
|  | 1025 | } | 
|  | 1026 | EXPORT_SYMBOL(end_page_writeback); | 
|  | 1027 |  | 
| Matthew Wilcox | 57d9984 | 2014-06-04 16:07:45 -0700 | [diff] [blame] | 1028 | /* | 
|  | 1029 | * After completing I/O on a page, call this routine to update the page | 
|  | 1030 | * flags appropriately | 
|  | 1031 | */ | 
| Jens Axboe | c11f0c0 | 2016-08-05 08:11:04 -0600 | [diff] [blame] | 1032 | void page_endio(struct page *page, bool is_write, int err) | 
| Matthew Wilcox | 57d9984 | 2014-06-04 16:07:45 -0700 | [diff] [blame] | 1033 | { | 
| Jens Axboe | c11f0c0 | 2016-08-05 08:11:04 -0600 | [diff] [blame] | 1034 | if (!is_write) { | 
| Matthew Wilcox | 57d9984 | 2014-06-04 16:07:45 -0700 | [diff] [blame] | 1035 | if (!err) { | 
|  | 1036 | SetPageUptodate(page); | 
|  | 1037 | } else { | 
|  | 1038 | ClearPageUptodate(page); | 
|  | 1039 | SetPageError(page); | 
|  | 1040 | } | 
|  | 1041 | unlock_page(page); | 
| Mike Christie | abf5454 | 2016-08-04 14:23:34 -0600 | [diff] [blame] | 1042 | } else { | 
| Matthew Wilcox | 57d9984 | 2014-06-04 16:07:45 -0700 | [diff] [blame] | 1043 | if (err) { | 
| Minchan Kim | dd8416c | 2017-02-24 14:59:59 -0800 | [diff] [blame] | 1044 | struct address_space *mapping; | 
|  | 1045 |  | 
| Matthew Wilcox | 57d9984 | 2014-06-04 16:07:45 -0700 | [diff] [blame] | 1046 | SetPageError(page); | 
| Minchan Kim | dd8416c | 2017-02-24 14:59:59 -0800 | [diff] [blame] | 1047 | mapping = page_mapping(page); | 
|  | 1048 | if (mapping) | 
|  | 1049 | mapping_set_error(mapping, err); | 
| Matthew Wilcox | 57d9984 | 2014-06-04 16:07:45 -0700 | [diff] [blame] | 1050 | } | 
|  | 1051 | end_page_writeback(page); | 
|  | 1052 | } | 
|  | 1053 | } | 
|  | 1054 | EXPORT_SYMBOL_GPL(page_endio); | 
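|  |  |  | 
|  |  | /* | 
|  |  | * Usage sketch: a bio completion handler ending page I/O for each | 
|  |  | * segment, roughly as fs/mpage.c does in this era.  The block-layer | 
|  |  | * helpers (bi_status, blk_status_to_errno()) are assumed as of the | 
|  |  | * 4.13 time frame. | 
|  |  | */ | 
|  |  | static void example_end_io(struct bio *bio) | 
|  |  | { | 
|  |  |         struct bio_vec *bvec; | 
|  |  |         int i; | 
|  |  |  | 
|  |  |         bio_for_each_segment_all(bvec, bio, i) | 
|  |  |                 page_endio(bvec->bv_page, op_is_write(bio_op(bio)), | 
|  |  |                            blk_status_to_errno(bio->bi_status)); | 
|  |  |         bio_put(bio); | 
|  |  | } | 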
|  | 1055 |  | 
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 1056 | /** | 
|  | 1057 | * __lock_page - get a lock on the page, assuming we need to sleep to get it | 
| Randy Dunlap | 8706675 | 2017-02-22 15:44:44 -0800 | [diff] [blame] | 1058 | * @__page: the page to lock | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1059 | */ | 
| Nicholas Piggin | 6290602 | 2016-12-25 13:00:30 +1000 | [diff] [blame] | 1060 | void __lock_page(struct page *__page) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1061 | { | 
| Nicholas Piggin | 6290602 | 2016-12-25 13:00:30 +1000 | [diff] [blame] | 1062 | struct page *page = compound_head(__page); | 
|  | 1063 | wait_queue_head_t *q = page_waitqueue(page); | 
|  | 1064 | wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, true); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1065 | } | 
|  | 1066 | EXPORT_SYMBOL(__lock_page); | 
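|  |  |  | 
|  |  | /* | 
|  |  | * For reference, a sketch of the lock_page() wrapper as found in | 
|  |  | * include/linux/pagemap.h around this era: the slow path above is only | 
|  |  | * entered when the fast-path trylock fails. | 
|  |  | */ | 
|  |  | static inline void example_lock_page(struct page *page) | 
|  |  | { | 
|  |  |         might_sleep(); | 
|  |  |         if (!trylock_page(page)) | 
|  |  |                 __lock_page(page); | 
|  |  | } | 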
|  | 1067 |  | 
| Nicholas Piggin | 6290602 | 2016-12-25 13:00:30 +1000 | [diff] [blame] | 1068 | int __lock_page_killable(struct page *__page) | 
| Matthew Wilcox | 2687a35 | 2007-12-06 11:18:49 -0500 | [diff] [blame] | 1069 | { | 
| Nicholas Piggin | 6290602 | 2016-12-25 13:00:30 +1000 | [diff] [blame] | 1070 | struct page *page = compound_head(__page); | 
|  | 1071 | wait_queue_head_t *q = page_waitqueue(page); | 
|  | 1072 | return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE, true); | 
| Matthew Wilcox | 2687a35 | 2007-12-06 11:18:49 -0500 | [diff] [blame] | 1073 | } | 
| Evgeniy Polyakov | 18bc0bb | 2009-02-09 17:02:42 +0300 | [diff] [blame] | 1074 | EXPORT_SYMBOL_GPL(__lock_page_killable); | 
| Matthew Wilcox | 2687a35 | 2007-12-06 11:18:49 -0500 | [diff] [blame] | 1075 |  | 
| Paul Cassella | 9a95f3c | 2014-08-06 16:07:24 -0700 | [diff] [blame] | 1076 | /* | 
|  | 1077 | * Return values: | 
|  | 1078 | * 1 - page is locked; mmap_sem is still held. | 
|  | 1079 | * 0 - page is not locked. | 
|  | 1080 | *     mmap_sem has been released (up_read()), unless flags had both | 
|  | 1081 | *     FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in | 
|  | 1082 | *     which case mmap_sem is still held. | 
|  | 1083 | * | 
|  | 1084 | * If neither ALLOW_RETRY nor KILLABLE is set, this will always return 1 | 
|  | 1085 | * with the page locked and the mmap_sem unperturbed. | 
|  | 1086 | */ | 
| Michel Lespinasse | d065bd8 | 2010-10-26 14:21:57 -0700 | [diff] [blame] | 1087 | int __lock_page_or_retry(struct page *page, struct mm_struct *mm, | 
|  | 1088 | unsigned int flags) | 
|  | 1089 | { | 
| KOSAKI Motohiro | 37b23e0 | 2011-05-24 17:11:30 -0700 | [diff] [blame] | 1090 | if (flags & FAULT_FLAG_ALLOW_RETRY) { | 
|  | 1091 | /* | 
|  | 1092 | * CAUTION! In this case, mmap_sem is not released | 
|  | 1093 | * even though we return 0. | 
|  | 1094 | */ | 
|  | 1095 | if (flags & FAULT_FLAG_RETRY_NOWAIT) | 
|  | 1096 | return 0; | 
|  | 1097 |  | 
|  | 1098 | up_read(&mm->mmap_sem); | 
|  | 1099 | if (flags & FAULT_FLAG_KILLABLE) | 
|  | 1100 | wait_on_page_locked_killable(page); | 
|  | 1101 | else | 
| Gleb Natapov | 318b275 | 2011-03-22 16:30:51 -0700 | [diff] [blame] | 1102 | wait_on_page_locked(page); | 
| Michel Lespinasse | d065bd8 | 2010-10-26 14:21:57 -0700 | [diff] [blame] | 1103 | return 0; | 
| KOSAKI Motohiro | 37b23e0 | 2011-05-24 17:11:30 -0700 | [diff] [blame] | 1104 | } else { | 
|  | 1105 | if (flags & FAULT_FLAG_KILLABLE) { | 
|  | 1106 | int ret; | 
|  | 1107 |  | 
|  | 1108 | ret = __lock_page_killable(page); | 
|  | 1109 | if (ret) { | 
|  | 1110 | up_read(&mm->mmap_sem); | 
|  | 1111 | return 0; | 
|  | 1112 | } | 
|  | 1113 | } else | 
|  | 1114 | __lock_page(page); | 
|  | 1115 | return 1; | 
| Michel Lespinasse | d065bd8 | 2010-10-26 14:21:57 -0700 | [diff] [blame] | 1116 | } | 
|  | 1117 | } | 
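|  |  |  | 
|  |  | /* | 
|  |  | * Usage sketch: fault handlers call this through an inline wrapper | 
|  |  | * (modelled on lock_page_or_retry() in include/linux/pagemap.h of this | 
|  |  | * era) that avoids the slow path when the trylock succeeds. | 
|  |  | */ | 
|  |  | static inline int example_lock_page_or_retry(struct page *page, | 
|  |  |                                              struct mm_struct *mm, | 
|  |  |                                              unsigned int flags) | 
|  |  | { | 
|  |  |         might_sleep(); | 
|  |  |         return trylock_page(page) || __lock_page_or_retry(page, mm, flags); | 
|  |  | } | 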
|  | 1118 |  | 
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 1119 | /** | 
| Johannes Weiner | e7b563b | 2014-04-03 14:47:44 -0700 | [diff] [blame] | 1120 | * page_cache_next_hole - find the next hole (not-present entry) | 
|  | 1121 | * @mapping: mapping | 
|  | 1122 | * @index: index | 
|  | 1123 | * @max_scan: maximum range to search | 
|  | 1124 | * | 
|  | 1125 | * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the | 
|  | 1126 | * lowest indexed hole. | 
|  | 1127 | * | 
|  | 1128 | * Returns: the index of the hole if found, otherwise returns an index | 
|  | 1129 | * outside of the set specified (in which case 'return - index >= | 
|  | 1130 | * max_scan' will be true). In rare cases of index wrap-around, 0 will | 
|  | 1131 | * be returned. | 
|  | 1132 | * | 
|  | 1133 | * page_cache_next_hole may be called under rcu_read_lock. However, | 
|  | 1134 | * like radix_tree_gang_lookup, this will not atomically search a | 
|  | 1135 | * snapshot of the tree at a single point in time. For example, if a | 
|  | 1136 | * hole is created at index 5, then subsequently a hole is created at | 
|  | 1137 | * index 10, page_cache_next_hole covering both indexes may return 10 | 
|  | 1138 | * if called under rcu_read_lock. | 
|  | 1139 | */ | 
|  | 1140 | pgoff_t page_cache_next_hole(struct address_space *mapping, | 
|  | 1141 | pgoff_t index, unsigned long max_scan) | 
|  | 1142 | { | 
|  | 1143 | unsigned long i; | 
|  | 1144 |  | 
|  | 1145 | for (i = 0; i < max_scan; i++) { | 
| Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1146 | struct page *page; | 
|  | 1147 |  | 
|  | 1148 | page = radix_tree_lookup(&mapping->page_tree, index); | 
|  | 1149 | if (!page || radix_tree_exceptional_entry(page)) | 
| Johannes Weiner | e7b563b | 2014-04-03 14:47:44 -0700 | [diff] [blame] | 1150 | break; | 
|  | 1151 | index++; | 
|  | 1152 | if (index == 0) | 
|  | 1153 | break; | 
|  | 1154 | } | 
|  | 1155 |  | 
|  | 1156 | return index; | 
|  | 1157 | } | 
|  | 1158 | EXPORT_SYMBOL(page_cache_next_hole); | 
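|  |  |  | 
|  |  | /* | 
|  |  | * Usage sketch (hypothetical helper): probe whether the next @count | 
|  |  | * pages starting at @index are all present, e.g. before deciding to | 
|  |  | * skip readahead.  Assumes @index + @count does not wrap. | 
|  |  | */ | 
|  |  | static bool example_range_is_cached(struct address_space *mapping, | 
|  |  |                                     pgoff_t index, unsigned long count) | 
|  |  | { | 
|  |  |         return page_cache_next_hole(mapping, index, count) >= index + count; | 
|  |  | } | 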
|  | 1159 |  | 
|  | 1160 | /** | 
|  | 1161 | * page_cache_prev_hole - find the prev hole (not-present entry) | 
|  | 1162 | * @mapping: mapping | 
|  | 1163 | * @index: index | 
|  | 1164 | * @max_scan: maximum range to search | 
|  | 1165 | * | 
|  | 1166 | * Search backwards in the range [max(index-max_scan+1, 0), index] for | 
|  | 1167 | * the first hole. | 
|  | 1168 | * | 
|  | 1169 | * Returns: the index of the hole if found, otherwise returns an index | 
|  | 1170 | * outside of the set specified (in which case 'index - return >= | 
|  | 1171 | * max_scan' will be true). In rare cases of wrap-around, ULONG_MAX | 
|  | 1172 | * will be returned. | 
|  | 1173 | * | 
|  | 1174 | * page_cache_prev_hole may be called under rcu_read_lock. However, | 
|  | 1175 | * like radix_tree_gang_lookup, this will not atomically search a | 
|  | 1176 | * snapshot of the tree at a single point in time. For example, if a | 
|  | 1177 | * hole is created at index 10, then subsequently a hole is created at | 
|  | 1178 | * index 5, page_cache_prev_hole covering both indexes may return 5 if | 
|  | 1179 | * called under rcu_read_lock. | 
|  | 1180 | */ | 
|  | 1181 | pgoff_t page_cache_prev_hole(struct address_space *mapping, | 
|  | 1182 | pgoff_t index, unsigned long max_scan) | 
|  | 1183 | { | 
|  | 1184 | unsigned long i; | 
|  | 1185 |  | 
|  | 1186 | for (i = 0; i < max_scan; i++) { | 
| Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1187 | struct page *page; | 
|  | 1188 |  | 
|  | 1189 | page = radix_tree_lookup(&mapping->page_tree, index); | 
|  | 1190 | if (!page || radix_tree_exceptional_entry(page)) | 
| Johannes Weiner | e7b563b | 2014-04-03 14:47:44 -0700 | [diff] [blame] | 1191 | break; | 
|  | 1192 | index--; | 
|  | 1193 | if (index == ULONG_MAX) | 
|  | 1194 | break; | 
|  | 1195 | } | 
|  | 1196 |  | 
|  | 1197 | return index; | 
|  | 1198 | } | 
|  | 1199 | EXPORT_SYMBOL(page_cache_prev_hole); | 
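|  |  |  | 
|  |  | /* | 
|  |  | * Usage sketch: the readahead code uses the backward scan to estimate | 
|  |  | * how many pages before @offset are already cached (compare | 
|  |  | * count_history_pages() in mm/readahead.c of this era).  Assumes | 
|  |  | * @offset > 0. | 
|  |  | */ | 
|  |  | static pgoff_t example_count_history_pages(struct address_space *mapping, | 
|  |  |                                            pgoff_t offset, unsigned long max) | 
|  |  | { | 
|  |  |         pgoff_t head; | 
|  |  |  | 
|  |  |         rcu_read_lock(); | 
|  |  |         head = page_cache_prev_hole(mapping, offset - 1, max); | 
|  |  |         rcu_read_unlock(); | 
|  |  |  | 
|  |  |         return offset - 1 - head; | 
|  |  | } | 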
|  | 1200 |  | 
|  | 1201 | /** | 
| Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1202 | * find_get_entry - find and get a page cache entry | 
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 1203 | * @mapping: the address_space to search | 
| Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1204 | * @offset: the page cache index | 
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 1205 | * | 
| Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1206 | * Looks up the page cache slot at @mapping & @offset.  If there is a | 
|  | 1207 | * page cache page, it is returned with an increased refcount. | 
|  | 1208 | * | 
| Johannes Weiner | 139b6a6 | 2014-05-06 12:50:05 -0700 | [diff] [blame] | 1209 | * If the slot holds a shadow entry of a previously evicted page, or a | 
|  | 1210 | * swap entry from shmem/tmpfs, it is returned. | 
| Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1211 | * | 
|  | 1212 | * Otherwise, %NULL is returned. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1213 | */ | 
| Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1214 | struct page *find_get_entry(struct address_space *mapping, pgoff_t offset) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1215 | { | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1216 | void **pagep; | 
| Kirill A. Shutemov | 8392937 | 2016-07-26 15:26:04 -0700 | [diff] [blame] | 1217 | struct page *head, *page; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1218 |  | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1219 | rcu_read_lock(); | 
|  | 1220 | repeat: | 
|  | 1221 | page = NULL; | 
|  | 1222 | pagep = radix_tree_lookup_slot(&mapping->page_tree, offset); | 
|  | 1223 | if (pagep) { | 
|  | 1224 | page = radix_tree_deref_slot(pagep); | 
| Nick Piggin | 27d20fd | 2010-11-11 14:05:19 -0800 | [diff] [blame] | 1225 | if (unlikely(!page)) | 
|  | 1226 | goto out; | 
| Hugh Dickins | a2c16d6 | 2011-08-03 16:21:19 -0700 | [diff] [blame] | 1227 | if (radix_tree_exception(page)) { | 
| Hugh Dickins | 8079b1c | 2011-08-03 16:21:28 -0700 | [diff] [blame] | 1228 | if (radix_tree_deref_retry(page)) | 
|  | 1229 | goto repeat; | 
|  | 1230 | /* | 
| Johannes Weiner | 139b6a6 | 2014-05-06 12:50:05 -0700 | [diff] [blame] | 1231 | * A shadow entry of a recently evicted page, | 
|  | 1232 | * or a swap entry from shmem/tmpfs.  Return | 
|  | 1233 | * it without attempting to raise page count. | 
| Hugh Dickins | 8079b1c | 2011-08-03 16:21:28 -0700 | [diff] [blame] | 1234 | */ | 
|  | 1235 | goto out; | 
| Hugh Dickins | a2c16d6 | 2011-08-03 16:21:19 -0700 | [diff] [blame] | 1236 | } | 
| Kirill A. Shutemov | 8392937 | 2016-07-26 15:26:04 -0700 | [diff] [blame] | 1237 |  | 
|  | 1238 | head = compound_head(page); | 
|  | 1239 | if (!page_cache_get_speculative(head)) | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1240 | goto repeat; | 
|  | 1241 |  | 
| Kirill A. Shutemov | 8392937 | 2016-07-26 15:26:04 -0700 | [diff] [blame] | 1242 | /* The page was split under us? */ | 
|  | 1243 | if (compound_head(page) != head) { | 
|  | 1244 | put_page(head); | 
|  | 1245 | goto repeat; | 
|  | 1246 | } | 
|  | 1247 |  | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1248 | /* | 
|  | 1249 | * Has the page moved? | 
|  | 1250 | * This is part of the lockless pagecache protocol. See | 
|  | 1251 | * include/linux/pagemap.h for details. | 
|  | 1252 | */ | 
|  | 1253 | if (unlikely(page != *pagep)) { | 
| Kirill A. Shutemov | 8392937 | 2016-07-26 15:26:04 -0700 | [diff] [blame] | 1254 | put_page(head); | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1255 | goto repeat; | 
|  | 1256 | } | 
|  | 1257 | } | 
| Nick Piggin | 27d20fd | 2010-11-11 14:05:19 -0800 | [diff] [blame] | 1258 | out: | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1259 | rcu_read_unlock(); | 
|  | 1260 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1261 | return page; | 
|  | 1262 | } | 
| Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1263 | EXPORT_SYMBOL(find_get_entry); | 
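|  |  |  | 
|  |  | /* | 
|  |  | * Usage sketch (hypothetical helper): callers that only want real pages | 
|  |  | * must filter out exceptional entries themselves, exactly as | 
|  |  | * pagecache_get_page() does further below. | 
|  |  | */ | 
|  |  | static struct page *example_find_get_page(struct address_space *mapping, | 
|  |  |                                           pgoff_t offset) | 
|  |  | { | 
|  |  |         struct page *page = find_get_entry(mapping, offset); | 
|  |  |  | 
|  |  |         if (radix_tree_exceptional_entry(page)) | 
|  |  |                 return NULL;    /* shadow/swap entry: no ref was taken */ | 
|  |  |         return page;            /* NULL, or a page with a reference */ | 
|  |  | } | 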
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1264 |  | 
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 1265 | /** | 
| Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1266 | * find_lock_entry - locate, pin and lock a page cache entry | 
|  | 1267 | * @mapping: the address_space to search | 
|  | 1268 | * @offset: the page cache index | 
|  | 1269 | * | 
|  | 1270 | * Looks up the page cache slot at @mapping & @offset.  If there is a | 
|  | 1271 | * page cache page, it is returned locked and with an increased | 
|  | 1272 | * refcount. | 
|  | 1273 | * | 
| Johannes Weiner | 139b6a6 | 2014-05-06 12:50:05 -0700 | [diff] [blame] | 1274 | * If the slot holds a shadow entry of a previously evicted page, or a | 
|  | 1275 | * swap entry from shmem/tmpfs, it is returned. | 
| Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1276 | * | 
|  | 1277 | * Otherwise, %NULL is returned. | 
|  | 1278 | * | 
|  | 1279 | * find_lock_entry() may sleep. | 
|  | 1280 | */ | 
|  | 1281 | struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1282 | { | 
|  | 1283 | struct page *page; | 
|  | 1284 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1285 | repeat: | 
| Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1286 | page = find_get_entry(mapping, offset); | 
| Hugh Dickins | a2c16d6 | 2011-08-03 16:21:19 -0700 | [diff] [blame] | 1287 | if (page && !radix_tree_exception(page)) { | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1288 | lock_page(page); | 
|  | 1289 | /* Has the page been truncated? */ | 
| Kirill A. Shutemov | 8392937 | 2016-07-26 15:26:04 -0700 | [diff] [blame] | 1290 | if (unlikely(page_mapping(page) != mapping)) { | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1291 | unlock_page(page); | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1292 | put_page(page); | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1293 | goto repeat; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1294 | } | 
| Kirill A. Shutemov | 8392937 | 2016-07-26 15:26:04 -0700 | [diff] [blame] | 1295 | VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1296 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1297 | return page; | 
|  | 1298 | } | 
| Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1299 | EXPORT_SYMBOL(find_lock_entry); | 
|  | 1300 |  | 
|  | 1301 | /** | 
| Mel Gorman | 2457aec | 2014-06-04 16:10:31 -0700 | [diff] [blame] | 1302 | * pagecache_get_page - find and get a page reference | 
| Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1303 | * @mapping: the address_space to search | 
|  | 1304 | * @offset: the page index | 
| Mel Gorman | 2457aec | 2014-06-04 16:10:31 -0700 | [diff] [blame] | 1305 | * @fgp_flags: FGP flags | 
| Michal Hocko | 45f87de | 2014-12-29 20:30:35 +0100 | [diff] [blame] | 1306 | * @gfp_mask: gfp mask to use for the page cache data page allocation | 
| Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1307 | * | 
| Mel Gorman | 2457aec | 2014-06-04 16:10:31 -0700 | [diff] [blame] | 1308 | * Looks up the page cache slot at @mapping & @offset. | 
| Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1309 | * | 
| Randy Dunlap | 7532518 | 2014-07-30 16:08:37 -0700 | [diff] [blame] | 1310 | * FGP flags modify how the page is returned. | 
| Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1311 | * | 
| mchehab@s-opensource.com | 0e056eb | 2017-03-30 17:11:36 -0300 | [diff] [blame] | 1312 | * @fgp_flags can be: | 
|  | 1313 | * | 
|  | 1314 | * - FGP_ACCESSED: the page will be marked accessed | 
|  | 1315 | * - FGP_LOCK: the page is returned locked | 
|  | 1316 | * - FGP_CREAT: If page is not present then a new page is allocated using | 
|  | 1317 | *   @gfp_mask and added to the page cache and the VM's LRU | 
|  | 1318 | *   list. The page is returned locked and with an increased | 
|  | 1319 | *   refcount. Otherwise, NULL is returned. | 
| Mel Gorman | 2457aec | 2014-06-04 16:10:31 -0700 | [diff] [blame] | 1320 | * | 
|  | 1321 | * If FGP_LOCK or FGP_CREAT is specified then the function may sleep even | 
|  | 1322 | * if the GFP flags specified for FGP_CREAT are atomic. | 
|  | 1323 | * | 
|  | 1324 | * If there is a page cache page, it is returned with an increased refcount. | 
| Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1325 | */ | 
| Mel Gorman | 2457aec | 2014-06-04 16:10:31 -0700 | [diff] [blame] | 1326 | struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, | 
| Michal Hocko | 45f87de | 2014-12-29 20:30:35 +0100 | [diff] [blame] | 1327 | int fgp_flags, gfp_t gfp_mask) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1328 | { | 
| Nick Piggin | eb2be18 | 2007-10-16 01:24:57 -0700 | [diff] [blame] | 1329 | struct page *page; | 
| Mel Gorman | 2457aec | 2014-06-04 16:10:31 -0700 | [diff] [blame] | 1330 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1331 | repeat: | 
| Mel Gorman | 2457aec | 2014-06-04 16:10:31 -0700 | [diff] [blame] | 1332 | page = find_get_entry(mapping, offset); | 
|  | 1333 | if (radix_tree_exceptional_entry(page)) | 
|  | 1334 | page = NULL; | 
|  | 1335 | if (!page) | 
|  | 1336 | goto no_page; | 
|  | 1337 |  | 
|  | 1338 | if (fgp_flags & FGP_LOCK) { | 
|  | 1339 | if (fgp_flags & FGP_NOWAIT) { | 
|  | 1340 | if (!trylock_page(page)) { | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1341 | put_page(page); | 
| Mel Gorman | 2457aec | 2014-06-04 16:10:31 -0700 | [diff] [blame] | 1342 | return NULL; | 
|  | 1343 | } | 
|  | 1344 | } else { | 
|  | 1345 | lock_page(page); | 
|  | 1346 | } | 
|  | 1347 |  | 
|  | 1348 | /* Has the page been truncated? */ | 
|  | 1349 | if (unlikely(page->mapping != mapping)) { | 
|  | 1350 | unlock_page(page); | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1351 | put_page(page); | 
| Mel Gorman | 2457aec | 2014-06-04 16:10:31 -0700 | [diff] [blame] | 1352 | goto repeat; | 
|  | 1353 | } | 
|  | 1354 | VM_BUG_ON_PAGE(page->index != offset, page); | 
|  | 1355 | } | 
|  | 1356 |  | 
|  | 1357 | if (page && (fgp_flags & FGP_ACCESSED)) | 
|  | 1358 | mark_page_accessed(page); | 
|  | 1359 |  | 
|  | 1360 | no_page: | 
|  | 1361 | if (!page && (fgp_flags & FGP_CREAT)) { | 
|  | 1362 | int err; | 
|  | 1363 | if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping)) | 
| Michal Hocko | 45f87de | 2014-12-29 20:30:35 +0100 | [diff] [blame] | 1364 | gfp_mask |= __GFP_WRITE; | 
|  | 1365 | if (fgp_flags & FGP_NOFS) | 
|  | 1366 | gfp_mask &= ~__GFP_FS; | 
| Mel Gorman | 2457aec | 2014-06-04 16:10:31 -0700 | [diff] [blame] | 1367 |  | 
| Michal Hocko | 45f87de | 2014-12-29 20:30:35 +0100 | [diff] [blame] | 1368 | page = __page_cache_alloc(gfp_mask); | 
| Nick Piggin | eb2be18 | 2007-10-16 01:24:57 -0700 | [diff] [blame] | 1369 | if (!page) | 
|  | 1370 | return NULL; | 
| Mel Gorman | 2457aec | 2014-06-04 16:10:31 -0700 | [diff] [blame] | 1371 |  | 
|  | 1372 | if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK))) | 
|  | 1373 | fgp_flags |= FGP_LOCK; | 
|  | 1374 |  | 
| Hugh Dickins | eb39d61 | 2014-08-06 16:06:43 -0700 | [diff] [blame] | 1375 | /* Init accessed up front so we avoid an atomic mark_page_accessed() later */ | 
| Mel Gorman | 2457aec | 2014-06-04 16:10:31 -0700 | [diff] [blame] | 1376 | if (fgp_flags & FGP_ACCESSED) | 
| Hugh Dickins | eb39d61 | 2014-08-06 16:06:43 -0700 | [diff] [blame] | 1377 | __SetPageReferenced(page); | 
| Mel Gorman | 2457aec | 2014-06-04 16:10:31 -0700 | [diff] [blame] | 1378 |  | 
| Michal Hocko | 45f87de | 2014-12-29 20:30:35 +0100 | [diff] [blame] | 1379 | err = add_to_page_cache_lru(page, mapping, offset, | 
|  | 1380 | gfp_mask & GFP_RECLAIM_MASK); | 
| Nick Piggin | eb2be18 | 2007-10-16 01:24:57 -0700 | [diff] [blame] | 1381 | if (unlikely(err)) { | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1382 | put_page(page); | 
| Nick Piggin | eb2be18 | 2007-10-16 01:24:57 -0700 | [diff] [blame] | 1383 | page = NULL; | 
|  | 1384 | if (err == -EEXIST) | 
|  | 1385 | goto repeat; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1386 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1387 | } | 
| Mel Gorman | 2457aec | 2014-06-04 16:10:31 -0700 | [diff] [blame] | 1388 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1389 | return page; | 
|  | 1390 | } | 
| Mel Gorman | 2457aec | 2014-06-04 16:10:31 -0700 | [diff] [blame] | 1391 | EXPORT_SYMBOL(pagecache_get_page); | 
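|  |  |  | 
|  |  | /* | 
|  |  | * For reference, a sketch of how the include/linux/pagemap.h wrappers | 
|  |  | * of this era use pagecache_get_page(): thin inlines that fix the FGP | 
|  |  | * flags, e.g. find_or_create_page(). | 
|  |  | */ | 
|  |  | static inline struct page *example_find_or_create_page( | 
|  |  |                 struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) | 
|  |  | { | 
|  |  |         return pagecache_get_page(mapping, offset, | 
|  |  |                                   FGP_LOCK | FGP_ACCESSED | FGP_CREAT, | 
|  |  |                                   gfp_mask); | 
|  |  | } | 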
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1392 |  | 
|  | 1393 | /** | 
| Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1394 | * find_get_entries - gang pagecache lookup | 
|  | 1395 | * @mapping:	The address_space to search | 
|  | 1396 | * @start:	The starting page cache index | 
|  | 1397 | * @nr_entries:	The maximum number of entries | 
|  | 1398 | * @entries:	Where the resulting entries are placed | 
|  | 1399 | * @indices:	The cache indices corresponding to the entries in @entries | 
|  | 1400 | * | 
|  | 1401 | * find_get_entries() will search for and return a group of up to | 
|  | 1402 | * @nr_entries entries in the mapping.  The entries are placed at | 
|  | 1403 | * @entries.  find_get_entries() takes a reference against any actual | 
|  | 1404 | * pages it returns. | 
|  | 1405 | * | 
|  | 1406 | * The search returns a group of mapping-contiguous page cache entries | 
|  | 1407 | * with ascending indexes.  There may be holes in the indices due to | 
|  | 1408 | * not-present pages. | 
|  | 1409 | * | 
| Johannes Weiner | 139b6a6 | 2014-05-06 12:50:05 -0700 | [diff] [blame] | 1410 | * Any shadow entries of evicted pages, or swap entries from | 
|  | 1411 | * shmem/tmpfs, are included in the returned array. | 
| Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1412 | * | 
|  | 1413 | * find_get_entries() returns the number of pages and shadow entries | 
|  | 1414 | * which were found. | 
|  | 1415 | */ | 
|  | 1416 | unsigned find_get_entries(struct address_space *mapping, | 
|  | 1417 | pgoff_t start, unsigned int nr_entries, | 
|  | 1418 | struct page **entries, pgoff_t *indices) | 
|  | 1419 | { | 
|  | 1420 | void **slot; | 
|  | 1421 | unsigned int ret = 0; | 
|  | 1422 | struct radix_tree_iter iter; | 
|  | 1423 |  | 
|  | 1424 | if (!nr_entries) | 
|  | 1425 | return 0; | 
|  | 1426 |  | 
|  | 1427 | rcu_read_lock(); | 
| Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1428 | radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { | 
| Kirill A. Shutemov | 8392937 | 2016-07-26 15:26:04 -0700 | [diff] [blame] | 1429 | struct page *head, *page; | 
| Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1430 | repeat: | 
|  | 1431 | page = radix_tree_deref_slot(slot); | 
|  | 1432 | if (unlikely(!page)) | 
|  | 1433 | continue; | 
|  | 1434 | if (radix_tree_exception(page)) { | 
| Matthew Wilcox | 2cf938a | 2016-03-17 14:22:03 -0700 | [diff] [blame] | 1435 | if (radix_tree_deref_retry(page)) { | 
|  | 1436 | slot = radix_tree_iter_retry(&iter); | 
|  | 1437 | continue; | 
|  | 1438 | } | 
| Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1439 | /* | 
| Ross Zwisler | f9fe48b | 2016-01-22 15:10:40 -0800 | [diff] [blame] | 1440 | * A shadow entry of a recently evicted page, a swap | 
|  | 1441 | * entry from shmem/tmpfs or a DAX entry.  Return it | 
|  | 1442 | * without attempting to raise page count. | 
| Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1443 | */ | 
|  | 1444 | goto export; | 
|  | 1445 | } | 
| Kirill A. Shutemov | 8392937 | 2016-07-26 15:26:04 -0700 | [diff] [blame] | 1446 |  | 
|  | 1447 | head = compound_head(page); | 
|  | 1448 | if (!page_cache_get_speculative(head)) | 
| Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1449 | goto repeat; | 
|  | 1450 |  | 
| Kirill A. Shutemov | 8392937 | 2016-07-26 15:26:04 -0700 | [diff] [blame] | 1451 | /* The page was split under us? */ | 
|  | 1452 | if (compound_head(page) != head) { | 
|  | 1453 | put_page(head); | 
|  | 1454 | goto repeat; | 
|  | 1455 | } | 
|  | 1456 |  | 
| Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1457 | /* Has the page moved? */ | 
|  | 1458 | if (unlikely(page != *slot)) { | 
| Kirill A. Shutemov | 8392937 | 2016-07-26 15:26:04 -0700 | [diff] [blame] | 1459 | put_page(head); | 
| Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1460 | goto repeat; | 
|  | 1461 | } | 
|  | 1462 | export: | 
|  | 1463 | indices[ret] = iter.index; | 
|  | 1464 | entries[ret] = page; | 
|  | 1465 | if (++ret == nr_entries) | 
|  | 1466 | break; | 
|  | 1467 | } | 
|  | 1468 | rcu_read_unlock(); | 
|  | 1469 | return ret; | 
|  | 1470 | } | 
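|  |  |  | 
|  |  | /* | 
|  |  | * Usage sketch (hypothetical helper): consumers must distinguish real | 
|  |  | * pages (a reference is held) from exceptional entries (no reference | 
|  |  | * is held).  A minimal consumption loop: | 
|  |  | */ | 
|  |  | static void example_drop_entries(struct address_space *mapping, pgoff_t start) | 
|  |  | { | 
|  |  |         struct page *entries[PAGEVEC_SIZE]; | 
|  |  |         pgoff_t indices[PAGEVEC_SIZE]; | 
|  |  |         unsigned i, nr; | 
|  |  |  | 
|  |  |         nr = find_get_entries(mapping, start, PAGEVEC_SIZE, entries, indices); | 
|  |  |         for (i = 0; i < nr; i++) { | 
|  |  |                 if (radix_tree_exceptional_entry(entries[i])) | 
|  |  |                         continue;       /* shadow/swap/DAX entry */ | 
|  |  |                 put_page(entries[i]);   /* drop the lookup reference */ | 
|  |  |         } | 
|  |  | } | 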
|  | 1471 |  | 
|  | 1472 | /** | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1473 | * find_get_pages - gang pagecache lookup | 
|  | 1474 | * @mapping:	The address_space to search | 
|  | 1475 | * @start:	The starting page index | 
|  | 1476 | * @nr_pages:	The maximum number of pages | 
|  | 1477 | * @pages:	Where the resulting pages are placed | 
|  | 1478 | * | 
|  | 1479 | * find_get_pages() will search for and return a group of up to | 
|  | 1480 | * @nr_pages pages in the mapping.  The pages are placed at @pages. | 
|  | 1481 | * find_get_pages() takes a reference against the returned pages. | 
|  | 1482 | * | 
|  | 1483 | * The search returns a group of mapping-contiguous pages with ascending | 
|  | 1484 | * indexes.  There may be holes in the indices due to not-present pages. | 
|  | 1485 | * | 
|  | 1486 | * find_get_pages() returns the number of pages which were found. | 
|  | 1487 | */ | 
|  | 1488 | unsigned find_get_pages(struct address_space *mapping, pgoff_t start, | 
|  | 1489 | unsigned int nr_pages, struct page **pages) | 
|  | 1490 | { | 
| Konstantin Khlebnikov | 0fc9d10 | 2012-03-28 14:42:54 -0700 | [diff] [blame] | 1491 | struct radix_tree_iter iter; | 
|  | 1492 | void **slot; | 
|  | 1493 | unsigned ret = 0; | 
|  | 1494 |  | 
|  | 1495 | if (unlikely(!nr_pages)) | 
|  | 1496 | return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1497 |  | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1498 | rcu_read_lock(); | 
| Konstantin Khlebnikov | 0fc9d10 | 2012-03-28 14:42:54 -0700 | [diff] [blame] | 1499 | radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { | 
| Kirill A. Shutemov | 8392937 | 2016-07-26 15:26:04 -0700 | [diff] [blame] | 1500 | struct page *head, *page; | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1501 | repeat: | 
| Konstantin Khlebnikov | 0fc9d10 | 2012-03-28 14:42:54 -0700 | [diff] [blame] | 1502 | page = radix_tree_deref_slot(slot); | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1503 | if (unlikely(!page)) | 
|  | 1504 | continue; | 
| Hugh Dickins | 9d8aa4e | 2011-03-22 16:33:06 -0700 | [diff] [blame] | 1505 |  | 
| Hugh Dickins | a2c16d6 | 2011-08-03 16:21:19 -0700 | [diff] [blame] | 1506 | if (radix_tree_exception(page)) { | 
| Hugh Dickins | 8079b1c | 2011-08-03 16:21:28 -0700 | [diff] [blame] | 1507 | if (radix_tree_deref_retry(page)) { | 
| Matthew Wilcox | 2cf938a | 2016-03-17 14:22:03 -0700 | [diff] [blame] | 1508 | slot = radix_tree_iter_retry(&iter); | 
|  | 1509 | continue; | 
| Hugh Dickins | 8079b1c | 2011-08-03 16:21:28 -0700 | [diff] [blame] | 1510 | } | 
| Hugh Dickins | a2c16d6 | 2011-08-03 16:21:19 -0700 | [diff] [blame] | 1511 | /* | 
| Johannes Weiner | 139b6a6 | 2014-05-06 12:50:05 -0700 | [diff] [blame] | 1512 | * A shadow entry of a recently evicted page, | 
|  | 1513 | * or a swap entry from shmem/tmpfs.  Skip | 
|  | 1514 | * over it. | 
| Hugh Dickins | a2c16d6 | 2011-08-03 16:21:19 -0700 | [diff] [blame] | 1515 | */ | 
| Hugh Dickins | 8079b1c | 2011-08-03 16:21:28 -0700 | [diff] [blame] | 1516 | continue; | 
| Nick Piggin | 27d20fd | 2010-11-11 14:05:19 -0800 | [diff] [blame] | 1517 | } | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1518 |  | 
| Kirill A. Shutemov | 8392937 | 2016-07-26 15:26:04 -0700 | [diff] [blame] | 1519 | head = compound_head(page); | 
|  | 1520 | if (!page_cache_get_speculative(head)) | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1521 | goto repeat; | 
|  | 1522 |  | 
| Kirill A. Shutemov | 8392937 | 2016-07-26 15:26:04 -0700 | [diff] [blame] | 1523 | /* The page was split under us? */ | 
|  | 1524 | if (compound_head(page) != head) { | 
|  | 1525 | put_page(head); | 
|  | 1526 | goto repeat; | 
|  | 1527 | } | 
|  | 1528 |  | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1529 | /* Has the page moved? */ | 
| Konstantin Khlebnikov | 0fc9d10 | 2012-03-28 14:42:54 -0700 | [diff] [blame] | 1530 | if (unlikely(page != *slot)) { | 
| Kirill A. Shutemov | 8392937 | 2016-07-26 15:26:04 -0700 | [diff] [blame] | 1531 | put_page(head); | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1532 | goto repeat; | 
|  | 1533 | } | 
|  | 1534 |  | 
|  | 1535 | pages[ret] = page; | 
| Konstantin Khlebnikov | 0fc9d10 | 2012-03-28 14:42:54 -0700 | [diff] [blame] | 1536 | if (++ret == nr_pages) | 
|  | 1537 | break; | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1538 | } | 
| Hugh Dickins | 5b280c0 | 2011-03-22 16:33:07 -0700 | [diff] [blame] | 1539 |  | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1540 | rcu_read_unlock(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1541 | return ret; | 
|  | 1542 | } | 
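|  |  |  | 
|  |  | /* | 
|  |  | * Usage sketch (hypothetical caller): since @start is passed by value, | 
|  |  | * batched callers advance the index themselves and drop the references | 
|  |  | * the lookup took for them. | 
|  |  | */ | 
|  |  | static void example_scan_pages(struct address_space *mapping) | 
|  |  | { | 
|  |  |         struct page *pages[PAGEVEC_SIZE]; | 
|  |  |         pgoff_t index = 0; | 
|  |  |         unsigned i, nr; | 
|  |  |  | 
|  |  |         while ((nr = find_get_pages(mapping, index, PAGEVEC_SIZE, pages))) { | 
|  |  |                 for (i = 0; i < nr; i++) { | 
|  |  |                         /* ... inspect pages[i] ... */ | 
|  |  |                         index = pages[i]->index + 1; | 
|  |  |                         put_page(pages[i]); | 
|  |  |                 } | 
|  |  |                 cond_resched(); | 
|  |  |         } | 
|  |  | } | 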
|  | 1543 |  | 
| Jens Axboe | ebf4350 | 2006-04-27 08:46:01 +0200 | [diff] [blame] | 1544 | /** | 
|  | 1545 | * find_get_pages_contig - gang contiguous pagecache lookup | 
|  | 1546 | * @mapping:	The address_space to search | 
|  | 1547 | * @index:	The starting page index | 
|  | 1548 | * @nr_pages:	The maximum number of pages | 
|  | 1549 | * @pages:	Where the resulting pages are placed | 
|  | 1550 | * | 
|  | 1551 | * find_get_pages_contig() works exactly like find_get_pages(), except | 
|  | 1552 | * that the returned number of pages are guaranteed to be contiguous. | 
|  | 1553 | * | 
|  | 1554 | * find_get_pages_contig() returns the number of pages which were found. | 
|  | 1555 | */ | 
|  | 1556 | unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index, | 
|  | 1557 | unsigned int nr_pages, struct page **pages) | 
|  | 1558 | { | 
| Konstantin Khlebnikov | 0fc9d10 | 2012-03-28 14:42:54 -0700 | [diff] [blame] | 1559 | struct radix_tree_iter iter; | 
|  | 1560 | void **slot; | 
|  | 1561 | unsigned int ret = 0; | 
|  | 1562 |  | 
|  | 1563 | if (unlikely(!nr_pages)) | 
|  | 1564 | return 0; | 
| Jens Axboe | ebf4350 | 2006-04-27 08:46:01 +0200 | [diff] [blame] | 1565 |  | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1566 | rcu_read_lock(); | 
| Konstantin Khlebnikov | 0fc9d10 | 2012-03-28 14:42:54 -0700 | [diff] [blame] | 1567 | radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) { | 
| Kirill A. Shutemov | 8392937 | 2016-07-26 15:26:04 -0700 | [diff] [blame] | 1568 | struct page *head, *page; | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1569 | repeat: | 
| Konstantin Khlebnikov | 0fc9d10 | 2012-03-28 14:42:54 -0700 | [diff] [blame] | 1570 | page = radix_tree_deref_slot(slot); | 
|  | 1571 | /* A hole: there is no reason to continue */ | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1572 | if (unlikely(!page)) | 
| Konstantin Khlebnikov | 0fc9d10 | 2012-03-28 14:42:54 -0700 | [diff] [blame] | 1573 | break; | 
| Hugh Dickins | 9d8aa4e | 2011-03-22 16:33:06 -0700 | [diff] [blame] | 1574 |  | 
| Hugh Dickins | a2c16d6 | 2011-08-03 16:21:19 -0700 | [diff] [blame] | 1575 | if (radix_tree_exception(page)) { | 
| Hugh Dickins | 8079b1c | 2011-08-03 16:21:28 -0700 | [diff] [blame] | 1576 | if (radix_tree_deref_retry(page)) { | 
| Matthew Wilcox | 2cf938a | 2016-03-17 14:22:03 -0700 | [diff] [blame] | 1577 | slot = radix_tree_iter_retry(&iter); | 
|  | 1578 | continue; | 
| Hugh Dickins | 8079b1c | 2011-08-03 16:21:28 -0700 | [diff] [blame] | 1579 | } | 
| Hugh Dickins | a2c16d6 | 2011-08-03 16:21:19 -0700 | [diff] [blame] | 1580 | /* | 
| Johannes Weiner | 139b6a6 | 2014-05-06 12:50:05 -0700 | [diff] [blame] | 1581 | * A shadow entry of a recently evicted page, | 
|  | 1582 | * or a swap entry from shmem/tmpfs.  Stop | 
|  | 1583 | * looking for contiguous pages. | 
| Hugh Dickins | a2c16d6 | 2011-08-03 16:21:19 -0700 | [diff] [blame] | 1584 | */ | 
| Hugh Dickins | 8079b1c | 2011-08-03 16:21:28 -0700 | [diff] [blame] | 1585 | break; | 
| Hugh Dickins | a2c16d6 | 2011-08-03 16:21:19 -0700 | [diff] [blame] | 1586 | } | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1587 |  | 
| Kirill A. Shutemov | 8392937 | 2016-07-26 15:26:04 -0700 | [diff] [blame] | 1588 | head = compound_head(page); | 
|  | 1589 | if (!page_cache_get_speculative(head)) | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1590 | goto repeat; | 
|  | 1591 |  | 
| Kirill A. Shutemov | 8392937 | 2016-07-26 15:26:04 -0700 | [diff] [blame] | 1592 | /* The page was split under us? */ | 
|  | 1593 | if (compound_head(page) != head) { | 
|  | 1594 | put_page(head); | 
|  | 1595 | goto repeat; | 
|  | 1596 | } | 
|  | 1597 |  | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1598 | /* Has the page moved? */ | 
| Konstantin Khlebnikov | 0fc9d10 | 2012-03-28 14:42:54 -0700 | [diff] [blame] | 1599 | if (unlikely(page != *slot)) { | 
| Kirill A. Shutemov | 8392937 | 2016-07-26 15:26:04 -0700 | [diff] [blame] | 1600 | put_page(head); | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1601 | goto repeat; | 
|  | 1602 | } | 
|  | 1603 |  | 
| Nick Piggin | 9cbb4cb | 2011-01-13 15:45:51 -0800 | [diff] [blame] | 1604 | /* | 
|  | 1605 | * Must check mapping and index after taking the ref, | 
|  | 1606 | * otherwise we can get both false positives and false | 
|  | 1607 | * negatives, which is just confusing to the caller. | 
|  | 1608 | */ | 
| Kirill A. Shutemov | 8392937 | 2016-07-26 15:26:04 -0700 | [diff] [blame] | 1609 | if (page->mapping == NULL || page_to_pgoff(page) != iter.index) { | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1610 | put_page(page); | 
| Nick Piggin | 9cbb4cb | 2011-01-13 15:45:51 -0800 | [diff] [blame] | 1611 | break; | 
|  | 1612 | } | 
|  | 1613 |  | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1614 | pages[ret] = page; | 
| Konstantin Khlebnikov | 0fc9d10 | 2012-03-28 14:42:54 -0700 | [diff] [blame] | 1615 | if (++ret == nr_pages) | 
|  | 1616 | break; | 
| Jens Axboe | ebf4350 | 2006-04-27 08:46:01 +0200 | [diff] [blame] | 1617 | } | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1618 | rcu_read_unlock(); | 
|  | 1619 | return ret; | 
| Jens Axboe | ebf4350 | 2006-04-27 08:46:01 +0200 | [diff] [blame] | 1620 | } | 
| David Howells | ef71c15 | 2007-05-09 02:33:44 -0700 | [diff] [blame] | 1621 | EXPORT_SYMBOL(find_get_pages_contig); | 
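|  |  |  | 
|  |  | /* | 
|  |  | * Usage sketch (hypothetical helper): grab an index-contiguous run and | 
|  |  | * bail out if any page in the range is missing; on success the caller | 
|  |  | * owns one reference per page. | 
|  |  | */ | 
|  |  | static unsigned example_get_contig_range(struct address_space *mapping, | 
|  |  |                                          pgoff_t index, unsigned int nr, | 
|  |  |                                          struct page **pages) | 
|  |  | { | 
|  |  |         unsigned found = find_get_pages_contig(mapping, index, nr, pages); | 
|  |  |  | 
|  |  |         if (found < nr) {               /* hit a hole or exceptional entry */ | 
|  |  |                 while (found) | 
|  |  |                         put_page(pages[--found]); | 
|  |  |                 return 0; | 
|  |  |         } | 
|  |  |         return nr; | 
|  |  | } | 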
| Jens Axboe | ebf4350 | 2006-04-27 08:46:01 +0200 | [diff] [blame] | 1622 |  | 
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 1623 | /** | 
|  | 1624 | * find_get_pages_tag - find and return pages that match @tag | 
|  | 1625 | * @mapping:	the address_space to search | 
|  | 1626 | * @index:	the starting page index | 
|  | 1627 | * @tag:	the tag index | 
|  | 1628 | * @nr_pages:	the maximum number of pages | 
|  | 1629 | * @pages:	where the resulting pages are placed | 
|  | 1630 | * | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1631 | * Like find_get_pages, except we only return pages which are tagged with | 
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 1632 | * @tag.   We update @index to index the next page for the traversal. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1633 | */ | 
|  | 1634 | unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index, | 
|  | 1635 | int tag, unsigned int nr_pages, struct page **pages) | 
|  | 1636 | { | 
| Konstantin Khlebnikov | 0fc9d10 | 2012-03-28 14:42:54 -0700 | [diff] [blame] | 1637 | struct radix_tree_iter iter; | 
|  | 1638 | void **slot; | 
|  | 1639 | unsigned ret = 0; | 
|  | 1640 |  | 
|  | 1641 | if (unlikely(!nr_pages)) | 
|  | 1642 | return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1643 |  | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1644 | rcu_read_lock(); | 
| Konstantin Khlebnikov | 0fc9d10 | 2012-03-28 14:42:54 -0700 | [diff] [blame] | 1645 | radix_tree_for_each_tagged(slot, &mapping->page_tree, | 
|  | 1646 | &iter, *index, tag) { | 
| Kirill A. Shutemov | 8392937 | 2016-07-26 15:26:04 -0700 | [diff] [blame] | 1647 | struct page *head, *page; | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1648 | repeat: | 
| Konstantin Khlebnikov | 0fc9d10 | 2012-03-28 14:42:54 -0700 | [diff] [blame] | 1649 | page = radix_tree_deref_slot(slot); | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1650 | if (unlikely(!page)) | 
|  | 1651 | continue; | 
| Hugh Dickins | 9d8aa4e | 2011-03-22 16:33:06 -0700 | [diff] [blame] | 1652 |  | 
| Hugh Dickins | a2c16d6 | 2011-08-03 16:21:19 -0700 | [diff] [blame] | 1653 | if (radix_tree_exception(page)) { | 
| Hugh Dickins | 8079b1c | 2011-08-03 16:21:28 -0700 | [diff] [blame] | 1654 | if (radix_tree_deref_retry(page)) { | 
| Matthew Wilcox | 2cf938a | 2016-03-17 14:22:03 -0700 | [diff] [blame] | 1655 | slot = radix_tree_iter_retry(&iter); | 
|  | 1656 | continue; | 
| Hugh Dickins | 8079b1c | 2011-08-03 16:21:28 -0700 | [diff] [blame] | 1657 | } | 
| Hugh Dickins | a2c16d6 | 2011-08-03 16:21:19 -0700 | [diff] [blame] | 1658 | /* | 
| Johannes Weiner | 139b6a6 | 2014-05-06 12:50:05 -0700 | [diff] [blame] | 1659 | * A shadow entry of a recently evicted page. | 
|  | 1660 | * | 
|  | 1661 | * Those entries should never be tagged, but | 
|  | 1662 | * this tree walk is lockless and the tags are | 
|  | 1663 | * looked up in bulk, one radix tree node at a | 
|  | 1664 | * time, so there is a sizable window for page | 
|  | 1665 | * reclaim to evict a page we saw tagged. | 
|  | 1666 | * | 
|  | 1667 | * Skip over it. | 
| Hugh Dickins | a2c16d6 | 2011-08-03 16:21:19 -0700 | [diff] [blame] | 1668 | */ | 
| Johannes Weiner | 139b6a6 | 2014-05-06 12:50:05 -0700 | [diff] [blame] | 1669 | continue; | 
| Hugh Dickins | a2c16d6 | 2011-08-03 16:21:19 -0700 | [diff] [blame] | 1670 | } | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1671 |  | 
| Kirill A. Shutemov | 8392937 | 2016-07-26 15:26:04 -0700 | [diff] [blame] | 1672 | head = compound_head(page); | 
|  | 1673 | if (!page_cache_get_speculative(head)) | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1674 | goto repeat; | 
|  | 1675 |  | 
| Kirill A. Shutemov | 8392937 | 2016-07-26 15:26:04 -0700 | [diff] [blame] | 1676 | /* The page was split under us? */ | 
|  | 1677 | if (compound_head(page) != head) { | 
|  | 1678 | put_page(head); | 
|  | 1679 | goto repeat; | 
|  | 1680 | } | 
|  | 1681 |  | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1682 | /* Has the page moved? */ | 
| Konstantin Khlebnikov | 0fc9d10 | 2012-03-28 14:42:54 -0700 | [diff] [blame] | 1683 | if (unlikely(page != *slot)) { | 
| Kirill A. Shutemov | 8392937 | 2016-07-26 15:26:04 -0700 | [diff] [blame] | 1684 | put_page(head); | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1685 | goto repeat; | 
|  | 1686 | } | 
|  | 1687 |  | 
|  | 1688 | pages[ret] = page; | 
| Konstantin Khlebnikov | 0fc9d10 | 2012-03-28 14:42:54 -0700 | [diff] [blame] | 1689 | if (++ret == nr_pages) | 
|  | 1690 | break; | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1691 | } | 
| Hugh Dickins | 5b280c0 | 2011-03-22 16:33:07 -0700 | [diff] [blame] | 1692 |  | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1693 | rcu_read_unlock(); | 
|  | 1694 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1695 | if (ret) | 
|  | 1696 | *index = pages[ret - 1]->index + 1; | 
| Nick Piggin | a60637c | 2008-07-25 19:45:31 -0700 | [diff] [blame] | 1697 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1698 | return ret; | 
|  | 1699 | } | 
| David Howells | ef71c15 | 2007-05-09 02:33:44 -0700 | [diff] [blame] | 1700 | EXPORT_SYMBOL(find_get_pages_tag); | 
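|  |  |  | 
|  |  | /* | 
|  |  | * Usage sketch: a writeback-style scan over dirty pages.  Note the | 
|  |  | * pass-by-reference @index, which the lookup advances past the last | 
|  |  | * page it returned (compare write_cache_pages()). | 
|  |  | */ | 
|  |  | static void example_scan_dirty_pages(struct address_space *mapping) | 
|  |  | { | 
|  |  |         struct page *pages[PAGEVEC_SIZE]; | 
|  |  |         pgoff_t index = 0; | 
|  |  |         unsigned i, nr; | 
|  |  |  | 
|  |  |         while ((nr = find_get_pages_tag(mapping, &index, PAGECACHE_TAG_DIRTY, | 
|  |  |                                         PAGEVEC_SIZE, pages))) { | 
|  |  |                 for (i = 0; i < nr; i++) { | 
|  |  |                         /* ... lock and write back pages[i] ... */ | 
|  |  |                         put_page(pages[i]); | 
|  |  |                 } | 
|  |  |                 cond_resched(); | 
|  |  |         } | 
|  |  | } | 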
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1701 |  | 
| Ross Zwisler | 7e7f774 | 2016-01-22 15:10:44 -0800 | [diff] [blame] | 1702 | /** | 
|  | 1703 | * find_get_entries_tag - find and return entries that match @tag | 
|  | 1704 | * @mapping:	the address_space to search | 
|  | 1705 | * @start:	the starting page cache index | 
|  | 1706 | * @tag:	the tag index | 
|  | 1707 | * @nr_entries:	the maximum number of entries | 
|  | 1708 | * @entries:	where the resulting entries are placed | 
|  | 1709 | * @indices:	the cache indices corresponding to the entries in @entries | 
|  | 1710 | * | 
|  | 1711 | * Like find_get_entries, except we only return entries which are tagged with | 
|  | 1712 | * @tag. | 
|  | 1713 | */ | 
|  | 1714 | unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start, | 
|  | 1715 | int tag, unsigned int nr_entries, | 
|  | 1716 | struct page **entries, pgoff_t *indices) | 
|  | 1717 | { | 
|  | 1718 | void **slot; | 
|  | 1719 | unsigned int ret = 0; | 
|  | 1720 | struct radix_tree_iter iter; | 
|  | 1721 |  | 
|  | 1722 | if (!nr_entries) | 
|  | 1723 | return 0; | 
|  | 1724 |  | 
|  | 1725 | rcu_read_lock(); | 
| Ross Zwisler | 7e7f774 | 2016-01-22 15:10:44 -0800 | [diff] [blame] | 1726 | radix_tree_for_each_tagged(slot, &mapping->page_tree, | 
|  | 1727 | &iter, start, tag) { | 
| Kirill A. Shutemov | 8392937 | 2016-07-26 15:26:04 -0700 | [diff] [blame] | 1728 | struct page *head, *page; | 
| Ross Zwisler | 7e7f774 | 2016-01-22 15:10:44 -0800 | [diff] [blame] | 1729 | repeat: | 
|  | 1730 | page = radix_tree_deref_slot(slot); | 
|  | 1731 | if (unlikely(!page)) | 
|  | 1732 | continue; | 
|  | 1733 | if (radix_tree_exception(page)) { | 
|  | 1734 | if (radix_tree_deref_retry(page)) { | 
| Matthew Wilcox | 2cf938a | 2016-03-17 14:22:03 -0700 | [diff] [blame] | 1735 | slot = radix_tree_iter_retry(&iter); | 
|  | 1736 | continue; | 
| Ross Zwisler | 7e7f774 | 2016-01-22 15:10:44 -0800 | [diff] [blame] | 1737 | } | 
|  | 1738 |  | 
|  | 1739 | /* | 
|  | 1740 | * A shadow entry of a recently evicted page, a swap | 
|  | 1741 | * entry from shmem/tmpfs or a DAX entry.  Return it | 
|  | 1742 | * without attempting to raise page count. | 
|  | 1743 | */ | 
|  | 1744 | goto export; | 
|  | 1745 | } | 
| Kirill A. Shutemov | 8392937 | 2016-07-26 15:26:04 -0700 | [diff] [blame] | 1746 |  | 
|  | 1747 | head = compound_head(page); | 
|  | 1748 | if (!page_cache_get_speculative(head)) | 
| Ross Zwisler | 7e7f774 | 2016-01-22 15:10:44 -0800 | [diff] [blame] | 1749 | goto repeat; | 
|  | 1750 |  | 
| Kirill A. Shutemov | 8392937 | 2016-07-26 15:26:04 -0700 | [diff] [blame] | 1751 | /* The page was split under us? */ | 
|  | 1752 | if (compound_head(page) != head) { | 
|  | 1753 | put_page(head); | 
|  | 1754 | goto repeat; | 
|  | 1755 | } | 
|  | 1756 |  | 
| Ross Zwisler | 7e7f774 | 2016-01-22 15:10:44 -0800 | [diff] [blame] | 1757 | /* Has the page moved? */ | 
|  | 1758 | if (unlikely(page != *slot)) { | 
| Kirill A. Shutemov | 8392937 | 2016-07-26 15:26:04 -0700 | [diff] [blame] | 1759 | put_page(head); | 
| Ross Zwisler | 7e7f774 | 2016-01-22 15:10:44 -0800 | [diff] [blame] | 1760 | goto repeat; | 
|  | 1761 | } | 
|  | 1762 | export: | 
|  | 1763 | indices[ret] = iter.index; | 
|  | 1764 | entries[ret] = page; | 
|  | 1765 | if (++ret == nr_entries) | 
|  | 1766 | break; | 
|  | 1767 | } | 
|  | 1768 | rcu_read_unlock(); | 
|  | 1769 | return ret; | 
|  | 1770 | } | 
|  | 1771 | EXPORT_SYMBOL(find_get_entries_tag); | 
|  | 1772 |  | 
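|  |  | /* | 
|  |  |  * The main consumer of this variant is DAX writeback, where the returned | 
|  |  |  * "entries" are exceptional radix-tree entries rather than struct pages. | 
|  |  |  * A condensed sketch of that pattern, loosely modelled on | 
|  |  |  * dax_writeback_mapping_range() (per-entry flushing elided, function name | 
|  |  |  * illustrative): | 
|  |  |  */ | 
|  |  | static void example_flush_towrite_entries(struct address_space *mapping) | 
|  |  | { | 
|  |  | 	pgoff_t indices[PAGEVEC_SIZE]; | 
|  |  | 	struct page *entries[PAGEVEC_SIZE]; | 
|  |  | 	pgoff_t start = 0; | 
|  |  | 	unsigned i, nr; | 
|  |  |  | 
|  |  | 	while ((nr = find_get_entries_tag(mapping, start, | 
|  |  | 				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE, | 
|  |  | 				entries, indices))) { | 
|  |  | 		for (i = 0; i < nr; i++) { | 
|  |  | 			/* entries[i] is typically an exceptional DAX entry */ | 
|  |  | 			; | 
|  |  | 		} | 
|  |  | 		/* unlike find_get_pages_tag(), the index is not advanced */ | 
|  |  | 		start = indices[nr - 1] + 1; | 
|  |  | 	} | 
|  |  | } | 
|  |  |  | 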
| Wu Fengguang | 76d42bd | 2006-06-25 05:48:43 -0700 | [diff] [blame] | 1773 | /* | 
|  | 1774 | * CD/DVDs are error prone. When a medium error occurs, the driver may fail | 
|  | 1775 | * a _large_ part of the i/o request. Imagine the worst scenario: | 
|  | 1776 | * | 
|  | 1777 | *      ---R__________________________________________B__________ | 
|  | 1778 | *         ^ reading here                             ^ bad block (assume 4k) | 
|  | 1779 | * | 
|  | 1780 | * read(R) => miss => readahead(R...B) => media error => frustrating retries | 
|  | 1781 | * => failing the whole request => read(R) => read(R+1) => | 
|  | 1782 | * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) => | 
|  | 1783 | * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) => | 
|  | 1784 | * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ...... | 
|  | 1785 | * | 
|  | 1786 | * This spirals out of control. Fix it by quickly scaling down the readahead size. | 
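|  |  | * With the common 128kB (32-page) default window, for example, a single | 
|  |  | * error drops ra_pages to 8 pages and a second error to 2, so the retry | 
|  |  | * storm above dies down almost immediately. | 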
|  | 1787 | */ | 
|  | 1788 | static void shrink_readahead_size_eio(struct file *filp, | 
|  | 1789 | struct file_ra_state *ra) | 
|  | 1790 | { | 
| Wu Fengguang | 76d42bd | 2006-06-25 05:48:43 -0700 | [diff] [blame] | 1791 | ra->ra_pages /= 4; | 
| Wu Fengguang | 76d42bd | 2006-06-25 05:48:43 -0700 | [diff] [blame] | 1792 | } | 
|  | 1793 |  | 
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 1794 | /** | 
| Christoph Hellwig | 36e7891 | 2008-02-08 04:21:24 -0800 | [diff] [blame] | 1795 | * do_generic_file_read - generic file read routine | 
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 1796 | * @filp:	the file to read | 
|  | 1797 | * @ppos:	current file position | 
| Al Viro | 6e58e79 | 2014-02-03 17:07:03 -0500 | [diff] [blame] | 1798 | * @iter:	data destination | 
|  | 1799 | * @written:	bytes already copied | 
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 1800 | * | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1801 | * This is a generic file read routine, and uses the | 
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 1802 | * mapping->a_ops->readpage() function for the actual low-level stuff. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1803 | * | 
|  | 1804 | * This is really ugly. But the gotos actually try to clarify some | 
|  | 1805 | * of the logic when it comes to error handling etc. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1806 | */ | 
| Al Viro | 6e58e79 | 2014-02-03 17:07:03 -0500 | [diff] [blame] | 1807 | static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos, | 
|  | 1808 | struct iov_iter *iter, ssize_t written) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1809 | { | 
| Christoph Hellwig | 36e7891 | 2008-02-08 04:21:24 -0800 | [diff] [blame] | 1810 | struct address_space *mapping = filp->f_mapping; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1811 | struct inode *inode = mapping->host; | 
| Christoph Hellwig | 36e7891 | 2008-02-08 04:21:24 -0800 | [diff] [blame] | 1812 | struct file_ra_state *ra = &filp->f_ra; | 
| Fengguang Wu | 57f6b96 | 2007-10-16 01:24:37 -0700 | [diff] [blame] | 1813 | pgoff_t index; | 
|  | 1814 | pgoff_t last_index; | 
|  | 1815 | pgoff_t prev_index; | 
|  | 1816 | unsigned long offset;      /* offset into pagecache page */ | 
| Jan Kara | ec0f163 | 2007-05-06 14:49:25 -0700 | [diff] [blame] | 1817 | unsigned int prev_offset; | 
| Al Viro | 6e58e79 | 2014-02-03 17:07:03 -0500 | [diff] [blame] | 1818 | int error = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1819 |  | 
| Wei Fang | c2a9737 | 2016-10-07 17:01:52 -0700 | [diff] [blame] | 1820 | if (unlikely(*ppos >= inode->i_sb->s_maxbytes)) | 
| Linus Torvalds | d05c5f7 | 2016-12-14 12:45:25 -0800 | [diff] [blame] | 1821 | return 0; | 
| Wei Fang | c2a9737 | 2016-10-07 17:01:52 -0700 | [diff] [blame] | 1822 | iov_iter_truncate(iter, inode->i_sb->s_maxbytes); | 
|  | 1823 |  | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1824 | index = *ppos >> PAGE_SHIFT; | 
|  | 1825 | prev_index = ra->prev_pos >> PAGE_SHIFT; | 
|  | 1826 | prev_offset = ra->prev_pos & (PAGE_SIZE-1); | 
|  | 1827 | last_index = (*ppos + iter->count + PAGE_SIZE-1) >> PAGE_SHIFT; | 
|  | 1828 | offset = *ppos & ~PAGE_MASK; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1829 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1830 | for (;;) { | 
|  | 1831 | struct page *page; | 
| Fengguang Wu | 57f6b96 | 2007-10-16 01:24:37 -0700 | [diff] [blame] | 1832 | pgoff_t end_index; | 
| NeilBrown | a32ea1e | 2007-07-17 04:03:04 -0700 | [diff] [blame] | 1833 | loff_t isize; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1834 | unsigned long nr, ret; | 
|  | 1835 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1836 | cond_resched(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1837 | find_page: | 
| Michal Hocko | 5abf186 | 2017-02-03 13:13:29 -0800 | [diff] [blame] | 1838 | if (fatal_signal_pending(current)) { | 
|  | 1839 | error = -EINTR; | 
|  | 1840 | goto out; | 
|  | 1841 | } | 
|  | 1842 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1843 | page = find_get_page(mapping, index); | 
| Fengguang Wu | 3ea89ee | 2007-07-19 01:48:02 -0700 | [diff] [blame] | 1844 | if (!page) { | 
| Rusty Russell | cf914a7 | 2007-07-19 01:48:08 -0700 | [diff] [blame] | 1845 | page_cache_sync_readahead(mapping, | 
| Fengguang Wu | 7ff8107 | 2007-10-16 01:24:35 -0700 | [diff] [blame] | 1846 | ra, filp, | 
| Fengguang Wu | 3ea89ee | 2007-07-19 01:48:02 -0700 | [diff] [blame] | 1847 | index, last_index - index); | 
|  | 1848 | page = find_get_page(mapping, index); | 
|  | 1849 | if (unlikely(page == NULL)) | 
|  | 1850 | goto no_cached_page; | 
|  | 1851 | } | 
|  | 1852 | if (PageReadahead(page)) { | 
| Rusty Russell | cf914a7 | 2007-07-19 01:48:08 -0700 | [diff] [blame] | 1853 | page_cache_async_readahead(mapping, | 
| Fengguang Wu | 7ff8107 | 2007-10-16 01:24:35 -0700 | [diff] [blame] | 1854 | ra, filp, page, | 
| Fengguang Wu | 3ea89ee | 2007-07-19 01:48:02 -0700 | [diff] [blame] | 1855 | index, last_index - index); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1856 | } | 
| Hisashi Hifumi | 8ab22b9 | 2008-07-28 15:46:36 -0700 | [diff] [blame] | 1857 | if (!PageUptodate(page)) { | 
| Mel Gorman | ebded02 | 2016-03-15 14:55:39 -0700 | [diff] [blame] | 1858 | /* | 
|  | 1859 | * See comment in do_read_cache_page on why | 
|  | 1860 | * wait_on_page_locked is used to avoid unnecessary | 
|  | 1861 | * serialisations and why it's safe. | 
|  | 1862 | */ | 
| Bart Van Assche | c4b209a | 2016-10-07 16:58:33 -0700 | [diff] [blame] | 1863 | error = wait_on_page_locked_killable(page); | 
|  | 1864 | if (unlikely(error)) | 
|  | 1865 | goto readpage_error; | 
| Mel Gorman | ebded02 | 2016-03-15 14:55:39 -0700 | [diff] [blame] | 1866 | if (PageUptodate(page)) | 
|  | 1867 | goto page_ok; | 
|  | 1868 |  | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1869 | if (inode->i_blkbits == PAGE_SHIFT || | 
| Hisashi Hifumi | 8ab22b9 | 2008-07-28 15:46:36 -0700 | [diff] [blame] | 1870 | !mapping->a_ops->is_partially_uptodate) | 
|  | 1871 | goto page_not_up_to_date; | 
| Eryu Guan | 6d6d36b | 2016-11-01 15:43:07 +0800 | [diff] [blame] | 1872 | /* pipes can't handle partially uptodate pages */ | 
|  | 1873 | if (unlikely(iter->type & ITER_PIPE)) | 
|  | 1874 | goto page_not_up_to_date; | 
| Nick Piggin | 529ae9a | 2008-08-02 12:01:03 +0200 | [diff] [blame] | 1875 | if (!trylock_page(page)) | 
| Hisashi Hifumi | 8ab22b9 | 2008-07-28 15:46:36 -0700 | [diff] [blame] | 1876 | goto page_not_up_to_date; | 
| Dave Hansen | 8d056cb | 2010-11-11 14:05:15 -0800 | [diff] [blame] | 1877 | /* Did it get truncated before we got the lock? */ | 
|  | 1878 | if (!page->mapping) | 
|  | 1879 | goto page_not_up_to_date_locked; | 
| Hisashi Hifumi | 8ab22b9 | 2008-07-28 15:46:36 -0700 | [diff] [blame] | 1880 | if (!mapping->a_ops->is_partially_uptodate(page, | 
| Al Viro | 6e58e79 | 2014-02-03 17:07:03 -0500 | [diff] [blame] | 1881 | offset, iter->count)) | 
| Hisashi Hifumi | 8ab22b9 | 2008-07-28 15:46:36 -0700 | [diff] [blame] | 1882 | goto page_not_up_to_date_locked; | 
|  | 1883 | unlock_page(page); | 
|  | 1884 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1885 | page_ok: | 
| NeilBrown | a32ea1e | 2007-07-17 04:03:04 -0700 | [diff] [blame] | 1886 | /* | 
|  | 1887 | * i_size must be checked after we know the page is Uptodate. | 
|  | 1888 | * | 
|  | 1889 | * Checking i_size only after that allows us to calculate | 
|  | 1890 | * the correct value for "nr", which means the zero-filled | 
|  | 1891 | * part of the page is not copied back to userspace (unless | 
|  | 1892 | * another truncate extends the file - this is desired though). | 
|  | 1893 | */ | 
|  | 1894 |  | 
|  | 1895 | isize = i_size_read(inode); | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1896 | end_index = (isize - 1) >> PAGE_SHIFT; | 
| NeilBrown | a32ea1e | 2007-07-17 04:03:04 -0700 | [diff] [blame] | 1897 | if (unlikely(!isize || index > end_index)) { | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1898 | put_page(page); | 
| NeilBrown | a32ea1e | 2007-07-17 04:03:04 -0700 | [diff] [blame] | 1899 | goto out; | 
|  | 1900 | } | 
|  | 1901 |  | 
|  | 1902 | /* nr is the maximum number of bytes to copy from this page */ | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1903 | nr = PAGE_SIZE; | 
| NeilBrown | a32ea1e | 2007-07-17 04:03:04 -0700 | [diff] [blame] | 1904 | if (index == end_index) { | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1905 | nr = ((isize - 1) & ~PAGE_MASK) + 1; | 
| NeilBrown | a32ea1e | 2007-07-17 04:03:04 -0700 | [diff] [blame] | 1906 | if (nr <= offset) { | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1907 | put_page(page); | 
| NeilBrown | a32ea1e | 2007-07-17 04:03:04 -0700 | [diff] [blame] | 1908 | goto out; | 
|  | 1909 | } | 
|  | 1910 | } | 
|  | 1911 | nr = nr - offset; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1912 |  | 
|  | 1913 | /* If users can be writing to this page using arbitrary | 
|  | 1914 | * virtual addresses, take care about potential aliasing | 
|  | 1915 | * before reading the page on the kernel side. | 
|  | 1916 | */ | 
|  | 1917 | if (mapping_writably_mapped(mapping)) | 
|  | 1918 | flush_dcache_page(page); | 
|  | 1919 |  | 
|  | 1920 | /* | 
| Jan Kara | ec0f163 | 2007-05-06 14:49:25 -0700 | [diff] [blame] | 1921 | * When a sequential read accesses a page several times, | 
|  | 1922 | * only mark it as accessed the first time. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1923 | */ | 
| Jan Kara | ec0f163 | 2007-05-06 14:49:25 -0700 | [diff] [blame] | 1924 | if (prev_index != index || offset != prev_offset) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1925 | mark_page_accessed(page); | 
|  | 1926 | prev_index = index; | 
|  | 1927 |  | 
|  | 1928 | /* | 
|  | 1929 | * Ok, we have the page, and it's up-to-date, so | 
|  | 1930 | * now we can copy it to user space... | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1931 | */ | 
| Al Viro | 6e58e79 | 2014-02-03 17:07:03 -0500 | [diff] [blame] | 1932 |  | 
|  | 1933 | ret = copy_page_to_iter(page, offset, nr, iter); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1934 | offset += ret; | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1935 | index += offset >> PAGE_SHIFT; | 
|  | 1936 | offset &= ~PAGE_MASK; | 
| Jan Kara | 6ce745e | 2007-05-06 14:49:26 -0700 | [diff] [blame] | 1937 | prev_offset = offset; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1938 |  | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1939 | put_page(page); | 
| Al Viro | 6e58e79 | 2014-02-03 17:07:03 -0500 | [diff] [blame] | 1940 | written += ret; | 
|  | 1941 | if (!iov_iter_count(iter)) | 
|  | 1942 | goto out; | 
|  | 1943 | if (ret < nr) { | 
|  | 1944 | error = -EFAULT; | 
|  | 1945 | goto out; | 
|  | 1946 | } | 
|  | 1947 | continue; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1948 |  | 
|  | 1949 | page_not_up_to_date: | 
|  | 1950 | /* Get exclusive access to the page ... */ | 
| Oleg Nesterov | 8546232 | 2008-06-08 21:20:43 +0400 | [diff] [blame] | 1951 | error = lock_page_killable(page); | 
|  | 1952 | if (unlikely(error)) | 
|  | 1953 | goto readpage_error; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1954 |  | 
| Hisashi Hifumi | 8ab22b9 | 2008-07-28 15:46:36 -0700 | [diff] [blame] | 1955 | page_not_up_to_date_locked: | 
| Nick Piggin | da6052f | 2006-09-25 23:31:35 -0700 | [diff] [blame] | 1956 | /* Did it get truncated before we got the lock? */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1957 | if (!page->mapping) { | 
|  | 1958 | unlock_page(page); | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1959 | put_page(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1960 | continue; | 
|  | 1961 | } | 
|  | 1962 |  | 
|  | 1963 | /* Did somebody else fill it already? */ | 
|  | 1964 | if (PageUptodate(page)) { | 
|  | 1965 | unlock_page(page); | 
|  | 1966 | goto page_ok; | 
|  | 1967 | } | 
|  | 1968 |  | 
|  | 1969 | readpage: | 
| Jeff Moyer | 91803b4 | 2010-05-26 11:49:40 -0400 | [diff] [blame] | 1970 | /* | 
|  | 1971 | * A previous I/O error may have been due to temporary | 
|  | 1972 | * failures, eg. multipath errors. | 
|  | 1973 | * PG_error will be set again if readpage fails. | 
|  | 1974 | */ | 
|  | 1975 | ClearPageError(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1976 | /* Start the actual read. The read will unlock the page. */ | 
|  | 1977 | error = mapping->a_ops->readpage(filp, page); | 
|  | 1978 |  | 
| Zach Brown | 994fc28c | 2005-12-15 14:28:17 -0800 | [diff] [blame] | 1979 | if (unlikely(error)) { | 
|  | 1980 | if (error == AOP_TRUNCATED_PAGE) { | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1981 | put_page(page); | 
| Al Viro | 6e58e79 | 2014-02-03 17:07:03 -0500 | [diff] [blame] | 1982 | error = 0; | 
| Zach Brown | 994fc28c | 2005-12-15 14:28:17 -0800 | [diff] [blame] | 1983 | goto find_page; | 
|  | 1984 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1985 | goto readpage_error; | 
| Zach Brown | 994fc28c | 2005-12-15 14:28:17 -0800 | [diff] [blame] | 1986 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1987 |  | 
|  | 1988 | if (!PageUptodate(page)) { | 
| Oleg Nesterov | 8546232 | 2008-06-08 21:20:43 +0400 | [diff] [blame] | 1989 | error = lock_page_killable(page); | 
|  | 1990 | if (unlikely(error)) | 
|  | 1991 | goto readpage_error; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1992 | if (!PageUptodate(page)) { | 
|  | 1993 | if (page->mapping == NULL) { | 
|  | 1994 | /* | 
| Christoph Hellwig | 2ecdc82 | 2010-01-26 17:27:20 +0100 | [diff] [blame] | 1995 | * invalidate_mapping_pages got it | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1996 | */ | 
|  | 1997 | unlock_page(page); | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1998 | put_page(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1999 | goto find_page; | 
|  | 2000 | } | 
|  | 2001 | unlock_page(page); | 
| Fengguang Wu | 7ff8107 | 2007-10-16 01:24:35 -0700 | [diff] [blame] | 2002 | shrink_readahead_size_eio(filp, ra); | 
| Oleg Nesterov | 8546232 | 2008-06-08 21:20:43 +0400 | [diff] [blame] | 2003 | error = -EIO; | 
|  | 2004 | goto readpage_error; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2005 | } | 
|  | 2006 | unlock_page(page); | 
|  | 2007 | } | 
|  | 2008 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2009 | goto page_ok; | 
|  | 2010 |  | 
|  | 2011 | readpage_error: | 
|  | 2012 | /* UHHUH! A synchronous read error occurred. Report it */ | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2013 | put_page(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2014 | goto out; | 
|  | 2015 |  | 
|  | 2016 | no_cached_page: | 
|  | 2017 | /* | 
|  | 2018 | * Ok, it wasn't cached, so we need to create a new | 
|  | 2019 | * page. | 
|  | 2020 | */ | 
| Nick Piggin | eb2be18 | 2007-10-16 01:24:57 -0700 | [diff] [blame] | 2021 | page = page_cache_alloc_cold(mapping); | 
|  | 2022 | if (!page) { | 
| Al Viro | 6e58e79 | 2014-02-03 17:07:03 -0500 | [diff] [blame] | 2023 | error = -ENOMEM; | 
| Nick Piggin | eb2be18 | 2007-10-16 01:24:57 -0700 | [diff] [blame] | 2024 | goto out; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2025 | } | 
| Michal Hocko | 6afdb85 | 2015-06-24 16:58:06 -0700 | [diff] [blame] | 2026 | error = add_to_page_cache_lru(page, mapping, index, | 
| Michal Hocko | c62d255 | 2015-11-06 16:28:49 -0800 | [diff] [blame] | 2027 | mapping_gfp_constraint(mapping, GFP_KERNEL)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2028 | if (error) { | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2029 | put_page(page); | 
| Al Viro | 6e58e79 | 2014-02-03 17:07:03 -0500 | [diff] [blame] | 2030 | if (error == -EEXIST) { | 
|  | 2031 | error = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2032 | goto find_page; | 
| Al Viro | 6e58e79 | 2014-02-03 17:07:03 -0500 | [diff] [blame] | 2033 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2034 | goto out; | 
|  | 2035 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2036 | goto readpage; | 
|  | 2037 | } | 
|  | 2038 |  | 
|  | 2039 | out: | 
| Fengguang Wu | 7ff8107 | 2007-10-16 01:24:35 -0700 | [diff] [blame] | 2040 | ra->prev_pos = prev_index; | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2041 | ra->prev_pos <<= PAGE_SHIFT; | 
| Fengguang Wu | 7ff8107 | 2007-10-16 01:24:35 -0700 | [diff] [blame] | 2042 | ra->prev_pos |= prev_offset; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2043 |  | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2044 | *ppos = ((loff_t)index << PAGE_SHIFT) + offset; | 
| Krishna Kumar | 0c6aa26 | 2008-10-15 22:01:13 -0700 | [diff] [blame] | 2045 | file_accessed(filp); | 
| Al Viro | 6e58e79 | 2014-02-03 17:07:03 -0500 | [diff] [blame] | 2046 | return written ? written : error; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2047 | } | 
|  | 2048 |  | 
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 2049 | /** | 
| Al Viro | 6abd232 | 2014-04-04 14:20:57 -0400 | [diff] [blame] | 2050 | * generic_file_read_iter - generic filesystem read routine | 
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 2051 | * @iocb:	kernel I/O control block | 
| Al Viro | 6abd232 | 2014-04-04 14:20:57 -0400 | [diff] [blame] | 2052 | * @iter:	destination for the data read | 
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 2053 | * | 
| Al Viro | 6abd232 | 2014-04-04 14:20:57 -0400 | [diff] [blame] | 2054 | * This is the "read_iter()" routine for all filesystems | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2055 | * that can use the page cache directly. | 
|  | 2056 | */ | 
|  | 2057 | ssize_t | 
| Al Viro | ed978a8 | 2014-03-05 22:53:04 -0500 | [diff] [blame] | 2058 | generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2059 | { | 
| Al Viro | ed978a8 | 2014-03-05 22:53:04 -0500 | [diff] [blame] | 2060 | struct file *file = iocb->ki_filp; | 
|  | 2061 | ssize_t retval = 0; | 
| Nicolai Stange | e7080a4 | 2016-03-25 14:22:14 -0700 | [diff] [blame] | 2062 | size_t count = iov_iter_count(iter); | 
|  | 2063 |  | 
|  | 2064 | if (!count) | 
|  | 2065 | goto out; /* skip atime */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2066 |  | 
| Al Viro | 2ba48ce | 2015-04-09 13:52:01 -0400 | [diff] [blame] | 2067 | if (iocb->ki_flags & IOCB_DIRECT) { | 
| Al Viro | ed978a8 | 2014-03-05 22:53:04 -0500 | [diff] [blame] | 2068 | struct address_space *mapping = file->f_mapping; | 
|  | 2069 | struct inode *inode = mapping->host; | 
| Badari Pulavarty | 543ade1 | 2006-09-30 23:28:48 -0700 | [diff] [blame] | 2070 | loff_t size; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2071 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2072 | size = i_size_read(inode); | 
| Goldwyn Rodrigues | 6be96d3 | 2017-06-20 07:05:44 -0500 | [diff] [blame] | 2073 | if (iocb->ki_flags & IOCB_NOWAIT) { | 
|  | 2074 | if (filemap_range_has_page(mapping, iocb->ki_pos, | 
|  | 2075 | iocb->ki_pos + count - 1)) | 
|  | 2076 | return -EAGAIN; | 
|  | 2077 | } else { | 
|  | 2078 | retval = filemap_write_and_wait_range(mapping, | 
|  | 2079 | iocb->ki_pos, | 
|  | 2080 | iocb->ki_pos + count - 1); | 
|  | 2081 | if (retval < 0) | 
|  | 2082 | goto out; | 
|  | 2083 | } | 
| Al Viro | ed978a8 | 2014-03-05 22:53:04 -0500 | [diff] [blame] | 2084 |  | 
| Christoph Hellwig | 0d5b0cf | 2016-10-03 09:48:08 +1100 | [diff] [blame] | 2085 | file_accessed(file); | 
|  | 2086 |  | 
| Al Viro | 5ecda13 | 2017-04-13 14:13:36 -0400 | [diff] [blame] | 2087 | retval = mapping->a_ops->direct_IO(iocb, iter); | 
| Al Viro | c3a6902 | 2016-10-10 13:26:27 -0400 | [diff] [blame] | 2088 | if (retval >= 0) { | 
| Christoph Hellwig | c64fb5c | 2016-04-07 08:51:55 -0700 | [diff] [blame] | 2089 | iocb->ki_pos += retval; | 
| Al Viro | 5ecda13 | 2017-04-13 14:13:36 -0400 | [diff] [blame] | 2090 | count -= retval; | 
| Steven Whitehouse | 9fe55ee | 2014-01-24 14:42:22 +0000 | [diff] [blame] | 2091 | } | 
| Al Viro | 5b47d59 | 2017-05-08 13:54:47 -0400 | [diff] [blame] | 2092 | iov_iter_revert(iter, count - iov_iter_count(iter)); | 
| Josef Bacik | 66f998f | 2010-05-23 11:00:54 -0400 | [diff] [blame] | 2093 |  | 
| Steven Whitehouse | 9fe55ee | 2014-01-24 14:42:22 +0000 | [diff] [blame] | 2094 | /* | 
|  | 2095 | * Btrfs can have a short DIO read if we encounter | 
|  | 2096 | * compressed extents, so if there was an error, or if | 
|  | 2097 | * we've already read everything we wanted to, or if | 
|  | 2098 | * there was a short read because we hit EOF, go ahead | 
|  | 2099 | * and return.  Otherwise fall through to buffered I/O for | 
| Matthew Wilcox | fbbbad4 | 2015-02-16 15:58:53 -0800 | [diff] [blame] | 2100 | * the rest of the read.  Buffered reads will not work for | 
|  | 2101 | * DAX files, so don't bother trying. | 
| Steven Whitehouse | 9fe55ee | 2014-01-24 14:42:22 +0000 | [diff] [blame] | 2102 | */ | 
| Al Viro | 5ecda13 | 2017-04-13 14:13:36 -0400 | [diff] [blame] | 2103 | if (retval < 0 || !count || iocb->ki_pos >= size || | 
| Christoph Hellwig | 0d5b0cf | 2016-10-03 09:48:08 +1100 | [diff] [blame] | 2104 | IS_DAX(inode)) | 
| Steven Whitehouse | 9fe55ee | 2014-01-24 14:42:22 +0000 | [diff] [blame] | 2105 | goto out; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2106 | } | 
|  | 2107 |  | 
| Christoph Hellwig | c64fb5c | 2016-04-07 08:51:55 -0700 | [diff] [blame] | 2108 | retval = do_generic_file_read(file, &iocb->ki_pos, iter, retval); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2109 | out: | 
|  | 2110 | return retval; | 
|  | 2111 | } | 
| Al Viro | ed978a8 | 2014-03-05 22:53:04 -0500 | [diff] [blame] | 2112 | EXPORT_SYMBOL(generic_file_read_iter); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2113 |  | 
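|  |  | /* | 
|  |  |  * Filesystems that go through the page cache normally plug this routine | 
|  |  |  * straight into their file_operations. A minimal sketch of that wiring, | 
|  |  |  * modelled on what ext2-style filesystems do (the struct name is | 
|  |  |  * illustrative): | 
|  |  |  */ | 
|  |  | static const struct file_operations example_file_operations = { | 
|  |  | 	.llseek		= generic_file_llseek, | 
|  |  | 	.read_iter	= generic_file_read_iter, | 
|  |  | 	.write_iter	= generic_file_write_iter, | 
|  |  | 	.mmap		= generic_file_mmap, | 
|  |  | }; | 
|  |  |  | 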
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2114 | #ifdef CONFIG_MMU | 
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 2115 | /** | 
|  | 2116 | * page_cache_read - adds requested page to the page cache if not already there | 
|  | 2117 | * @file:	file to read | 
|  | 2118 | * @offset:	page index | 
| Randy Dunlap | 62eb320 | 2016-02-11 16:12:58 -0800 | [diff] [blame] | 2119 | * @gfp_mask:	memory allocation flags | 
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 2120 | * | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2121 | * This adds the requested page to the page cache if it isn't already there, | 
|  | 2122 | * and schedules an I/O to read in its contents from disk. | 
|  | 2123 | */ | 
| Michal Hocko | c20cd45 | 2016-01-14 15:20:12 -0800 | [diff] [blame] | 2124 | static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2125 | { | 
|  | 2126 | struct address_space *mapping = file->f_mapping; | 
| Paul McQuade | 99dadfd | 2014-10-09 15:29:03 -0700 | [diff] [blame] | 2127 | struct page *page; | 
| Zach Brown | 994fc28c | 2005-12-15 14:28:17 -0800 | [diff] [blame] | 2128 | int ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2129 |  | 
| Zach Brown | 994fc28c | 2005-12-15 14:28:17 -0800 | [diff] [blame] | 2130 | do { | 
| Michal Hocko | c20cd45 | 2016-01-14 15:20:12 -0800 | [diff] [blame] | 2131 | page = __page_cache_alloc(gfp_mask|__GFP_COLD); | 
| Zach Brown | 994fc28c | 2005-12-15 14:28:17 -0800 | [diff] [blame] | 2132 | if (!page) | 
|  | 2133 | return -ENOMEM; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2134 |  | 
| Michal Hocko | c20cd45 | 2016-01-14 15:20:12 -0800 | [diff] [blame] | 2135 | ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask & GFP_KERNEL); | 
| Zach Brown | 994fc28c | 2005-12-15 14:28:17 -0800 | [diff] [blame] | 2136 | if (ret == 0) | 
|  | 2137 | ret = mapping->a_ops->readpage(file, page); | 
|  | 2138 | else if (ret == -EEXIST) | 
|  | 2139 | ret = 0; /* losing race to add is OK */ | 
|  | 2140 |  | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2141 | put_page(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2142 |  | 
| Zach Brown | 994fc28c | 2005-12-15 14:28:17 -0800 | [diff] [blame] | 2143 | } while (ret == AOP_TRUNCATED_PAGE); | 
| Paul McQuade | 99dadfd | 2014-10-09 15:29:03 -0700 | [diff] [blame] | 2144 |  | 
| Zach Brown | 994fc28c | 2005-12-15 14:28:17 -0800 | [diff] [blame] | 2145 | return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2146 | } | 
|  | 2147 |  | 
|  | 2148 | #define MMAP_LOTSAMISS  (100) | 
|  | 2149 |  | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 2150 | /* | 
|  | 2151 | * Synchronous readahead happens when we don't even find | 
|  | 2152 | * a page in the page cache at all. | 
|  | 2153 | */ | 
|  | 2154 | static void do_sync_mmap_readahead(struct vm_area_struct *vma, | 
|  | 2155 | struct file_ra_state *ra, | 
|  | 2156 | struct file *file, | 
|  | 2157 | pgoff_t offset) | 
|  | 2158 | { | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 2159 | struct address_space *mapping = file->f_mapping; | 
|  | 2160 |  | 
|  | 2161 | /* If we don't want any read-ahead, don't bother */ | 
| Joe Perches | 64363aa | 2013-07-08 16:00:18 -0700 | [diff] [blame] | 2162 | if (vma->vm_flags & VM_RAND_READ) | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 2163 | return; | 
| Wu Fengguang | 275b12b | 2011-05-24 17:12:28 -0700 | [diff] [blame] | 2164 | if (!ra->ra_pages) | 
|  | 2165 | return; | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 2166 |  | 
| Joe Perches | 64363aa | 2013-07-08 16:00:18 -0700 | [diff] [blame] | 2167 | if (vma->vm_flags & VM_SEQ_READ) { | 
| Wu Fengguang | 7ffc59b | 2009-06-16 15:31:38 -0700 | [diff] [blame] | 2168 | page_cache_sync_readahead(mapping, ra, file, offset, | 
|  | 2169 | ra->ra_pages); | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 2170 | return; | 
|  | 2171 | } | 
|  | 2172 |  | 
| Andi Kleen | 207d04b | 2011-05-24 17:12:29 -0700 | [diff] [blame] | 2173 | /* Avoid banging the cache line if not needed */ | 
|  | 2174 | if (ra->mmap_miss < MMAP_LOTSAMISS * 10) | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 2175 | ra->mmap_miss++; | 
|  | 2176 |  | 
|  | 2177 | /* | 
|  | 2178 | * Do we miss much more than hit in this file? If so, | 
|  | 2179 | * stop bothering with read-ahead. It will only hurt. | 
|  | 2180 | */ | 
|  | 2181 | if (ra->mmap_miss > MMAP_LOTSAMISS) | 
|  | 2182 | return; | 
|  | 2183 |  | 
| Wu Fengguang | d30a110 | 2009-06-16 15:31:30 -0700 | [diff] [blame] | 2184 | /* | 
|  | 2185 | * mmap read-around | 
|  | 2186 | */ | 
| Roman Gushchin | 600e19a | 2015-11-05 18:47:08 -0800 | [diff] [blame] | 2187 | ra->start = max_t(long, 0, offset - ra->ra_pages / 2); | 
|  | 2188 | ra->size = ra->ra_pages; | 
|  | 2189 | ra->async_size = ra->ra_pages / 4; | 
| Wu Fengguang | 275b12b | 2011-05-24 17:12:28 -0700 | [diff] [blame] | 2190 | ra_submit(ra, mapping, file); | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 2191 | } | 
|  | 2192 |  | 
|  | 2193 | /* | 
|  | 2194 | * Asynchronous readahead happens when we find the page with PG_readahead | 
|  | 2195 | * set, so we may want to extend the readahead further. | 
|  | 2196 | */ | 
|  | 2197 | static void do_async_mmap_readahead(struct vm_area_struct *vma, | 
|  | 2198 | struct file_ra_state *ra, | 
|  | 2199 | struct file *file, | 
|  | 2200 | struct page *page, | 
|  | 2201 | pgoff_t offset) | 
|  | 2202 | { | 
|  | 2203 | struct address_space *mapping = file->f_mapping; | 
|  | 2204 |  | 
|  | 2205 | /* If we don't want any read-ahead, don't bother */ | 
| Joe Perches | 64363aa | 2013-07-08 16:00:18 -0700 | [diff] [blame] | 2206 | if (vma->vm_flags & VM_RAND_READ) | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 2207 | return; | 
|  | 2208 | if (ra->mmap_miss > 0) | 
|  | 2209 | ra->mmap_miss--; | 
|  | 2210 | if (PageReadahead(page)) | 
| Wu Fengguang | 2fad6f5 | 2009-06-16 15:31:29 -0700 | [diff] [blame] | 2211 | page_cache_async_readahead(mapping, ra, file, | 
|  | 2212 | page, offset, ra->ra_pages); | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 2213 | } | 
|  | 2214 |  | 
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 2215 | /** | 
| Nick Piggin | 54cb882 | 2007-07-19 01:46:59 -0700 | [diff] [blame] | 2216 | * filemap_fault - read in file data for page fault handling | 
| Nick Piggin | d0217ac | 2007-07-19 01:47:03 -0700 | [diff] [blame] | 2217 | * @vmf:	struct vm_fault containing details of the fault | 
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 2218 | * | 
| Nick Piggin | 54cb882 | 2007-07-19 01:46:59 -0700 | [diff] [blame] | 2219 | * filemap_fault() is invoked via the vma operations vector for a | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2220 | * mapped memory region to read in file data during a page fault. | 
|  | 2221 | * | 
|  | 2222 | * The gotos are kind of ugly, but this streamlines the normal case of having | 
|  | 2223 | * it in the page cache, and handles the special cases reasonably without | 
|  | 2224 | * having a lot of duplicated code. | 
| Paul Cassella | 9a95f3c | 2014-08-06 16:07:24 -0700 | [diff] [blame] | 2225 | * | 
|  | 2226 | * vma->vm_mm->mmap_sem must be held on entry. | 
|  | 2227 | * | 
|  | 2228 | * If our return value has VM_FAULT_RETRY set, it's because | 
|  | 2229 | * lock_page_or_retry() returned 0. | 
|  | 2230 | * The mmap_sem has usually been released in this case. | 
|  | 2231 | * See __lock_page_or_retry() for the exception. | 
|  | 2232 | * | 
|  | 2233 | * If our return value does not have VM_FAULT_RETRY set, the mmap_sem | 
|  | 2234 | * has not been released. | 
|  | 2235 | * | 
|  | 2236 | * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2237 | */ | 
| Dave Jiang | 11bac80 | 2017-02-24 14:56:41 -0800 | [diff] [blame] | 2238 | int filemap_fault(struct vm_fault *vmf) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2239 | { | 
|  | 2240 | int error; | 
| Dave Jiang | 11bac80 | 2017-02-24 14:56:41 -0800 | [diff] [blame] | 2241 | struct file *file = vmf->vma->vm_file; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2242 | struct address_space *mapping = file->f_mapping; | 
|  | 2243 | struct file_ra_state *ra = &file->f_ra; | 
|  | 2244 | struct inode *inode = mapping->host; | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 2245 | pgoff_t offset = vmf->pgoff; | 
| Matthew Wilcox | 9ab2594 | 2017-05-03 14:53:29 -0700 | [diff] [blame] | 2246 | pgoff_t max_off; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2247 | struct page *page; | 
| Nick Piggin | 83c5407 | 2007-07-19 01:47:05 -0700 | [diff] [blame] | 2248 | int ret = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2249 |  | 
| Matthew Wilcox | 9ab2594 | 2017-05-03 14:53:29 -0700 | [diff] [blame] | 2250 | max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); | 
|  | 2251 | if (unlikely(offset >= max_off)) | 
| Linus Torvalds | 5307cc1 | 2007-10-31 09:19:46 -0700 | [diff] [blame] | 2252 | return VM_FAULT_SIGBUS; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2253 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2254 | /* | 
| Johannes Weiner | 4942642 | 2013-10-16 13:46:59 -0700 | [diff] [blame] | 2255 | * Do we have something in the page cache already? | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2256 | */ | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 2257 | page = find_get_page(mapping, offset); | 
| Shaohua Li | 45cac65 | 2012-10-08 16:32:19 -0700 | [diff] [blame] | 2258 | if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2259 | /* | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 2260 | * We found the page, so try async readahead before | 
|  | 2261 | * waiting for the lock. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2262 | */ | 
| Dave Jiang | 11bac80 | 2017-02-24 14:56:41 -0800 | [diff] [blame] | 2263 | do_async_mmap_readahead(vmf->vma, ra, file, page, offset); | 
| Shaohua Li | 45cac65 | 2012-10-08 16:32:19 -0700 | [diff] [blame] | 2264 | } else if (!page) { | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 2265 | /* No page in the page cache at all */ | 
| Dave Jiang | 11bac80 | 2017-02-24 14:56:41 -0800 | [diff] [blame] | 2266 | do_sync_mmap_readahead(vmf->vma, ra, file, offset); | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 2267 | count_vm_event(PGMAJFAULT); | 
| Roman Gushchin | 2262185 | 2017-07-06 15:40:25 -0700 | [diff] [blame^] | 2268 | count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT); | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 2269 | ret = VM_FAULT_MAJOR; | 
|  | 2270 | retry_find: | 
| Michel Lespinasse | b522c94 | 2010-10-26 14:21:56 -0700 | [diff] [blame] | 2271 | page = find_get_page(mapping, offset); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2272 | if (!page) | 
|  | 2273 | goto no_cached_page; | 
|  | 2274 | } | 
|  | 2275 |  | 
| Dave Jiang | 11bac80 | 2017-02-24 14:56:41 -0800 | [diff] [blame] | 2276 | if (!lock_page_or_retry(page, vmf->vma->vm_mm, vmf->flags)) { | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2277 | put_page(page); | 
| Michel Lespinasse | d065bd8 | 2010-10-26 14:21:57 -0700 | [diff] [blame] | 2278 | return ret | VM_FAULT_RETRY; | 
| Michel Lespinasse | d88c092 | 2010-11-02 13:05:18 -0700 | [diff] [blame] | 2279 | } | 
| Michel Lespinasse | b522c94 | 2010-10-26 14:21:56 -0700 | [diff] [blame] | 2280 |  | 
|  | 2281 | /* Did it get truncated? */ | 
|  | 2282 | if (unlikely(page->mapping != mapping)) { | 
|  | 2283 | unlock_page(page); | 
|  | 2284 | put_page(page); | 
|  | 2285 | goto retry_find; | 
|  | 2286 | } | 
| Sasha Levin | 309381fea | 2014-01-23 15:52:54 -0800 | [diff] [blame] | 2287 | VM_BUG_ON_PAGE(page->index != offset, page); | 
| Michel Lespinasse | b522c94 | 2010-10-26 14:21:56 -0700 | [diff] [blame] | 2288 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2289 | /* | 
| Nick Piggin | d00806b | 2007-07-19 01:46:57 -0700 | [diff] [blame] | 2290 | * We have a locked page in the page cache; now we need to check | 
|  | 2291 | * that it's up-to-date. If not, it is going to be due to an error. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2292 | */ | 
| Nick Piggin | d00806b | 2007-07-19 01:46:57 -0700 | [diff] [blame] | 2293 | if (unlikely(!PageUptodate(page))) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2294 | goto page_not_uptodate; | 
|  | 2295 |  | 
| Linus Torvalds | ef00e08 | 2009-06-16 15:31:25 -0700 | [diff] [blame] | 2296 | /* | 
|  | 2297 | * Found the page and have a reference on it. | 
|  | 2298 | * We must recheck i_size under page lock. | 
|  | 2299 | */ | 
| Matthew Wilcox | 9ab2594 | 2017-05-03 14:53:29 -0700 | [diff] [blame] | 2300 | max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); | 
|  | 2301 | if (unlikely(offset >= max_off)) { | 
| Nick Piggin | d00806b | 2007-07-19 01:46:57 -0700 | [diff] [blame] | 2302 | unlock_page(page); | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2303 | put_page(page); | 
| Linus Torvalds | 5307cc1 | 2007-10-31 09:19:46 -0700 | [diff] [blame] | 2304 | return VM_FAULT_SIGBUS; | 
| Nick Piggin | d00806b | 2007-07-19 01:46:57 -0700 | [diff] [blame] | 2305 | } | 
|  | 2306 |  | 
| Nick Piggin | d0217ac | 2007-07-19 01:47:03 -0700 | [diff] [blame] | 2307 | vmf->page = page; | 
| Nick Piggin | 83c5407 | 2007-07-19 01:47:05 -0700 | [diff] [blame] | 2308 | return ret | VM_FAULT_LOCKED; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2309 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2310 | no_cached_page: | 
|  | 2311 | /* | 
|  | 2312 | * We're only likely to ever get here if MADV_RANDOM is in | 
|  | 2313 | * effect. | 
|  | 2314 | */ | 
| Michal Hocko | c20cd45 | 2016-01-14 15:20:12 -0800 | [diff] [blame] | 2315 | error = page_cache_read(file, offset, vmf->gfp_mask); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2316 |  | 
|  | 2317 | /* | 
|  | 2318 | * The page we want has now been added to the page cache. | 
|  | 2319 | * In the unlikely event that someone removed it in the | 
|  | 2320 | * meantime, we'll just come back here and read it again. | 
|  | 2321 | */ | 
|  | 2322 | if (error >= 0) | 
|  | 2323 | goto retry_find; | 
|  | 2324 |  | 
|  | 2325 | /* | 
|  | 2326 | * An error return from page_cache_read can result if the | 
|  | 2327 | * system is low on memory, or a problem occurs while trying | 
|  | 2328 | * to schedule I/O. | 
|  | 2329 | */ | 
|  | 2330 | if (error == -ENOMEM) | 
| Nick Piggin | d0217ac | 2007-07-19 01:47:03 -0700 | [diff] [blame] | 2331 | return VM_FAULT_OOM; | 
|  | 2332 | return VM_FAULT_SIGBUS; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2333 |  | 
|  | 2334 | page_not_uptodate: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2335 | /* | 
|  | 2336 | * Umm, take care of errors if the page isn't up-to-date. | 
|  | 2337 | * Try to re-read it _once_. We do this synchronously, | 
|  | 2338 | * because there really aren't any performance issues here | 
|  | 2339 | * and we need to check for errors. | 
|  | 2340 | */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2341 | ClearPageError(page); | 
| Zach Brown | 994fc28c | 2005-12-15 14:28:17 -0800 | [diff] [blame] | 2342 | error = mapping->a_ops->readpage(file, page); | 
| Miklos Szeredi | 3ef0f72 | 2008-05-14 16:05:37 -0700 | [diff] [blame] | 2343 | if (!error) { | 
|  | 2344 | wait_on_page_locked(page); | 
|  | 2345 | if (!PageUptodate(page)) | 
|  | 2346 | error = -EIO; | 
|  | 2347 | } | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2348 | put_page(page); | 
| Nick Piggin | d00806b | 2007-07-19 01:46:57 -0700 | [diff] [blame] | 2349 |  | 
|  | 2350 | if (!error || error == AOP_TRUNCATED_PAGE) | 
|  | 2351 | goto retry_find; | 
|  | 2352 |  | 
|  | 2353 | /* Things didn't work out. Return zero to tell the mm layer so. */ | 
|  | 2354 | shrink_readahead_size_eio(file, ra); | 
| Nick Piggin | d0217ac | 2007-07-19 01:47:03 -0700 | [diff] [blame] | 2355 | return VM_FAULT_SIGBUS; | 
| Nick Piggin | 54cb882 | 2007-07-19 01:46:59 -0700 | [diff] [blame] | 2356 | } | 
|  | 2357 | EXPORT_SYMBOL(filemap_fault); | 
|  | 2358 |  | 
| Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 2359 | void filemap_map_pages(struct vm_fault *vmf, | 
| Kirill A. Shutemov | bae473a | 2016-07-26 15:25:20 -0700 | [diff] [blame] | 2360 | pgoff_t start_pgoff, pgoff_t end_pgoff) | 
| Kirill A. Shutemov | f182036 | 2014-04-07 15:37:19 -0700 | [diff] [blame] | 2361 | { | 
|  | 2362 | struct radix_tree_iter iter; | 
|  | 2363 | void **slot; | 
| Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 2364 | struct file *file = vmf->vma->vm_file; | 
| Kirill A. Shutemov | f182036 | 2014-04-07 15:37:19 -0700 | [diff] [blame] | 2365 | struct address_space *mapping = file->f_mapping; | 
| Kirill A. Shutemov | bae473a | 2016-07-26 15:25:20 -0700 | [diff] [blame] | 2366 | pgoff_t last_pgoff = start_pgoff; | 
| Matthew Wilcox | 9ab2594 | 2017-05-03 14:53:29 -0700 | [diff] [blame] | 2367 | unsigned long max_idx; | 
| Kirill A. Shutemov | 8392937 | 2016-07-26 15:26:04 -0700 | [diff] [blame] | 2368 | struct page *head, *page; | 
| Kirill A. Shutemov | f182036 | 2014-04-07 15:37:19 -0700 | [diff] [blame] | 2369 |  | 
|  | 2370 | rcu_read_lock(); | 
| Kirill A. Shutemov | bae473a | 2016-07-26 15:25:20 -0700 | [diff] [blame] | 2371 | radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, | 
|  | 2372 | start_pgoff) { | 
|  | 2373 | if (iter.index > end_pgoff) | 
| Kirill A. Shutemov | f182036 | 2014-04-07 15:37:19 -0700 | [diff] [blame] | 2374 | break; | 
|  | 2375 | repeat: | 
|  | 2376 | page = radix_tree_deref_slot(slot); | 
|  | 2377 | if (unlikely(!page)) | 
|  | 2378 | goto next; | 
|  | 2379 | if (radix_tree_exception(page)) { | 
| Matthew Wilcox | 2cf938a | 2016-03-17 14:22:03 -0700 | [diff] [blame] | 2380 | if (radix_tree_deref_retry(page)) { | 
|  | 2381 | slot = radix_tree_iter_retry(&iter); | 
|  | 2382 | continue; | 
|  | 2383 | } | 
|  | 2384 | goto next; | 
| Kirill A. Shutemov | f182036 | 2014-04-07 15:37:19 -0700 | [diff] [blame] | 2385 | } | 
|  | 2386 |  | 
| Kirill A. Shutemov | 8392937 | 2016-07-26 15:26:04 -0700 | [diff] [blame] | 2387 | head = compound_head(page); | 
|  | 2388 | if (!page_cache_get_speculative(head)) | 
| Kirill A. Shutemov | f182036 | 2014-04-07 15:37:19 -0700 | [diff] [blame] | 2389 | goto repeat; | 
|  | 2390 |  | 
| Kirill A. Shutemov | 8392937 | 2016-07-26 15:26:04 -0700 | [diff] [blame] | 2391 | /* The page was split under us? */ | 
|  | 2392 | if (compound_head(page) != head) { | 
|  | 2393 | put_page(head); | 
|  | 2394 | goto repeat; | 
|  | 2395 | } | 
|  | 2396 |  | 
| Kirill A. Shutemov | f182036 | 2014-04-07 15:37:19 -0700 | [diff] [blame] | 2397 | /* Has the page moved? */ | 
|  | 2398 | if (unlikely(page != *slot)) { | 
| Kirill A. Shutemov | 8392937 | 2016-07-26 15:26:04 -0700 | [diff] [blame] | 2399 | put_page(head); | 
| Kirill A. Shutemov | f182036 | 2014-04-07 15:37:19 -0700 | [diff] [blame] | 2400 | goto repeat; | 
|  | 2401 | } | 
|  | 2402 |  | 
|  | 2403 | if (!PageUptodate(page) || | 
|  | 2404 | PageReadahead(page) || | 
|  | 2405 | PageHWPoison(page)) | 
|  | 2406 | goto skip; | 
|  | 2407 | if (!trylock_page(page)) | 
|  | 2408 | goto skip; | 
|  | 2409 |  | 
|  | 2410 | if (page->mapping != mapping || !PageUptodate(page)) | 
|  | 2411 | goto unlock; | 
|  | 2412 |  | 
| Matthew Wilcox | 9ab2594 | 2017-05-03 14:53:29 -0700 | [diff] [blame] | 2413 | max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); | 
|  | 2414 | if (page->index >= max_idx) | 
| Kirill A. Shutemov | f182036 | 2014-04-07 15:37:19 -0700 | [diff] [blame] | 2415 | goto unlock; | 
|  | 2416 |  | 
| Kirill A. Shutemov | f182036 | 2014-04-07 15:37:19 -0700 | [diff] [blame] | 2417 | if (file->f_ra.mmap_miss > 0) | 
|  | 2418 | file->f_ra.mmap_miss--; | 
| Kirill A. Shutemov | 7267ec00 | 2016-07-26 15:25:23 -0700 | [diff] [blame] | 2419 |  | 
| Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 2420 | vmf->address += (iter.index - last_pgoff) << PAGE_SHIFT; | 
|  | 2421 | if (vmf->pte) | 
|  | 2422 | vmf->pte += iter.index - last_pgoff; | 
| Kirill A. Shutemov | 7267ec00 | 2016-07-26 15:25:23 -0700 | [diff] [blame] | 2423 | last_pgoff = iter.index; | 
| Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 2424 | if (alloc_set_pte(vmf, NULL, page)) | 
| Kirill A. Shutemov | 7267ec00 | 2016-07-26 15:25:23 -0700 | [diff] [blame] | 2425 | goto unlock; | 
| Kirill A. Shutemov | f182036 | 2014-04-07 15:37:19 -0700 | [diff] [blame] | 2426 | unlock_page(page); | 
|  | 2427 | goto next; | 
|  | 2428 | unlock: | 
|  | 2429 | unlock_page(page); | 
|  | 2430 | skip: | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2431 | put_page(page); | 
| Kirill A. Shutemov | f182036 | 2014-04-07 15:37:19 -0700 | [diff] [blame] | 2432 | next: | 
| Kirill A. Shutemov | 7267ec00 | 2016-07-26 15:25:23 -0700 | [diff] [blame] | 2433 | /* Huge page is mapped? No need to proceed. */ | 
| Jan Kara | 82b0f8c | 2016-12-14 15:06:58 -0800 | [diff] [blame] | 2434 | if (pmd_trans_huge(*vmf->pmd)) | 
| Kirill A. Shutemov | 7267ec00 | 2016-07-26 15:25:23 -0700 | [diff] [blame] | 2435 | break; | 
| Kirill A. Shutemov | bae473a | 2016-07-26 15:25:20 -0700 | [diff] [blame] | 2436 | if (iter.index == end_pgoff) | 
| Kirill A. Shutemov | f182036 | 2014-04-07 15:37:19 -0700 | [diff] [blame] | 2437 | break; | 
|  | 2438 | } | 
|  | 2439 | rcu_read_unlock(); | 
|  | 2440 | } | 
|  | 2441 | EXPORT_SYMBOL(filemap_map_pages); | 
|  | 2442 |  | 
| Dave Jiang | 11bac80 | 2017-02-24 14:56:41 -0800 | [diff] [blame] | 2443 | int filemap_page_mkwrite(struct vm_fault *vmf) | 
| Jan Kara | 4fcf1c6 | 2012-06-12 16:20:29 +0200 | [diff] [blame] | 2444 | { | 
|  | 2445 | struct page *page = vmf->page; | 
| Dave Jiang | 11bac80 | 2017-02-24 14:56:41 -0800 | [diff] [blame] | 2446 | struct inode *inode = file_inode(vmf->vma->vm_file); | 
| Jan Kara | 4fcf1c6 | 2012-06-12 16:20:29 +0200 | [diff] [blame] | 2447 | int ret = VM_FAULT_LOCKED; | 
|  | 2448 |  | 
| Jan Kara | 14da920 | 2012-06-12 16:20:37 +0200 | [diff] [blame] | 2449 | sb_start_pagefault(inode->i_sb); | 
| Dave Jiang | 11bac80 | 2017-02-24 14:56:41 -0800 | [diff] [blame] | 2450 | file_update_time(vmf->vma->vm_file); | 
| Jan Kara | 4fcf1c6 | 2012-06-12 16:20:29 +0200 | [diff] [blame] | 2451 | lock_page(page); | 
|  | 2452 | if (page->mapping != inode->i_mapping) { | 
|  | 2453 | unlock_page(page); | 
|  | 2454 | ret = VM_FAULT_NOPAGE; | 
|  | 2455 | goto out; | 
|  | 2456 | } | 
| Jan Kara | 14da920 | 2012-06-12 16:20:37 +0200 | [diff] [blame] | 2457 | /* | 
|  | 2458 | * We mark the page dirty already at this point so that, when a freeze | 
|  | 2459 | * is in progress, writeback during freezing is guaranteed to see the | 
|  | 2460 | * dirty page and write-protect it again. | 
|  | 2461 | */ | 
|  | 2462 | set_page_dirty(page); | 
| Darrick J. Wong | 1d1d1a7 | 2013-02-21 16:42:51 -0800 | [diff] [blame] | 2463 | wait_for_stable_page(page); | 
| Jan Kara | 4fcf1c6 | 2012-06-12 16:20:29 +0200 | [diff] [blame] | 2464 | out: | 
| Jan Kara | 14da920 | 2012-06-12 16:20:37 +0200 | [diff] [blame] | 2465 | sb_end_pagefault(inode->i_sb); | 
| Jan Kara | 4fcf1c6 | 2012-06-12 16:20:29 +0200 | [diff] [blame] | 2466 | return ret; | 
|  | 2467 | } | 
|  | 2468 | EXPORT_SYMBOL(filemap_page_mkwrite); | 
|  | 2469 |  | 
| Alexey Dobriyan | f0f37e2 | 2009-09-27 22:29:37 +0400 | [diff] [blame] | 2470 | const struct vm_operations_struct generic_file_vm_ops = { | 
| Nick Piggin | 54cb882 | 2007-07-19 01:46:59 -0700 | [diff] [blame] | 2471 | .fault		= filemap_fault, | 
| Kirill A. Shutemov | f182036 | 2014-04-07 15:37:19 -0700 | [diff] [blame] | 2472 | .map_pages	= filemap_map_pages, | 
| Jan Kara | 4fcf1c6 | 2012-06-12 16:20:29 +0200 | [diff] [blame] | 2473 | .page_mkwrite	= filemap_page_mkwrite, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2474 | }; | 
|  | 2475 |  | 
|  | 2476 | /* This is used for a general mmap of a disk file */ | 
|  | 2477 |  | 
|  | 2478 | int generic_file_mmap(struct file * file, struct vm_area_struct * vma) | 
|  | 2479 | { | 
|  | 2480 | struct address_space *mapping = file->f_mapping; | 
|  | 2481 |  | 
|  | 2482 | if (!mapping->a_ops->readpage) | 
|  | 2483 | return -ENOEXEC; | 
|  | 2484 | file_accessed(file); | 
|  | 2485 | vma->vm_ops = &generic_file_vm_ops; | 
|  | 2486 | return 0; | 
|  | 2487 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2488 |  | 
|  | 2489 | /* | 
|  | 2490 | * This is for filesystems which do not implement ->writepage. | 
|  | 2491 | */ | 
|  | 2492 | int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma) | 
|  | 2493 | { | 
|  | 2494 | if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) | 
|  | 2495 | return -EINVAL; | 
|  | 2496 | return generic_file_mmap(file, vma); | 
|  | 2497 | } | 
|  | 2498 | #else | 
|  | 2499 | int generic_file_mmap(struct file * file, struct vm_area_struct * vma) | 
|  | 2500 | { | 
|  | 2501 | return -ENOSYS; | 
|  | 2502 | } | 
|  | 2503 | int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma) | 
|  | 2504 | { | 
|  | 2505 | return -ENOSYS; | 
|  | 2506 | } | 
|  | 2507 | #endif /* CONFIG_MMU */ | 
|  | 2508 |  | 
|  | 2509 | EXPORT_SYMBOL(generic_file_mmap); | 
|  | 2510 | EXPORT_SYMBOL(generic_file_readonly_mmap); | 
|  | 2511 |  | 
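|  |  | /* | 
|  |  |  * A filesystem without ->writepage should wire up the readonly variant so | 
|  |  |  * that shared writable mappings are refused up front with -EINVAL; a | 
|  |  |  * sketch of the hookup (struct name illustrative): | 
|  |  |  */ | 
|  |  | static const struct file_operations example_ro_file_operations = { | 
|  |  | 	.read_iter	= generic_file_read_iter, | 
|  |  | 	.mmap		= generic_file_readonly_mmap, | 
|  |  | }; | 
|  |  |  | 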
| Sasha Levin | 67f9fd9 | 2014-04-03 14:48:18 -0700 | [diff] [blame] | 2512 | static struct page *wait_on_page_read(struct page *page) | 
|  | 2513 | { | 
|  | 2514 | if (!IS_ERR(page)) { | 
|  | 2515 | wait_on_page_locked(page); | 
|  | 2516 | if (!PageUptodate(page)) { | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2517 | put_page(page); | 
| Sasha Levin | 67f9fd9 | 2014-04-03 14:48:18 -0700 | [diff] [blame] | 2518 | page = ERR_PTR(-EIO); | 
|  | 2519 | } | 
|  | 2520 | } | 
|  | 2521 | return page; | 
|  | 2522 | } | 
|  | 2523 |  | 
| Mel Gorman | 32b6352 | 2016-03-15 14:55:36 -0700 | [diff] [blame] | 2524 | static struct page *do_read_cache_page(struct address_space *mapping, | 
| Fengguang Wu | 57f6b96 | 2007-10-16 01:24:37 -0700 | [diff] [blame] | 2525 | pgoff_t index, | 
| Hugh Dickins | 5e5358e | 2011-07-25 17:12:23 -0700 | [diff] [blame] | 2526 | int (*filler)(void *, struct page *), | 
| Linus Torvalds | 0531b2a | 2010-01-27 09:20:03 -0800 | [diff] [blame] | 2527 | void *data, | 
|  | 2528 | gfp_t gfp) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2529 | { | 
| Nick Piggin | eb2be18 | 2007-10-16 01:24:57 -0700 | [diff] [blame] | 2530 | struct page *page; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2531 | int err; | 
|  | 2532 | repeat: | 
|  | 2533 | page = find_get_page(mapping, index); | 
|  | 2534 | if (!page) { | 
| Linus Torvalds | 0531b2a | 2010-01-27 09:20:03 -0800 | [diff] [blame] | 2535 | page = __page_cache_alloc(gfp | __GFP_COLD); | 
| Nick Piggin | eb2be18 | 2007-10-16 01:24:57 -0700 | [diff] [blame] | 2536 | if (!page) | 
|  | 2537 | return ERR_PTR(-ENOMEM); | 
| Dave Kleikamp | e6f67b8 | 2011-12-21 11:05:48 -0600 | [diff] [blame] | 2538 | err = add_to_page_cache_lru(page, mapping, index, gfp); | 
| Nick Piggin | eb2be18 | 2007-10-16 01:24:57 -0700 | [diff] [blame] | 2539 | if (unlikely(err)) { | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2540 | put_page(page); | 
| Nick Piggin | eb2be18 | 2007-10-16 01:24:57 -0700 | [diff] [blame] | 2541 | if (err == -EEXIST) | 
|  | 2542 | goto repeat; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2543 | /* Presumably ENOMEM for radix tree node */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2544 | return ERR_PTR(err); | 
|  | 2545 | } | 
| Mel Gorman | 32b6352 | 2016-03-15 14:55:36 -0700 | [diff] [blame] | 2546 |  | 
|  | 2547 | filler: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2548 | err = filler(data, page); | 
|  | 2549 | if (err < 0) { | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2550 | put_page(page); | 
| Mel Gorman | 32b6352 | 2016-03-15 14:55:36 -0700 | [diff] [blame] | 2551 | return ERR_PTR(err); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2552 | } | 
| Mel Gorman | 32b6352 | 2016-03-15 14:55:36 -0700 | [diff] [blame] | 2553 |  | 
|  | 2554 | page = wait_on_page_read(page); | 
|  | 2555 | if (IS_ERR(page)) | 
|  | 2556 | return page; | 
|  | 2557 | goto out; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2558 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2559 | if (PageUptodate(page)) | 
|  | 2560 | goto out; | 
|  | 2561 |  | 
| Mel Gorman | ebded02 | 2016-03-15 14:55:39 -0700 | [diff] [blame] | 2562 | /* | 
|  | 2563 | * Page is not up to date and may be locked due to one of the following: | 
|  | 2564 | * case a: Page is being filled and the page lock is held | 
|  | 2565 | * case b: Read/write error clearing the page uptodate status | 
|  | 2566 | * case c: Truncation in progress (page locked) | 
|  | 2567 | * case d: Reclaim in progress | 
|  | 2568 | * | 
|  | 2569 | * Case a, the page will be up to date when the page is unlocked. | 
|  | 2570 | *    There is no need to serialise on the page lock here as the page | 
|  | 2571 | *    is pinned so the lock gives no additional protection. Even if | 
|  | 2572 | *    the page is truncated, the data is still valid if PageUptodate, as | 
|  | 2573 | *    it's a read vs truncate race. | 
|  | 2574 | * Case b, the page will not be up to date | 
|  | 2575 | * Case c, the page may be truncated but in itself, the data may still | 
|  | 2576 | *    be valid after IO completes as it's a read vs truncate race. The | 
|  | 2577 | *    operation must restart if the page is not uptodate on unlock but | 
|  | 2578 | *    otherwise serialising on page lock to stabilise the mapping gives | 
|  | 2579 | *    no additional guarantees to the caller as the page lock is | 
|  | 2580 | *    released before return. | 
|  | 2581 | * Case d, similar to truncation. If reclaim holds the page lock, it | 
|  | 2582 | *    will be a race with remove_mapping that determines if the mapping | 
|  | 2583 | *    is valid on unlock but otherwise the data is valid and there is | 
|  | 2584 | *    no need to serialise with page lock. | 
|  | 2585 | * | 
|  | 2586 | * As the page lock gives no additional guarantee, we optimistically | 
|  | 2587 | * wait on the page to be unlocked and check if it's up to date and | 
|  | 2588 | * use the page if it is. Otherwise, the page lock is required to | 
|  | 2589 | * distinguish between the different cases. The motivation is that we | 
|  | 2590 | * avoid spurious serialisations and wakeups when multiple processes | 
|  | 2591 | * wait on the same page for IO to complete. | 
|  | 2592 | */ | 
|  | 2593 | wait_on_page_locked(page); | 
|  | 2594 | if (PageUptodate(page)) | 
|  | 2595 | goto out; | 
|  | 2596 |  | 
|  | 2597 | /* Distinguish between all the cases under the safety of the lock */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2598 | lock_page(page); | 
| Mel Gorman | ebded02 | 2016-03-15 14:55:39 -0700 | [diff] [blame] | 2599 |  | 
|  | 2600 | /* Case c or d, restart the operation */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2601 | if (!page->mapping) { | 
|  | 2602 | unlock_page(page); | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2603 | put_page(page); | 
| Mel Gorman | 32b6352 | 2016-03-15 14:55:36 -0700 | [diff] [blame] | 2604 | goto repeat; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2605 | } | 
| Mel Gorman | ebded02 | 2016-03-15 14:55:39 -0700 | [diff] [blame] | 2606 |  | 
|  | 2607 | /* Someone else locked and filled the page in a very small window */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2608 | if (PageUptodate(page)) { | 
|  | 2609 | unlock_page(page); | 
|  | 2610 | goto out; | 
|  | 2611 | } | 
| Mel Gorman | 32b6352 | 2016-03-15 14:55:36 -0700 | [diff] [blame] | 2612 | goto filler; | 
|  | 2613 |  | 
| David Howells | c855ff3 | 2007-05-09 13:42:20 +0100 | [diff] [blame] | 2614 | out: | 
| Nick Piggin | 6fe6900 | 2007-05-06 14:49:04 -0700 | [diff] [blame] | 2615 | mark_page_accessed(page); | 
|  | 2616 | return page; | 
|  | 2617 | } | 
| Linus Torvalds | 0531b2a | 2010-01-27 09:20:03 -0800 | [diff] [blame] | 2618 |  | 
|  | 2619 | /** | 
| Sasha Levin | 67f9fd9 | 2014-04-03 14:48:18 -0700 | [diff] [blame] | 2620 | * read_cache_page - read into page cache, fill it if needed | 
| Linus Torvalds | 0531b2a | 2010-01-27 09:20:03 -0800 | [diff] [blame] | 2621 | * @mapping:	the page's address_space | 
|  | 2622 | * @index:	the page index | 
|  | 2623 | * @filler:	function to perform the read | 
| Hugh Dickins | 5e5358e | 2011-07-25 17:12:23 -0700 | [diff] [blame] | 2624 | * @data:	first arg to filler(data, page) function, often left as NULL | 
| Linus Torvalds | 0531b2a | 2010-01-27 09:20:03 -0800 | [diff] [blame] | 2625 | * | 
| Linus Torvalds | 0531b2a | 2010-01-27 09:20:03 -0800 | [diff] [blame] | 2626 | * Read into the page cache. If a page already exists, and PageUptodate() is | 
| Sasha Levin | 67f9fd9 | 2014-04-03 14:48:18 -0700 | [diff] [blame] | 2627 | * not set, try to fill the page and wait for it to become unlocked. | 
| Linus Torvalds | 0531b2a | 2010-01-27 09:20:03 -0800 | [diff] [blame] | 2628 | * | 
|  | 2629 | * If the page does not get brought uptodate, return -EIO. | 
|  | 2630 | */ | 
| Sasha Levin | 67f9fd9 | 2014-04-03 14:48:18 -0700 | [diff] [blame] | 2631 | struct page *read_cache_page(struct address_space *mapping, | 
| Linus Torvalds | 0531b2a | 2010-01-27 09:20:03 -0800 | [diff] [blame] | 2632 | pgoff_t index, | 
| Hugh Dickins | 5e5358e | 2011-07-25 17:12:23 -0700 | [diff] [blame] | 2633 | int (*filler)(void *, struct page *), | 
| Linus Torvalds | 0531b2a | 2010-01-27 09:20:03 -0800 | [diff] [blame] | 2634 | void *data) | 
|  | 2635 | { | 
|  | 2636 | return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping)); | 
|  | 2637 | } | 
| Sasha Levin | 67f9fd9 | 2014-04-03 14:48:18 -0700 | [diff] [blame] | 2638 | EXPORT_SYMBOL(read_cache_page); | 
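
A hedged caller-side sketch (editorial; not part of filemap.c): a filesystem can use read_cache_page() with a filler that simply forwards to its ->readpage, passing the mapping through @data, which is the same wiring read_cache_page_gfp() below performs with a cast. The names example_filler and example_read_page are hypothetical.

/* Hypothetical sketch: read one page of @mapping through the page cache. */
static int example_filler(void *data, struct page *page)
{
	struct address_space *mapping = data;

	/* ->readpage unlocks the page once the read completes or fails */
	return mapping->a_ops->readpage(NULL, page);
}

static struct page *example_read_page(struct address_space *mapping,
				      pgoff_t index)
{
	struct page *page;

	page = read_cache_page(mapping, index, example_filler, mapping);
	if (IS_ERR(page))
		return page;		/* e.g. ERR_PTR(-ENOMEM) or -EIO */
	/* Page is uptodate here; put_page() it when done. */
	return page;
}
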
| Linus Torvalds | 0531b2a | 2010-01-27 09:20:03 -0800 | [diff] [blame] | 2639 |  | 
|  | 2640 | /** | 
|  | 2641 | * read_cache_page_gfp - read into page cache, using specified page allocation flags. | 
|  | 2642 | * @mapping:	the page's address_space | 
|  | 2643 | * @index:	the page index | 
|  | 2644 | * @gfp:	the page allocator flags to use if allocating | 
|  | 2645 | * | 
|  | 2646 | * This is the same as "read_mapping_page(mapping, index, NULL)", but with | 
| Dave Kleikamp | e6f67b8 | 2011-12-21 11:05:48 -0600 | [diff] [blame] | 2647 | * any new page allocations done using the specified allocation flags. | 
| Linus Torvalds | 0531b2a | 2010-01-27 09:20:03 -0800 | [diff] [blame] | 2648 | * | 
|  | 2649 | * If the page does not get brought uptodate, return -EIO. | 
|  | 2650 | */ | 
|  | 2651 | struct page *read_cache_page_gfp(struct address_space *mapping, | 
|  | 2652 | pgoff_t index, | 
|  | 2653 | gfp_t gfp) | 
|  | 2654 | { | 
|  | 2655 | filler_t *filler = (filler_t *)mapping->a_ops->readpage; | 
|  | 2656 |  | 
| Sasha Levin | 67f9fd9 | 2014-04-03 14:48:18 -0700 | [diff] [blame] | 2657 | return do_read_cache_page(mapping, index, filler, NULL, gfp); | 
| Linus Torvalds | 0531b2a | 2010-01-27 09:20:03 -0800 | [diff] [blame] | 2658 | } | 
|  | 2659 | EXPORT_SYMBOL(read_cache_page_gfp); | 
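
One hedged use of the gfp variant, with a hypothetical helper name: a filesystem reading its own metadata while holding filesystem locks can constrain allocations with GFP_NOFS so that a miss in the page cache cannot recurse into filesystem reclaim.

/* Hypothetical sketch: a metadata read that must not recurse into the fs. */
static struct page *example_read_meta_page(struct address_space *mapping,
					   pgoff_t index)
{
	return read_cache_page_gfp(mapping, index,
				   mapping_gfp_constraint(mapping, GFP_NOFS));
}
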
|  | 2660 |  | 
| Nick Piggin | 2f718ff | 2007-10-16 01:24:59 -0700 | [diff] [blame] | 2661 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2662 | * Performs necessary checks before doing a write | 
|  | 2663 | * | 
| Randy Dunlap | 485bb99 | 2006-06-23 02:03:49 -0700 | [diff] [blame] | 2664 | * Can adjust the writing position or the amount of bytes to write. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2665 | * Returns a negative error code that the caller should return, or the | 
|  | 2666 | * number of bytes that remain to be written (which may be zero). | 
|  | 2667 | */ | 
| Al Viro | 3309dd0 | 2015-04-09 12:55:47 -0400 | [diff] [blame] | 2668 | inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2669 | { | 
| Al Viro | 3309dd0 | 2015-04-09 12:55:47 -0400 | [diff] [blame] | 2670 | struct file *file = iocb->ki_filp; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2671 | struct inode *inode = file->f_mapping->host; | 
| Jiri Slaby | 59e99e5 | 2010-03-05 13:41:44 -0800 | [diff] [blame] | 2672 | unsigned long limit = rlimit(RLIMIT_FSIZE); | 
| Al Viro | 3309dd0 | 2015-04-09 12:55:47 -0400 | [diff] [blame] | 2673 | loff_t pos; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2674 |  | 
| Al Viro | 3309dd0 | 2015-04-09 12:55:47 -0400 | [diff] [blame] | 2675 | if (!iov_iter_count(from)) | 
|  | 2676 | return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2677 |  | 
| Al Viro | 0fa6b00 | 2015-04-04 04:05:48 -0400 | [diff] [blame] | 2678 | /* FIXME: this is for backwards compatibility with 2.4 */ | 
| Al Viro | 2ba48ce | 2015-04-09 13:52:01 -0400 | [diff] [blame] | 2679 | if (iocb->ki_flags & IOCB_APPEND) | 
| Al Viro | 3309dd0 | 2015-04-09 12:55:47 -0400 | [diff] [blame] | 2680 | iocb->ki_pos = i_size_read(inode); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2681 |  | 
| Al Viro | 3309dd0 | 2015-04-09 12:55:47 -0400 | [diff] [blame] | 2682 | pos = iocb->ki_pos; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2683 |  | 
| Goldwyn Rodrigues | 6be96d3 | 2017-06-20 07:05:44 -0500 | [diff] [blame] | 2684 | if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT)) | 
|  | 2685 | return -EINVAL; | 
|  | 2686 |  | 
| Al Viro | 0fa6b00 | 2015-04-04 04:05:48 -0400 | [diff] [blame] | 2687 | if (limit != RLIM_INFINITY) { | 
| Al Viro | 3309dd0 | 2015-04-09 12:55:47 -0400 | [diff] [blame] | 2688 | if (iocb->ki_pos >= limit) { | 
| Al Viro | 0fa6b00 | 2015-04-04 04:05:48 -0400 | [diff] [blame] | 2689 | send_sig(SIGXFSZ, current, 0); | 
|  | 2690 | return -EFBIG; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2691 | } | 
| Al Viro | 3309dd0 | 2015-04-09 12:55:47 -0400 | [diff] [blame] | 2692 | iov_iter_truncate(from, limit - (unsigned long)pos); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2693 | } | 
|  | 2694 |  | 
|  | 2695 | /* | 
|  | 2696 | * LFS rule | 
|  | 2697 | */ | 
| Al Viro | 3309dd0 | 2015-04-09 12:55:47 -0400 | [diff] [blame] | 2698 | if (unlikely(pos + iov_iter_count(from) > MAX_NON_LFS && | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2699 | !(file->f_flags & O_LARGEFILE))) { | 
| Al Viro | 3309dd0 | 2015-04-09 12:55:47 -0400 | [diff] [blame] | 2700 | if (pos >= MAX_NON_LFS) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2701 | return -EFBIG; | 
| Al Viro | 3309dd0 | 2015-04-09 12:55:47 -0400 | [diff] [blame] | 2702 | iov_iter_truncate(from, MAX_NON_LFS - (unsigned long)pos); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2703 | } | 
|  | 2704 |  | 
|  | 2705 | /* | 
|  | 2706 | * Are we about to exceed the fs block limit? | 
|  | 2707 | * | 
|  | 2708 | * If we have written data it becomes a short write.  If we have | 
|  | 2709 | * exceeded without writing data we send a signal and return EFBIG. | 
|  | 2710 | * Linus' frestrict idea will clean these up nicely. | 
|  | 2711 | */ | 
| Al Viro | 3309dd0 | 2015-04-09 12:55:47 -0400 | [diff] [blame] | 2712 | if (unlikely(pos >= inode->i_sb->s_maxbytes)) | 
|  | 2713 | return -EFBIG; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2714 |  | 
| Al Viro | 3309dd0 | 2015-04-09 12:55:47 -0400 | [diff] [blame] | 2715 | iov_iter_truncate(from, inode->i_sb->s_maxbytes - pos); | 
|  | 2716 | return iov_iter_count(from); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2717 | } | 
|  | 2718 | EXPORT_SYMBOL(generic_write_checks); | 
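
A minimal sketch of the calling convention, assuming a hypothetical "examplefs" (it mirrors generic_file_write_iter() later in this file): the checks run under the inode lock, and a positive return means @from has already been truncated to a size the write may legally attempt.

static ssize_t examplefs_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)		/* bytes we may write; 0 means nothing to do */
		ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
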
|  | 2719 |  | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2720 | int pagecache_write_begin(struct file *file, struct address_space *mapping, | 
|  | 2721 | loff_t pos, unsigned len, unsigned flags, | 
|  | 2722 | struct page **pagep, void **fsdata) | 
|  | 2723 | { | 
|  | 2724 | const struct address_space_operations *aops = mapping->a_ops; | 
|  | 2725 |  | 
| Nick Piggin | 4e02ed4 | 2008-10-29 14:00:55 -0700 | [diff] [blame] | 2726 | return aops->write_begin(file, mapping, pos, len, flags, | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2727 | pagep, fsdata); | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2728 | } | 
|  | 2729 | EXPORT_SYMBOL(pagecache_write_begin); | 
|  | 2730 |  | 
|  | 2731 | int pagecache_write_end(struct file *file, struct address_space *mapping, | 
|  | 2732 | loff_t pos, unsigned len, unsigned copied, | 
|  | 2733 | struct page *page, void *fsdata) | 
|  | 2734 | { | 
|  | 2735 | const struct address_space_operations *aops = mapping->a_ops; | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2736 |  | 
| Nick Piggin | 4e02ed4 | 2008-10-29 14:00:55 -0700 | [diff] [blame] | 2737 | return aops->write_end(file, mapping, pos, len, copied, page, fsdata); | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2738 | } | 
|  | 2739 | EXPORT_SYMBOL(pagecache_write_end); | 
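
A hedged sketch of the begin/end pairing for in-kernel callers (the helper name is hypothetical, and the buffer is assumed to fit within one page): write_begin hands back a locked, possibly not-uptodate page plus opaque fsdata, and write_end must be told how much was actually copied.

/* Hypothetical sketch: copy a page-contained buffer into the page cache. */
static int example_write_to_pagecache(struct file *file,
				      struct address_space *mapping,
				      loff_t pos, const void *buf, unsigned len)
{
	struct page *page;
	void *fsdata;
	char *kaddr;
	int ret;

	ret = pagecache_write_begin(file, mapping, pos, len, 0,
				    &page, &fsdata);
	if (ret)
		return ret;

	kaddr = kmap_atomic(page);
	memcpy(kaddr + (pos & (PAGE_SIZE - 1)), buf, len);
	kunmap_atomic(kaddr);

	/* Report len bytes copied; write_end unlocks and releases the page. */
	ret = pagecache_write_end(file, mapping, pos, len, len, page, fsdata);
	return ret < 0 ? ret : 0;
}
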
|  | 2740 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2741 | ssize_t | 
| Christoph Hellwig | 1af5bb4 | 2016-04-07 08:51:56 -0700 | [diff] [blame] | 2742 | generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2743 | { | 
|  | 2744 | struct file	*file = iocb->ki_filp; | 
|  | 2745 | struct address_space *mapping = file->f_mapping; | 
|  | 2746 | struct inode	*inode = mapping->host; | 
| Christoph Hellwig | 1af5bb4 | 2016-04-07 08:51:56 -0700 | [diff] [blame] | 2747 | loff_t		pos = iocb->ki_pos; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2748 | ssize_t		written; | 
| Christoph Hellwig | a969e90 | 2008-07-23 21:27:04 -0700 | [diff] [blame] | 2749 | size_t		write_len; | 
|  | 2750 | pgoff_t		end; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2751 |  | 
| Al Viro | 0c94933 | 2014-03-22 06:51:37 -0400 | [diff] [blame] | 2752 | write_len = iov_iter_count(from); | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2753 | end = (pos + write_len - 1) >> PAGE_SHIFT; | 
| Christoph Hellwig | a969e90 | 2008-07-23 21:27:04 -0700 | [diff] [blame] | 2754 |  | 
| Goldwyn Rodrigues | 6be96d3 | 2017-06-20 07:05:44 -0500 | [diff] [blame] | 2755 | if (iocb->ki_flags & IOCB_NOWAIT) { | 
|  | 2756 | /* If pages are cached in the range, bail out with -EAGAIN */ | 
|  | 2757 | if (filemap_range_has_page(inode->i_mapping, pos, | 
|  | 2758 | pos + iov_iter_count(from))) | 
|  | 2759 | return -EAGAIN; | 
|  | 2760 | } else { | 
|  | 2761 | written = filemap_write_and_wait_range(mapping, pos, | 
|  | 2762 | pos + write_len - 1); | 
|  | 2763 | if (written) | 
|  | 2764 | goto out; | 
|  | 2765 | } | 
| Christoph Hellwig | a969e90 | 2008-07-23 21:27:04 -0700 | [diff] [blame] | 2766 |  | 
|  | 2767 | /* | 
|  | 2768 | * After a write we want buffered reads to be sure to go to disk to get | 
|  | 2769 | * the new data.  We invalidate clean cached pages from the region we're | 
|  | 2770 | * about to write.  We do this *before* the write so that we can return | 
| Hisashi Hifumi | 6ccfa80 | 2008-09-02 14:35:40 -0700 | [diff] [blame] | 2771 | * without clobbering -EIOCBQUEUED from ->direct_IO(). | 
| Christoph Hellwig | a969e90 | 2008-07-23 21:27:04 -0700 | [diff] [blame] | 2772 | */ | 
| Andrey Ryabinin | 55635ba | 2017-05-03 14:55:59 -0700 | [diff] [blame] | 2773 | written = invalidate_inode_pages2_range(mapping, | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2774 | pos >> PAGE_SHIFT, end); | 
| Andrey Ryabinin | 55635ba | 2017-05-03 14:55:59 -0700 | [diff] [blame] | 2775 | /* | 
|  | 2776 | * If a page cannot be invalidated, return 0 to fall back | 
|  | 2777 | * to buffered write. | 
|  | 2778 | */ | 
|  | 2779 | if (written) { | 
|  | 2780 | if (written == -EBUSY) | 
|  | 2781 | return 0; | 
|  | 2782 | goto out; | 
| Christoph Hellwig | a969e90 | 2008-07-23 21:27:04 -0700 | [diff] [blame] | 2783 | } | 
|  | 2784 |  | 
| Al Viro | 639a93a5 | 2017-04-13 14:10:15 -0400 | [diff] [blame] | 2785 | written = mapping->a_ops->direct_IO(iocb, from); | 
| Christoph Hellwig | a969e90 | 2008-07-23 21:27:04 -0700 | [diff] [blame] | 2786 |  | 
|  | 2787 | /* | 
|  | 2788 | * Finally, try again to invalidate clean pages which might have been | 
|  | 2789 | * cached by non-direct readahead, or faulted in by get_user_pages() | 
|  | 2790 | * if the source of the write was an mmap'ed region of the file | 
|  | 2791 | * we're writing.  Either one is a pretty crazy thing to do, | 
|  | 2792 | * so we don't support it 100%.  If this invalidation | 
|  | 2793 | * fails, tough, the write still worked... | 
|  | 2794 | */ | 
| Andrey Ryabinin | 55635ba | 2017-05-03 14:55:59 -0700 | [diff] [blame] | 2795 | invalidate_inode_pages2_range(mapping, | 
|  | 2796 | pos >> PAGE_SHIFT, end); | 
| Christoph Hellwig | a969e90 | 2008-07-23 21:27:04 -0700 | [diff] [blame] | 2797 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2798 | if (written > 0) { | 
| Namhyung Kim | 0116651 | 2010-10-26 14:21:58 -0700 | [diff] [blame] | 2799 | pos += written; | 
| Al Viro | 639a93a5 | 2017-04-13 14:10:15 -0400 | [diff] [blame] | 2800 | write_len -= written; | 
| Namhyung Kim | 0116651 | 2010-10-26 14:21:58 -0700 | [diff] [blame] | 2801 | if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) { | 
|  | 2802 | i_size_write(inode, pos); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2803 | mark_inode_dirty(inode); | 
|  | 2804 | } | 
| Al Viro | 5cb6c6c | 2014-02-11 20:58:20 -0500 | [diff] [blame] | 2805 | iocb->ki_pos = pos; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2806 | } | 
| Al Viro | 639a93a5 | 2017-04-13 14:10:15 -0400 | [diff] [blame] | 2807 | iov_iter_revert(from, write_len - iov_iter_count(from)); | 
| Christoph Hellwig | a969e90 | 2008-07-23 21:27:04 -0700 | [diff] [blame] | 2808 | out: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2809 | return written; | 
|  | 2810 | } | 
|  | 2811 | EXPORT_SYMBOL(generic_file_direct_write); | 
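
A quick worked example of the invalidation window above, assuming 4KiB pages: a direct write of 10000 bytes at pos 5000 covers file offsets 5000..14999, so pos >> PAGE_SHIFT = 1 and end = (5000 + 10000 - 1) >> PAGE_SHIFT = 3, and invalidate_inode_pages2_range() drops cached pages 1 through 3 inclusive: once before ->direct_IO(), where -EBUSY forces the buffered fallback, and once after, where failure is tolerated.
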
|  | 2812 |  | 
| Nick Piggin | eb2be18 | 2007-10-16 01:24:57 -0700 | [diff] [blame] | 2813 | /* | 
|  | 2814 | * Find or create a page at the given pagecache position. Return the locked | 
|  | 2815 | * page. This function is specifically for buffered writes. | 
|  | 2816 | */ | 
| Nick Piggin | 54566b2 | 2009-01-04 12:00:53 -0800 | [diff] [blame] | 2817 | struct page *grab_cache_page_write_begin(struct address_space *mapping, | 
|  | 2818 | pgoff_t index, unsigned flags) | 
| Nick Piggin | eb2be18 | 2007-10-16 01:24:57 -0700 | [diff] [blame] | 2819 | { | 
| Nick Piggin | eb2be18 | 2007-10-16 01:24:57 -0700 | [diff] [blame] | 2820 | struct page *page; | 
| Johannes Weiner | bbddabe | 2016-05-20 16:56:28 -0700 | [diff] [blame] | 2821 | int fgp_flags = FGP_LOCK|FGP_WRITE|FGP_CREAT; | 
| Johannes Weiner | 0faa70c | 2012-01-10 15:07:53 -0800 | [diff] [blame] | 2822 |  | 
| Nick Piggin | 54566b2 | 2009-01-04 12:00:53 -0800 | [diff] [blame] | 2823 | if (flags & AOP_FLAG_NOFS) | 
| Mel Gorman | 2457aec | 2014-06-04 16:10:31 -0700 | [diff] [blame] | 2824 | fgp_flags |= FGP_NOFS; | 
| Nick Piggin | eb2be18 | 2007-10-16 01:24:57 -0700 | [diff] [blame] | 2825 |  | 
| Mel Gorman | 2457aec | 2014-06-04 16:10:31 -0700 | [diff] [blame] | 2826 | page = pagecache_get_page(mapping, index, fgp_flags, | 
| Michal Hocko | 45f87de | 2014-12-29 20:30:35 +0100 | [diff] [blame] | 2827 | mapping_gfp_mask(mapping)); | 
| Mel Gorman | 2457aec | 2014-06-04 16:10:31 -0700 | [diff] [blame] | 2828 | if (page) | 
|  | 2829 | wait_for_stable_page(page); | 
|  | 2830 |  | 
| Nick Piggin | eb2be18 | 2007-10-16 01:24:57 -0700 | [diff] [blame] | 2831 | return page; | 
|  | 2832 | } | 
| Nick Piggin | 54566b2 | 2009-01-04 12:00:53 -0800 | [diff] [blame] | 2833 | EXPORT_SYMBOL(grab_cache_page_write_begin); | 
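
A hedged sketch of the callee side, assuming a hypothetical filesystem: a minimal ->write_begin can be little more than this helper (compare simple_write_begin() in fs/libfs.c); a real implementation must also prepare or zero the parts of a not-uptodate page outside [pos, pos + len).

static int examplefs_write_begin(struct file *file,
				 struct address_space *mapping,
				 loff_t pos, unsigned len, unsigned flags,
				 struct page **pagep, void **fsdata)
{
	struct page *page;

	page = grab_cache_page_write_begin(mapping, pos >> PAGE_SHIFT, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;		/* returned locked, with a reference held */
	return 0;
}
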
| Nick Piggin | eb2be18 | 2007-10-16 01:24:57 -0700 | [diff] [blame] | 2834 |  | 
| Al Viro | 3b93f91 | 2014-02-11 21:34:08 -0500 | [diff] [blame] | 2835 | ssize_t generic_perform_write(struct file *file, | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2836 | struct iov_iter *i, loff_t pos) | 
|  | 2837 | { | 
|  | 2838 | struct address_space *mapping = file->f_mapping; | 
|  | 2839 | const struct address_space_operations *a_ops = mapping->a_ops; | 
|  | 2840 | long status = 0; | 
|  | 2841 | ssize_t written = 0; | 
| Nick Piggin | 674b892 | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 2842 | unsigned int flags = 0; | 
|  | 2843 |  | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2844 | do { | 
|  | 2845 | struct page *page; | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2846 | unsigned long offset;	/* Offset into pagecache page */ | 
|  | 2847 | unsigned long bytes;	/* Bytes to write to page */ | 
|  | 2848 | size_t copied;		/* Bytes copied from user */ | 
|  | 2849 | void *fsdata; | 
|  | 2850 |  | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2851 | offset = (pos & (PAGE_SIZE - 1)); | 
|  | 2852 | bytes = min_t(unsigned long, PAGE_SIZE - offset, | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2853 | iov_iter_count(i)); | 
|  | 2854 |  | 
|  | 2855 | again: | 
| Linus Torvalds | 00a3d66 | 2015-10-07 08:32:38 +0100 | [diff] [blame] | 2856 | /* | 
|  | 2857 | * Bring in the user page that we will copy from _first_. | 
|  | 2858 | * Otherwise there's a nasty deadlock on copying from the | 
|  | 2859 | * same page as we're writing to, without it being marked | 
|  | 2860 | * up-to-date. | 
|  | 2861 | * | 
|  | 2862 | * Not only is this an optimisation, it is also required to | 
|  | 2863 | * check that the address is actually valid when the atomic | 
|  | 2864 | * usercopies below are used. | 
|  | 2865 | */ | 
|  | 2866 | if (unlikely(iov_iter_fault_in_readable(i, bytes))) { | 
|  | 2867 | status = -EFAULT; | 
|  | 2868 | break; | 
|  | 2869 | } | 
|  | 2870 |  | 
| Jan Kara | 296291c | 2015-10-22 13:32:21 -0700 | [diff] [blame] | 2871 | if (fatal_signal_pending(current)) { | 
|  | 2872 | status = -EINTR; | 
|  | 2873 | break; | 
|  | 2874 | } | 
|  | 2875 |  | 
| Nick Piggin | 674b892 | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 2876 | status = a_ops->write_begin(file, mapping, pos, bytes, flags, | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2877 | &page, &fsdata); | 
| Mel Gorman | 2457aec | 2014-06-04 16:10:31 -0700 | [diff] [blame] | 2878 | if (unlikely(status < 0)) | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2879 | break; | 
|  | 2880 |  | 
| anfei zhou | 931e80e | 2010-02-02 13:44:02 -0800 | [diff] [blame] | 2881 | if (mapping_writably_mapped(mapping)) | 
|  | 2882 | flush_dcache_page(page); | 
| Linus Torvalds | 00a3d66 | 2015-10-07 08:32:38 +0100 | [diff] [blame] | 2883 |  | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2884 | copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2885 | flush_dcache_page(page); | 
|  | 2886 |  | 
|  | 2887 | status = a_ops->write_end(file, mapping, pos, bytes, copied, | 
|  | 2888 | page, fsdata); | 
|  | 2889 | if (unlikely(status < 0)) | 
|  | 2890 | break; | 
|  | 2891 | copied = status; | 
|  | 2892 |  | 
|  | 2893 | cond_resched(); | 
|  | 2894 |  | 
| Nick Piggin | 124d3b7 | 2008-02-02 15:01:17 +0100 | [diff] [blame] | 2895 | iov_iter_advance(i, copied); | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2896 | if (unlikely(copied == 0)) { | 
|  | 2897 | /* | 
|  | 2898 | * If we were unable to copy any data at all, we must | 
|  | 2899 | * fall back to a single segment length write. | 
|  | 2900 | * | 
|  | 2901 | * If we didn't fall back here, we could livelock | 
|  | 2902 | * because not all segments in the iov can be copied at | 
|  | 2903 | * once without a pagefault. | 
|  | 2904 | */ | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2905 | bytes = min_t(unsigned long, PAGE_SIZE - offset, | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2906 | iov_iter_single_seg_count(i)); | 
|  | 2907 | goto again; | 
|  | 2908 | } | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2909 | pos += copied; | 
|  | 2910 | written += copied; | 
|  | 2911 |  | 
|  | 2912 | balance_dirty_pages_ratelimited(mapping); | 
| Nick Piggin | afddba4 | 2007-10-16 01:25:01 -0700 | [diff] [blame] | 2913 | } while (iov_iter_count(i)); | 
|  | 2914 |  | 
|  | 2915 | return written ? written : status; | 
|  | 2916 | } | 
| Al Viro | 3b93f91 | 2014-02-11 21:34:08 -0500 | [diff] [blame] | 2917 | EXPORT_SYMBOL(generic_perform_write); | 
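
A worked example of the copied == 0 fallback above: suppose @i carries two iovec segments and the user page backing the first one is reclaimed between the fault-in and the atomic copy. The atomic copy returns 0, so bytes is clamped to iov_iter_single_seg_count() and the loop retries from "again"; the iov_iter_fault_in_readable() call at the top of the loop brings the page back in, so the retry makes progress within a single segment instead of livelocking across segment boundaries.
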
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2918 |  | 
| Jan Kara | e4dd9de | 2009-08-17 18:10:06 +0200 | [diff] [blame] | 2919 | /** | 
| Al Viro | 8174202 | 2014-04-03 03:17:43 -0400 | [diff] [blame] | 2920 | * __generic_file_write_iter - write data to a file | 
| Jan Kara | e4dd9de | 2009-08-17 18:10:06 +0200 | [diff] [blame] | 2921 | * @iocb:	IO state structure (file, offset, etc.) | 
| Al Viro | 8174202 | 2014-04-03 03:17:43 -0400 | [diff] [blame] | 2922 | * @from:	iov_iter with data to write | 
| Jan Kara | e4dd9de | 2009-08-17 18:10:06 +0200 | [diff] [blame] | 2923 | * | 
|  | 2924 | * This function does all the work needed for actually writing data to a | 
|  | 2925 | * file. It does all basic checks, removes SUID from the file, updates | 
|  | 2926 | * modification times and calls proper subroutines depending on whether we | 
|  | 2927 | * do direct IO or a standard buffered write. | 
|  | 2928 | * | 
|  | 2929 | * It expects i_mutex to be grabbed unless we work on a block device or similar | 
|  | 2930 | * object which does not need locking at all. | 
|  | 2931 | * | 
|  | 2932 | * This function does *not* take care of syncing data in case of O_SYNC write. | 
|  | 2933 | * A caller has to handle it. This is mainly due to the fact that we want to | 
|  | 2934 | * avoid syncing under i_mutex. | 
|  | 2935 | */ | 
| Al Viro | 8174202 | 2014-04-03 03:17:43 -0400 | [diff] [blame] | 2936 | ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2937 | { | 
|  | 2938 | struct file *file = iocb->ki_filp; | 
| Jeff Moyer | fb5527e | 2006-10-19 23:28:13 -0700 | [diff] [blame] | 2939 | struct address_space * mapping = file->f_mapping; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2940 | struct inode 	*inode = mapping->host; | 
| Al Viro | 3b93f91 | 2014-02-11 21:34:08 -0500 | [diff] [blame] | 2941 | ssize_t		written = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2942 | ssize_t		err; | 
| Al Viro | 3b93f91 | 2014-02-11 21:34:08 -0500 | [diff] [blame] | 2943 | ssize_t		status; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2944 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2945 | /* We can write back this queue in page reclaim */ | 
| Christoph Hellwig | de1414a | 2015-01-14 10:42:36 +0100 | [diff] [blame] | 2946 | current->backing_dev_info = inode_to_bdi(inode); | 
| Jan Kara | 5fa8e0a | 2015-05-21 16:05:53 +0200 | [diff] [blame] | 2947 | err = file_remove_privs(file); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2948 | if (err) | 
|  | 2949 | goto out; | 
|  | 2950 |  | 
| Josef Bacik | c3b2da3 | 2012-03-26 09:59:21 -0400 | [diff] [blame] | 2951 | err = file_update_time(file); | 
|  | 2952 | if (err) | 
|  | 2953 | goto out; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2954 |  | 
| Al Viro | 2ba48ce | 2015-04-09 13:52:01 -0400 | [diff] [blame] | 2955 | if (iocb->ki_flags & IOCB_DIRECT) { | 
| Al Viro | 0b8def9 | 2015-04-07 10:22:53 -0400 | [diff] [blame] | 2956 | loff_t pos, endbyte; | 
| Jeff Moyer | fb5527e | 2006-10-19 23:28:13 -0700 | [diff] [blame] | 2957 |  | 
| Christoph Hellwig | 1af5bb4 | 2016-04-07 08:51:56 -0700 | [diff] [blame] | 2958 | written = generic_file_direct_write(iocb, from); | 
| Matthew Wilcox | fbbbad4 | 2015-02-16 15:58:53 -0800 | [diff] [blame] | 2959 | /* | 
|  | 2960 | * If the write stopped short of completing, fall back to | 
|  | 2961 | * buffered writes.  Some filesystems do this for writes to | 
|  | 2962 | * holes, for example.  For DAX files, a buffered write will | 
|  | 2963 | * not succeed (even if it did, DAX does not handle dirty | 
|  | 2964 | * page-cache pages correctly). | 
|  | 2965 | */ | 
| Al Viro | 0b8def9 | 2015-04-07 10:22:53 -0400 | [diff] [blame] | 2966 | if (written < 0 || !iov_iter_count(from) || IS_DAX(inode)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2967 | goto out; | 
| Al Viro | 3b93f91 | 2014-02-11 21:34:08 -0500 | [diff] [blame] | 2968 |  | 
| Al Viro | 0b8def9 | 2015-04-07 10:22:53 -0400 | [diff] [blame] | 2969 | status = generic_perform_write(file, from, pos = iocb->ki_pos); | 
| Jeff Moyer | fb5527e | 2006-10-19 23:28:13 -0700 | [diff] [blame] | 2970 | /* | 
| Al Viro | 3b93f91 | 2014-02-11 21:34:08 -0500 | [diff] [blame] | 2971 | * If generic_perform_write() returned a synchronous error | 
| Jeff Moyer | fb5527e | 2006-10-19 23:28:13 -0700 | [diff] [blame] | 2972 | * then we want to return the number of bytes which were | 
|  | 2973 | * direct-written, or the error code if that was zero.  Note | 
|  | 2974 | * that this differs from normal direct-io semantics, which | 
|  | 2975 | * will return -EFOO even if some bytes were written. | 
|  | 2976 | */ | 
| Al Viro | 60bb452 | 2014-08-08 12:39:16 -0400 | [diff] [blame] | 2977 | if (unlikely(status < 0)) { | 
| Al Viro | 3b93f91 | 2014-02-11 21:34:08 -0500 | [diff] [blame] | 2978 | err = status; | 
| Jeff Moyer | fb5527e | 2006-10-19 23:28:13 -0700 | [diff] [blame] | 2979 | goto out; | 
|  | 2980 | } | 
| Jeff Moyer | fb5527e | 2006-10-19 23:28:13 -0700 | [diff] [blame] | 2981 | /* | 
|  | 2982 | * We need to ensure that the page cache pages are written to | 
|  | 2983 | * disk and invalidated to preserve the expected O_DIRECT | 
|  | 2984 | * semantics. | 
|  | 2985 | */ | 
| Al Viro | 3b93f91 | 2014-02-11 21:34:08 -0500 | [diff] [blame] | 2986 | endbyte = pos + status - 1; | 
| Al Viro | 0b8def9 | 2015-04-07 10:22:53 -0400 | [diff] [blame] | 2987 | err = filemap_write_and_wait_range(mapping, pos, endbyte); | 
| Jeff Moyer | fb5527e | 2006-10-19 23:28:13 -0700 | [diff] [blame] | 2988 | if (err == 0) { | 
| Al Viro | 0b8def9 | 2015-04-07 10:22:53 -0400 | [diff] [blame] | 2989 | iocb->ki_pos = endbyte + 1; | 
| Al Viro | 3b93f91 | 2014-02-11 21:34:08 -0500 | [diff] [blame] | 2990 | written += status; | 
| Jeff Moyer | fb5527e | 2006-10-19 23:28:13 -0700 | [diff] [blame] | 2991 | invalidate_mapping_pages(mapping, | 
| Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2992 | pos >> PAGE_SHIFT, | 
|  | 2993 | endbyte >> PAGE_SHIFT); | 
| Jeff Moyer | fb5527e | 2006-10-19 23:28:13 -0700 | [diff] [blame] | 2994 | } else { | 
|  | 2995 | /* | 
|  | 2996 | * We don't know how much we wrote, so just return | 
|  | 2997 | * the number of bytes which were direct-written | 
|  | 2998 | */ | 
|  | 2999 | } | 
|  | 3000 | } else { | 
| Al Viro | 0b8def9 | 2015-04-07 10:22:53 -0400 | [diff] [blame] | 3001 | written = generic_perform_write(file, from, iocb->ki_pos); | 
|  | 3002 | if (likely(written > 0)) | 
|  | 3003 | iocb->ki_pos += written; | 
| Jeff Moyer | fb5527e | 2006-10-19 23:28:13 -0700 | [diff] [blame] | 3004 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3005 | out: | 
|  | 3006 | current->backing_dev_info = NULL; | 
|  | 3007 | return written ? written : err; | 
|  | 3008 | } | 
| Al Viro | 8174202 | 2014-04-03 03:17:43 -0400 | [diff] [blame] | 3009 | EXPORT_SYMBOL(__generic_file_write_iter); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3010 |  | 
| Jan Kara | e4dd9de | 2009-08-17 18:10:06 +0200 | [diff] [blame] | 3011 | /** | 
| Al Viro | 8174202 | 2014-04-03 03:17:43 -0400 | [diff] [blame] | 3012 | * generic_file_write_iter - write data to a file | 
| Jan Kara | e4dd9de | 2009-08-17 18:10:06 +0200 | [diff] [blame] | 3013 | * @iocb:	IO state structure | 
| Al Viro | 8174202 | 2014-04-03 03:17:43 -0400 | [diff] [blame] | 3014 | * @from:	iov_iter with data to write | 
| Jan Kara | e4dd9de | 2009-08-17 18:10:06 +0200 | [diff] [blame] | 3015 | * | 
| Al Viro | 8174202 | 2014-04-03 03:17:43 -0400 | [diff] [blame] | 3016 | * This is a wrapper around __generic_file_write_iter() to be used by most | 
| Jan Kara | e4dd9de | 2009-08-17 18:10:06 +0200 | [diff] [blame] | 3017 | * filesystems. It takes care of syncing the file in case of an O_SYNC | 
|  | 3018 | * write and acquires i_mutex as needed. | 
|  | 3019 | */ | 
| Al Viro | 8174202 | 2014-04-03 03:17:43 -0400 | [diff] [blame] | 3020 | ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3021 | { | 
|  | 3022 | struct file *file = iocb->ki_filp; | 
| Jan Kara | 148f948 | 2009-08-17 19:52:36 +0200 | [diff] [blame] | 3023 | struct inode *inode = file->f_mapping->host; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3024 | ssize_t ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3025 |  | 
| Al Viro | 5955102 | 2016-01-22 15:40:57 -0500 | [diff] [blame] | 3026 | inode_lock(inode); | 
| Al Viro | 3309dd0 | 2015-04-09 12:55:47 -0400 | [diff] [blame] | 3027 | ret = generic_write_checks(iocb, from); | 
|  | 3028 | if (ret > 0) | 
| Al Viro | 5f380c7 | 2015-04-07 11:28:12 -0400 | [diff] [blame] | 3029 | ret = __generic_file_write_iter(iocb, from); | 
| Al Viro | 5955102 | 2016-01-22 15:40:57 -0500 | [diff] [blame] | 3030 | inode_unlock(inode); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3031 |  | 
| Christoph Hellwig | e259221 | 2016-04-07 08:52:01 -0700 | [diff] [blame] | 3032 | if (ret > 0) | 
|  | 3033 | ret = generic_write_sync(iocb, ret); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3034 | return ret; | 
|  | 3035 | } | 
| Al Viro | 8174202 | 2014-04-03 03:17:43 -0400 | [diff] [blame] | 3036 | EXPORT_SYMBOL(generic_file_write_iter); | 
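
For context, a hedged sketch of how a filesystem typically wires these generic paths together ("examplefs" is hypothetical; all referenced helpers are existing generic ones):

static const struct file_operations examplefs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
	.fsync		= generic_file_fsync,
};
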
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3037 |  | 
| David Howells | cf9a2ae | 2006-08-29 19:05:54 +0100 | [diff] [blame] | 3038 | /** | 
|  | 3039 | * try_to_release_page() - release old fs-specific metadata on a page | 
|  | 3040 | * | 
|  | 3041 | * @page: the page which the kernel is trying to free | 
|  | 3042 | * @gfp_mask: memory allocation flags (and I/O mode) | 
|  | 3043 | * | 
|  | 3044 | * The address_space is asked to try to release any data held against | 
| mchehab@s-opensource.com | 0e056eb | 2017-03-30 17:11:36 -0300 | [diff] [blame] | 3045 | * the page (presumably at page->private).  If the release was successful, return '1'. | 
| David Howells | cf9a2ae | 2006-08-29 19:05:54 +0100 | [diff] [blame] | 3046 | * Otherwise return zero. | 
|  | 3047 | * | 
| David Howells | 266cf65 | 2009-04-03 16:42:36 +0100 | [diff] [blame] | 3048 | * This may also be called if PG_fscache is set on a page, indicating that the | 
|  | 3049 | * page is known to the local caching routines. | 
|  | 3050 | * | 
| David Howells | cf9a2ae | 2006-08-29 19:05:54 +0100 | [diff] [blame] | 3051 | * The @gfp_mask argument specifies whether I/O may be performed to release | 
| Mel Gorman | 71baba4 | 2015-11-06 16:28:28 -0800 | [diff] [blame] | 3052 | * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS). | 
| David Howells | cf9a2ae | 2006-08-29 19:05:54 +0100 | [diff] [blame] | 3053 | * | 
| David Howells | cf9a2ae | 2006-08-29 19:05:54 +0100 | [diff] [blame] | 3054 | */ | 
|  | 3055 | int try_to_release_page(struct page *page, gfp_t gfp_mask) | 
|  | 3056 | { | 
|  | 3057 | struct address_space * const mapping = page->mapping; | 
|  | 3058 |  | 
|  | 3059 | BUG_ON(!PageLocked(page)); | 
|  | 3060 | if (PageWriteback(page)) | 
|  | 3061 | return 0; | 
|  | 3062 |  | 
|  | 3063 | if (mapping && mapping->a_ops->releasepage) | 
|  | 3064 | return mapping->a_ops->releasepage(page, gfp_mask); | 
|  | 3065 | return try_to_free_buffers(page); | 
|  | 3066 | } | 
|  | 3067 |  | 
|  | 3068 | EXPORT_SYMBOL(try_to_release_page); |
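
A hedged sketch of the callee side, with a hypothetical filesystem name: a buffer_head-based ->releasepage commonly ends up forwarding to try_to_free_buffers(), the same fallback taken above when no ->releasepage is provided; the extra dirty check here is an assumed per-fs conservatism, not a requirement.

static int examplefs_releasepage(struct page *page, gfp_t gfp_mask)
{
	/* Keep pages whose private data is still dirty or in flight. */
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	return try_to_free_buffers(page);
}
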