/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/uaccess.h>

#include <linux/fs.h>
#include <linux/pagemap.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include "../include/lustre_lite.h"
#include "llite_internal.h"
#include "../include/linux/lustre_compat25.h"

static const struct vm_operations_struct ll_file_vm_ops;

void policy_from_vma(ldlm_policy_data_t *policy,
		     struct vm_area_struct *vma, unsigned long addr,
		     size_t count)
{
	policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) +
				 (vma->vm_pgoff << PAGE_SHIFT);
	policy->l_extent.end = (policy->l_extent.start + count - 1) |
			       ~PAGE_MASK;
}
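
/*
 * Example (a sketch of the arithmetic above, assuming PAGE_SIZE == 4096):
 * a fault at vm_start + 0x1234 in a vma with vm_pgoff 0x10 gives
 * l_extent.start = 0x11000 and, for count == 1, l_extent.end = 0x11fff,
 * i.e. the extent is widened to the whole page containing the fault.
 */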

struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
			       size_t count)
{
	struct vm_area_struct *vma, *ret = NULL;

	/* mmap_sem must have been held by caller. */
	LASSERT(!down_write_trylock(&mm->mmap_sem));
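	/*
	 * (down_write_trylock() succeeds only on an unheld semaphore, so a
	 * failed trylock confirms the caller really holds mmap_sem.)
	 */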

	for (vma = find_vma(mm, addr);
	     vma && vma->vm_start < (addr + count); vma = vma->vm_next) {
		if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
		    vma->vm_flags & VM_SHARED) {
			ret = vma;
			break;
		}
	}
	return ret;
}

/**
 * API-independent part of page fault initialization.
 * \param vma - virtual memory area addressed by the page fault
 * \param env - corresponding lu_env for processing
 * \param nest - nesting level
 * \param index - page index corresponding to the fault
 * \param ra_flags - vma readahead flags
 *
 * \return allocated and initialized env for the fault operation
 * \retval EINVAL if the env can't be allocated
 * \return other error codes from cl_io_init
 */
static struct cl_io *
ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
		 struct cl_env_nest *nest, pgoff_t index,
		 unsigned long *ra_flags)
{
	struct file *file = vma->vm_file;
	struct inode *inode = file_inode(file);
	struct cl_io *io;
	struct cl_fault_io *fio;
	struct lu_env *env;
	int rc;

	*env_ret = NULL;
	if (ll_file_nolock(file))
		return ERR_PTR(-EOPNOTSUPP);

	/*
	 * A page fault can occur while Lustre IO is already active for the
	 * current thread, e.g., when doing read/write against a user-level
	 * buffer mapped from a Lustre file. To avoid stomping on the
	 * existing context, optionally force the allocation of a new one.
	 */
	env = cl_env_nested_get(nest);
	if (IS_ERR(env))
		return ERR_PTR(-EINVAL);

	*env_ret = env;

restart:
	io = vvp_env_thread_io(env);
	io->ci_obj = ll_i2info(inode)->lli_clob;
	LASSERT(io->ci_obj);

	fio = &io->u.ci_fault;
	fio->ft_index = index;
	fio->ft_executable = vma->vm_flags & VM_EXEC;

	/*
	 * Disable VM_SEQ_READ and use VM_RAND_READ to make sure that
	 * the kernel will not read other pages not covered by ldlm in
	 * filemap_nopage. We do our readahead in ll_readpage.
	 */
	if (ra_flags)
		*ra_flags = vma->vm_flags & (VM_RAND_READ | VM_SEQ_READ);
	vma->vm_flags &= ~VM_SEQ_READ;
	vma->vm_flags |= VM_RAND_READ;

	CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
	       fio->ft_index, fio->ft_executable);

	rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
	if (rc == 0) {
		struct vvp_io *vio = vvp_env_io(env);
		struct ll_file_data *fd = LUSTRE_FPRIVATE(file);

		LASSERT(vio->vui_cl.cis_io == io);

		/* mmap lock must be MANDATORY because it has to cache pages. */
		io->ci_lockreq = CILR_MANDATORY;
		vio->vui_fd = fd;
	} else {
		LASSERT(rc < 0);
		cl_io_fini(env, io);
		if (io->ci_need_restart)
			goto restart;

		cl_env_nested_put(nest, env);
		io = ERR_PTR(rc);
	}

	return io;
}
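
/*
 * The callers below (ll_fault0() and ll_page_mkwrite0()) drive the io
 * returned here with cl_io_loop() and release it with cl_io_fini()
 * followed by cl_env_nested_put().
 */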

/* Shared code of the page_mkwrite method for rhel5 and rhel6 */
static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
			    bool *retry)
{
	struct lu_env *env;
	struct cl_io *io;
	struct vvp_io *vio;
	struct cl_env_nest nest;
	int result;
	sigset_t set;
	struct inode *inode;
	struct ll_inode_info *lli;

	io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL);
	if (IS_ERR(io)) {
		result = PTR_ERR(io);
		goto out;
	}

	result = io->ci_result;
	if (result < 0)
		goto out_io;

	io->u.ci_fault.ft_mkwrite = 1;
	io->u.ci_fault.ft_writable = 1;

	vio = vvp_env_io(env);
	vio->u.fault.ft_vma = vma;
	vio->u.fault.ft_vmpage = vmpage;

	set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));

	inode = vvp_object_inode(io->ci_obj);
	lli = ll_i2info(inode);

	result = cl_io_loop(env, io);

	cfs_restore_sigs(set);

	if (result == 0) {
		struct inode *inode = file_inode(vma->vm_file);
		struct ll_inode_info *lli = ll_i2info(inode);

		lock_page(vmpage);
		if (!vmpage->mapping) {
			unlock_page(vmpage);

			/* The page was truncated and the lock was cancelled;
			 * return ENODATA so that VM_FAULT_NOPAGE will be
			 * returned to handle_mm_fault().
			 */
			if (result == 0)
				result = -ENODATA;
		} else if (!PageDirty(vmpage)) {
			/* Race: the page has been cleaned by ptlrpcd after
			 * it was unlocked; it has to be added into the dirty
			 * cache again, otherwise this soon-to-be-dirty page
			 * won't consume any grants - even worse if the page
			 * is being transferred, because that would break the
			 * RPC checksum.
			 */
			unlock_page(vmpage);

			CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has been written out, retry.\n",
			       vmpage, vmpage->index);

			*retry = true;
			result = -EAGAIN;
		}

		if (result == 0) {
			spin_lock(&lli->lli_lock);
			lli->lli_flags |= LLIF_DATA_MODIFIED;
			spin_unlock(&lli->lli_lock);
		}
	}

out_io:
	cl_io_fini(env, io);
	cl_env_nested_put(&nest, env);
out:
	CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
	LASSERT(ergo(result == 0, PageLocked(vmpage)));

	return result;
}

static inline int to_fault_error(int result)
{
	switch (result) {
	case 0:
		result = VM_FAULT_LOCKED;
		break;
	case -EFAULT:
		result = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		result = VM_FAULT_OOM;
		break;
	default:
		result = VM_FAULT_SIGBUS;
		break;
	}
	return result;
}
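
/*
 * Any error other than -EFAULT or -ENOMEM (e.g. -EIO from a lower layer)
 * is therefore reported to the faulting task as SIGBUS.
 */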

/**
 * Lustre implementation of the vm_operations_struct::fault() method,
 * called by the VM to serve a page fault (in both kernel and user space).
 *
 * \param vma - virtual memory area related to the page fault
 * \param vmf - structure describing the type and address of the fault
 *
 * \return allocated and filled _locked_ page for the address
 * \retval VM_FAULT_ERROR on general error
 * \retval NOPAGE_OOM if there is no memory to allocate a new page
 */
static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct lu_env *env;
	struct cl_io *io;
	struct vvp_io *vio = NULL;
	struct page *vmpage;
	unsigned long ra_flags;
	struct cl_env_nest nest;
	int result;
	int fault_ret = 0;

	io = ll_fault_io_init(vma, &env, &nest, vmf->pgoff, &ra_flags);
	if (IS_ERR(io))
		return to_fault_error(PTR_ERR(io));

	result = io->ci_result;
	if (result == 0) {
		vio = vvp_env_io(env);
		vio->u.fault.ft_vma = vma;
		vio->u.fault.ft_vmpage = NULL;
		vio->u.fault.ft_vmf = vmf;
		vio->u.fault.ft_flags = 0;
		vio->u.fault.ft_flags_valid = false;

		/* May call ll_readpage() */
		ll_cl_add(vma->vm_file, env, io);

		result = cl_io_loop(env, io);

		ll_cl_remove(vma->vm_file, env);

		/* ft_flags are only valid if we reached
		 * the call to filemap_fault
		 */
		if (vio->u.fault.ft_flags_valid)
			fault_ret = vio->u.fault.ft_flags;

		vmpage = vio->u.fault.ft_vmpage;
		if (result != 0 && vmpage) {
			put_page(vmpage);
			vmf->page = NULL;
		}
	}
	cl_io_fini(env, io);
	cl_env_nested_put(&nest, env);

	vma->vm_flags |= ra_flags;
	if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
		fault_ret |= to_fault_error(result);

	CDEBUG(D_MMAP, "%s fault %d/%d\n",
	       current->comm, fault_ret, result);
	return fault_ret;
}

static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int count = 0;
	bool printed = false;
	int result;
	sigset_t set;

	/* Only SIGKILL and SIGTERM are allowed for fault/nopage/mkwrite,
	 * so that the process can be killed by an admin but other signals
	 * do not cause a segfault.
	 */
	set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));

restart:
	result = ll_fault0(vma, vmf);
	LASSERT(!(result & VM_FAULT_LOCKED));
	if (result == 0) {
		struct page *vmpage = vmf->page;

		/* check if this page has been truncated */
		lock_page(vmpage);
		if (unlikely(!vmpage->mapping)) { /* unlucky */
			unlock_page(vmpage);
			put_page(vmpage);
			vmf->page = NULL;

			if (!printed && ++count > 16) {
				CWARN("the page is under heavy contention, maybe your app(%s) needs revising :-)\n",
				      current->comm);
				printed = true;
			}

			goto restart;
		}

		result = VM_FAULT_LOCKED;
	}
	cfs_restore_sigs(set);
	return result;
}

static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int count = 0;
	bool printed = false;
	bool retry;
	int result;

	do {
		retry = false;
		result = ll_page_mkwrite0(vma, vmf->page, &retry);

		if (!printed && ++count > 16) {
			const struct dentry *de = vma->vm_file->f_path.dentry;

			CWARN("app(%s): the page %lu of file "DFID" is under heavy contention\n",
			      current->comm, vmf->pgoff,
			      PFID(ll_inode2fid(de->d_inode)));
			printed = true;
		}
	} while (retry);

	switch (result) {
	case 0:
		LASSERT(PageLocked(vmf->page));
		result = VM_FAULT_LOCKED;
		break;
	case -ENODATA:
	case -EFAULT:
		result = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		result = VM_FAULT_OOM;
		break;
	case -EAGAIN:
		result = VM_FAULT_RETRY;
		break;
	default:
		result = VM_FAULT_SIGBUS;
		break;
	}

	return result;
}

/**
 * To avoid cancelling locks that cover a mmapped region under lock cache
 * pressure, we track the mapped vma count in vvp_object::vob_mmap_cnt.
 */
static void ll_vm_open(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct vvp_object *vob = cl_inode2vvp(inode);

	LASSERT(vma->vm_file);
	LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
	atomic_inc(&vob->vob_mmap_cnt);
}

/**
 * Dual to ll_vm_open().
 */
static void ll_vm_close(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct vvp_object *vob = cl_inode2vvp(inode);

	LASSERT(vma->vm_file);
	atomic_dec(&vob->vob_mmap_cnt);
	LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
}

/* XXX put nice comment here. talk about __free_pte -> dirty pages and
 * nopage's reference passing to the pte
 */
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
{
	int rc = -ENOENT;

	LASSERTF(last > first, "last %llu first %llu\n", last, first);
	if (mapping_mapped(mapping)) {
		rc = 0;
		unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
				    last - first + 1, 0);
	}

	return rc;
}

static const struct vm_operations_struct ll_file_vm_ops = {
	.fault = ll_fault,
	.page_mkwrite = ll_page_mkwrite,
	.open = ll_vm_open,
	.close = ll_vm_close,
};

int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int rc;

	if (ll_file_nolock(file))
		return -EOPNOTSUPP;

	ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MAP, 1);
	rc = generic_file_mmap(file, vma);
	if (rc == 0) {
		vma->vm_ops = &ll_file_vm_ops;
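		/*
		 * The core VM only calls ->open() when a vma is copied or
		 * split, not for a fresh mapping, so invoke it by hand here
		 * to take the initial vob_mmap_cnt reference.
		 */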
		vma->vm_ops->open(vma);
		/* update the inode's size and mtime */
		rc = ll_glimpse_size(inode);
	}

	return rc;
}