/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/uaccess.h>

#include <linux/fs.h>
#include <linux/pagemap.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include "llite_internal.h"
static const struct vm_operations_struct ll_file_vm_ops;
void policy_from_vma(ldlm_policy_data_t *policy,
		     struct vm_area_struct *vma, unsigned long addr,
		     size_t count)
{
	policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) +
				 (vma->vm_pgoff << PAGE_SHIFT);
	policy->l_extent.end = (policy->l_extent.start + count - 1) |
			       ~PAGE_MASK;
}
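
/*
 * Illustrative example, assuming 4 KiB pages: for a vma with
 * vm_start = 0x10000 and vm_pgoff = 2, a fault at addr = 0x11234 with
 * count = 1 gives start = ((0x11234 - 0x10000) & PAGE_MASK) + (2 << 12)
 * = 0x3000, and end = 0x3000 | ~PAGE_MASK = 0x3fff; i.e. the lock
 * extent always covers whole pages of the file.
 */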
struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
			       size_t count)
{
	struct vm_area_struct *vma, *ret = NULL;

	/* mmap_sem must have been held by caller. */
	LASSERT(!down_write_trylock(&mm->mmap_sem));

	for (vma = find_vma(mm, addr);
	     vma && vma->vm_start < (addr + count); vma = vma->vm_next) {
		if (vma->vm_ops == &ll_file_vm_ops &&
		    vma->vm_flags & VM_SHARED) {
			ret = vma;
			break;
		}
	}
	return ret;
}
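
/*
 * Note: our_vma() deliberately skips MAP_PRIVATE mappings; private,
 * copy-on-write pages are never written back to the file, so only
 * VM_SHARED vmas matter for extent-lock coverage.
 */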
/**
 * API-independent part of page fault initialization.
 *
 * \param vma - virtual memory area addressed by the page fault
 * \param env_ret - corresponding lu_env for processing
 * \param nest - nested level
 * \param index - page index corresponding to the fault
 * \param ra_flags - vma readahead flags
 *
 * \return allocated and initialized env for the fault operation
 * \retval EINVAL if the env can't be allocated
 * \return other error codes from cl_io_init
 */
static struct cl_io *
ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
		 struct cl_env_nest *nest, pgoff_t index,
		 unsigned long *ra_flags)
{
	struct file *file = vma->vm_file;
	struct inode *inode = file_inode(file);
	struct cl_io *io;
	struct cl_fault_io *fio;
	struct lu_env *env;
	int rc;

	*env_ret = NULL;
	if (ll_file_nolock(file))
		return ERR_PTR(-EOPNOTSUPP);

	/*
	 * A page fault can occur when Lustre IO is already active for the
	 * current thread, e.g., when doing read/write against a user-level
	 * buffer mapped from a Lustre buffer. To avoid stomping on the
	 * existing context, optionally force an allocation of a new one.
	 */
	env = cl_env_nested_get(nest);
	if (IS_ERR(env))
		return ERR_PTR(-EINVAL);

	*env_ret = env;

restart:
	io = vvp_env_thread_io(env);
	io->ci_obj = ll_i2info(inode)->lli_clob;
	LASSERT(io->ci_obj);

	fio = &io->u.ci_fault;
	fio->ft_index = index;
	fio->ft_executable = vma->vm_flags & VM_EXEC;

	/*
	 * Disable VM_SEQ_READ and use VM_RAND_READ to make sure that
	 * the kernel will not read other pages not covered by ldlm in
	 * filemap_nopage; we do our own readahead in ll_readpage().
	 */
	if (ra_flags)
		*ra_flags = vma->vm_flags & (VM_RAND_READ | VM_SEQ_READ);
	vma->vm_flags &= ~VM_SEQ_READ;
	vma->vm_flags |= VM_RAND_READ;

	CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
	       fio->ft_index, fio->ft_executable);

	rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
	if (rc == 0) {
		struct vvp_io *vio = vvp_env_io(env);
		struct ll_file_data *fd = LUSTRE_FPRIVATE(file);

		LASSERT(vio->vui_cl.cis_io == io);

		/* The mmap lock must be MANDATORY because it has to cache
		 * pages.
		 */
		io->ci_lockreq = CILR_MANDATORY;
		vio->vui_fd = fd;
	} else {
		LASSERT(rc < 0);
		cl_io_fini(env, io);
		if (io->ci_need_restart)
			goto restart;
		cl_env_nested_put(nest, env);
		io = ERR_PTR(rc);
	}

	return io;
}
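
/*
 * Callers of ll_fault_io_init() that receive a valid cl_io must tear it
 * down with cl_io_fini() and then release the env with
 * cl_env_nested_put(), as ll_page_mkwrite0() and ll_fault0() below do.
 */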
/* Shared code of the page_mkwrite method for rhel5 and rhel6 */
static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
			    bool *retry)
{
	struct lu_env *env;
	struct cl_io *io;
	struct vvp_io *vio;
	struct cl_env_nest nest;
	int result;
	sigset_t set;
	struct inode *inode;
	struct ll_inode_info *lli;

	io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL);
	if (IS_ERR(io)) {
		result = PTR_ERR(io);
		goto out;
	}

	result = io->ci_result;
	if (result < 0)
		goto out_io;

	io->u.ci_fault.ft_mkwrite = 1;
	io->u.ci_fault.ft_writable = 1;

	vio = vvp_env_io(env);
	vio->u.fault.ft_vma = vma;
	vio->u.fault.ft_vmpage = vmpage;

	set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));

	inode = vvp_object_inode(io->ci_obj);
	lli = ll_i2info(inode);

	result = cl_io_loop(env, io);

	cfs_restore_sigs(set);

	if (result == 0) {
		lock_page(vmpage);
		if (!vmpage->mapping) {
			unlock_page(vmpage);

			/* The page was truncated and the lock was cancelled;
			 * return ENODATA so that VM_FAULT_NOPAGE will be
			 * returned to handle_mm_fault().
			 */
			result = -ENODATA;
		} else if (!PageDirty(vmpage)) {
			/* Race: the page has been cleaned by ptlrpcd after
			 * it was unlocked. It has to be added to the dirty
			 * cache again, otherwise this soon-to-be-dirty page
			 * won't consume any grants; even worse, if the page
			 * is being transferred it will break the RPC
			 * checksum.
			 */
			unlock_page(vmpage);

			CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has been written out, retry.\n",
			       vmpage, vmpage->index);

			*retry = true;
			result = -EAGAIN;
		}

		if (result == 0) {
			spin_lock(&lli->lli_lock);
			lli->lli_flags |= LLIF_DATA_MODIFIED;
			spin_unlock(&lli->lli_lock);
		}
	}

out_io:
	cl_io_fini(env, io);
	cl_env_nested_put(&nest, env);
out:
	CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
	LASSERT(ergo(result == 0, PageLocked(vmpage)));

	return result;
}
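
/*
 * Result contract of ll_page_mkwrite0(): 0 means the page is left locked
 * and dirty; -EAGAIN (with *retry set) means the clean-page race above
 * was hit and the caller should try again; any other negative errno is
 * mapped to a VM_FAULT_* code by ll_page_mkwrite().
 */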
static inline int to_fault_error(int result)
{
	switch (result) {
	case 0:
		result = VM_FAULT_LOCKED;
		break;
	case -EFAULT:
		result = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		result = VM_FAULT_OOM;
		break;
	default:
		result = VM_FAULT_SIGBUS;
		break;
	}
	return result;
}
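
/*
 * For example, when ll_fault_io_init() fails with -EOPNOTSUPP (the
 * no-lock case), ll_fault0() below funnels it through to_fault_error()
 * and the application sees SIGBUS rather than silently mapping the page.
 */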
/**
 * Lustre implementation of the vm_operations_struct::fault() method,
 * called by the VM to serve a page fault (in both kernel and user space).
 *
 * \param vma - virtual area struct related to the page fault
 * \param vmf - structure describing the type and address of the fault
 *
 * \return allocated and filled _locked_ page for the address
 * \retval VM_FAULT_ERROR on general error
 * \retval NOPAGE_OOM if there is no memory to allocate a new page
 */
static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct lu_env *env;
	struct cl_io *io;
	struct vvp_io *vio = NULL;
	struct page *vmpage;
	unsigned long ra_flags;
	struct cl_env_nest nest;
	int result;
	int fault_ret = 0;

	io = ll_fault_io_init(vma, &env, &nest, vmf->pgoff, &ra_flags);
	if (IS_ERR(io))
		return to_fault_error(PTR_ERR(io));

	result = io->ci_result;
	if (result == 0) {
		vio = vvp_env_io(env);
		vio->u.fault.ft_vma = vma;
		vio->u.fault.ft_vmpage = NULL;
		vio->u.fault.ft_vmf = vmf;
		vio->u.fault.ft_flags = 0;
		vio->u.fault.ft_flags_valid = false;

		/* May call ll_readpage() */
		ll_cl_add(vma->vm_file, env, io);

		result = cl_io_loop(env, io);

		ll_cl_remove(vma->vm_file, env);

		/* ft_flags are only valid if we reached
		 * the call to filemap_fault.
		 */
		if (vio->u.fault.ft_flags_valid)
			fault_ret = vio->u.fault.ft_flags;

		vmpage = vio->u.fault.ft_vmpage;
		if (result != 0 && vmpage) {
			put_page(vmpage);
			vmf->page = NULL;
		}
	}
	cl_io_fini(env, io);
	cl_env_nested_put(&nest, env);

	vma->vm_flags |= ra_flags;
	if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
		fault_ret |= to_fault_error(result);

	CDEBUG(D_MMAP, "%s fault %d/%d\n",
	       current->comm, fault_ret, result);
	return fault_ret;
}
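
/*
 * Note the split above: result carries the internal errno from the cl_io
 * loop, while fault_ret carries VM_FAULT_* bits; the two are only merged
 * when the fault is not being retried, since a VM_FAULT_RETRY return
 * must not also report an error.
 */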
static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int count = 0;
	bool printed = false;
	int result;
	sigset_t set;

	/* Only SIGKILL and SIGTERM are allowed for fault/nopage/mkwrite,
	 * so that the process can be killed by an admin but other signals
	 * cannot cause a segfault.
	 */
	set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));

restart:
	result = ll_fault0(vma, vmf);
	LASSERT(!(result & VM_FAULT_LOCKED));
	if (result == 0) {
		struct page *vmpage = vmf->page;

		/* check if this page has been truncated */
		lock_page(vmpage);
		if (unlikely(!vmpage->mapping)) { /* unlucky */
			unlock_page(vmpage);
			put_page(vmpage);
			vmf->page = NULL;

			if (!printed && ++count > 16) {
				CWARN("the page is under heavy contention, maybe your app(%s) needs revising :-)\n",
				      current->comm);
				printed = true;
			}
			goto restart;
		}

		result = VM_FAULT_LOCKED;
	}
	cfs_restore_sigs(set);
	return result;
}
static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int count = 0;
	bool printed = false;
	bool retry;
	int result;

	do {
		retry = false;
		result = ll_page_mkwrite0(vma, vmf->page, &retry);

		if (!printed && ++count > 16) {
			const struct dentry *de = vma->vm_file->f_path.dentry;

			CWARN("app(%s): the page %lu of file "DFID" is under heavy contention\n",
			      current->comm, vmf->pgoff,
			      PFID(ll_inode2fid(de->d_inode)));
			printed = true;
		}
	} while (retry);

	switch (result) {
	case 0:
		LASSERT(PageLocked(vmf->page));
		result = VM_FAULT_LOCKED;
		break;
	case -ENODATA:
	case -EFAULT:
		result = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		result = VM_FAULT_OOM;
		break;
	default:
		result = VM_FAULT_SIGBUS;
		break;
	}

	return result;
}
/**
 * To avoid cancelling the locks that cover a mmapped region under lock
 * cache pressure, we track the mapped vma count in
 * vvp_object::vob_mmap_cnt.
 */
static void ll_vm_open(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct vvp_object *vob = cl_inode2vvp(inode);

	LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
	atomic_inc(&vob->vob_mmap_cnt);
}
/**
 * Dual to ll_vm_open().
 */
static void ll_vm_close(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct vvp_object *vob = cl_inode2vvp(inode);

	atomic_dec(&vob->vob_mmap_cnt);
	LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
}
/* XXX put nice comment here. talk about __free_pte -> dirty pages and
 * nopage's reference passing to the pte
 */
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
{
	int rc = -ENOENT;

	LASSERTF(last > first, "last %llu first %llu\n", last, first);
	if (mapping_mapped(mapping)) {
		rc = 0;
		unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
				    last - first + 1, 0);
	}

	return rc;
}
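
/*
 * unmap_mapping_range() truncates its start offset down to a page
 * boundary, so passing first + PAGE_SIZE - 1 effectively rounds a
 * non-aligned first up to the next page: only pages lying entirely
 * inside [first, last] are unmapped.
 */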
static const struct vm_operations_struct ll_file_vm_ops = {
	.fault		= ll_fault,
	.page_mkwrite	= ll_page_mkwrite,
	.open		= ll_vm_open,
	.close		= ll_vm_close,
};
int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int rc;

	if (ll_file_nolock(file))
		return -EOPNOTSUPP;

	ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MAP, 1);
	rc = generic_file_mmap(file, vma);
	if (rc == 0) {
		vma->vm_ops = &ll_file_vm_ops;
		vma->vm_ops->open(vma);
		/* update the inode's size and mtime */
		rc = ll_glimpse_size(inode);
	}

	return rc;
}
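
/*
 * The explicit vm_ops->open() call in ll_file_mmap() is needed because
 * the initial mmap() path never invokes the open method itself; the VM
 * only calls it when an existing vma is copied or split (e.g. on fork),
 * so without it vob_mmap_cnt would miss the first mapping.
 */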