mm: do_fault(): extract vm_ops->fault() call into separate function

Extract the code that calls vm_ops->fault(), together with its basic error
handling, into a separate function, __do_fault().  The code will be reused.
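
The resulting call pattern looks roughly like this (a simplified sketch of
the code introduced below, not a verbatim excerpt -- in do_fault() itself
the error path jumps to uncharge_out instead of returning directly):

	struct page *fault_page;
	int ret;

	ret = __do_fault(vma, address, pgoff, flags, &fault_page);
	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
		return ret;
	/* on success, fault_page points to the locked, faulted-in page */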

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Matthew Wilcox <matthew.r.wilcox@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/mm/memory.c b/mm/memory.c
index 144e8cd..af76397 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3286,6 +3286,41 @@
 	return VM_FAULT_OOM;
 }
 
+static int __do_fault(struct vm_area_struct *vma, unsigned long address,
+		pgoff_t pgoff, unsigned int flags, struct page **page)
+{
+	struct vm_fault vmf;
+	int ret;
+
+	vmf.virtual_address = (void __user *)(address & PAGE_MASK);
+	vmf.pgoff = pgoff;
+	vmf.flags = flags;
+	vmf.page = NULL;
+
+	ret = vma->vm_ops->fault(vma, &vmf);
+	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
+		return ret;
+
+	if (unlikely(PageHWPoison(vmf.page))) {
+		if (ret & VM_FAULT_LOCKED)
+			unlock_page(vmf.page);
+		page_cache_release(vmf.page);
+		return VM_FAULT_HWPOISON;
+	}
+
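+	/*
+	 * For consistency in subsequent calls, make the faulted page always
+	 * locked.
+	 */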
+	if (unlikely(!(ret & VM_FAULT_LOCKED)))
+		lock_page(vmf.page);
+	else
+		VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page);
+
+	*page = vmf.page;
+	return ret;
+}
+
 /*
  * do_fault() tries to create a new page mapping. It aggressively
  * tries to share with existing pages, but makes a separate copy if
@@ -3305,12 +3340,11 @@
 {
 	pte_t *page_table;
 	spinlock_t *ptl;
-	struct page *page;
+	struct page *page, *fault_page;
 	struct page *cow_page;
 	pte_t entry;
 	int anon = 0;
 	struct page *dirty_page = NULL;
-	struct vm_fault vmf;
 	int ret;
 	int page_mkwrite = 0;
 
@@ -3334,42 +3368,19 @@
 	} else
 		cow_page = NULL;
 
-	vmf.virtual_address = (void __user *)(address & PAGE_MASK);
-	vmf.pgoff = pgoff;
-	vmf.flags = flags;
-	vmf.page = NULL;
-
-	ret = vma->vm_ops->fault(vma, &vmf);
-	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
-			    VM_FAULT_RETRY)))
+	ret = __do_fault(vma, address, pgoff, flags, &fault_page);
+	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		goto uncharge_out;
 
-	if (unlikely(PageHWPoison(vmf.page))) {
-		if (ret & VM_FAULT_LOCKED)
-			unlock_page(vmf.page);
-		ret = VM_FAULT_HWPOISON;
-		page_cache_release(vmf.page);
-		goto uncharge_out;
-	}
-
-	/*
-	 * For consistency in subsequent calls, make the faulted page always
-	 * locked.
-	 */
-	if (unlikely(!(ret & VM_FAULT_LOCKED)))
-		lock_page(vmf.page);
-	else
-		VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page);
-
 	/*
 	 * Should we do an early C-O-W break?
 	 */
-	page = vmf.page;
+	page = fault_page;
 	if (flags & FAULT_FLAG_WRITE) {
 		if (!(vma->vm_flags & VM_SHARED)) {
 			page = cow_page;
 			anon = 1;
-			copy_user_highpage(page, vmf.page, address, vma);
+			copy_user_highpage(page, fault_page, address, vma);
 			__SetPageUptodate(page);
 		} else {
 			/*
@@ -3378,8 +3389,15 @@
 			 * to become writable
 			 */
 			if (vma->vm_ops->page_mkwrite) {
+				struct vm_fault vmf;
 				int tmp;
 
+				vmf.virtual_address =
+					(void __user *)(address & PAGE_MASK);
+				vmf.pgoff = pgoff;
+				vmf.flags = flags;
+				vmf.page = fault_page;
+
 				unlock_page(page);
 				vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
 				tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
@@ -3469,9 +3487,9 @@
 		if (vma->vm_file && !page_mkwrite)
 			file_update_time(vma->vm_file);
 	} else {
-		unlock_page(vmf.page);
+		unlock_page(fault_page);
 		if (anon)
-			page_cache_release(vmf.page);
+			page_cache_release(fault_page);
 	}
 
 	return ret;