ARC: [mm] Zero page optimization

Read faults on never-written anonymous memory map in the common zero
page, which is read-only to user space and whose contents never change.
The dcache flush / icache invalidate done after create_tlb() for
executable or non-congruent mappings is therefore wasted work for it.
So return early from the fault-time cache coherence fixup
(update_mmu_cache) once the TLB entry is installed if the faulting pfn
is the zero page, and hoist the pfn_to_page() lookup to the top of the
function so the new check can share it with the existing flush path.

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
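---

Not part of the patch, purely for illustration: a minimal userspace
sketch of the access pattern this targets (nothing below is
ARC-specific, it just shows the pattern, not a benchmark of the
change). A process that only ever reads a large untouched private
anonymous mapping typically takes one read fault per page, each fault
maps in the shared zero page, and before this change each of those
faults still went through the exec/aliasing checks and possibly the
resulting cache flush, even though the data is known-zero and never
dirtied.

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pgsz = sysconf(_SC_PAGESIZE);
	size_t len = 4096 * (size_t)pgsz;	/* 4096 pages, never written */
	unsigned long sum = 0;
	char *p;
	size_t i;

	p = mmap(NULL, len, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* each first touch is a read fault satisfied by the zero page */
	for (i = 0; i < len; i += pgsz)
		sum += p[i];

	printf("sum = %lu\n", sum);	/* always 0 */
	munmap(p, len);
	return 0;
}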
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index f60807c..1c91dbc 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -433,9 +433,14 @@
 {
 	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
 	unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
+	struct page *page = pfn_to_page(pte_pfn(*ptep));
 
 	create_tlb(vma, vaddr, ptep);
 
+	if (page == ZERO_PAGE(0)) {
+		return;
+	}
+
 	/*
 	 * Exec page : Independent of aliasing/page-color considerations,
 	 *	       since icache doesn't snoop dcache on ARC, any dirty
@@ -447,7 +452,6 @@
 	 */
 	if ((vma->vm_flags & VM_EXEC) ||
 	     addr_not_cache_congruent(paddr, vaddr)) {
-		struct page *page = pfn_to_page(pte_pfn(*ptep));
 
 		int dirty = test_and_clear_bit(PG_arch_1, &page->flags);
 		if (dirty) {