/*
 *  linux/arch/arm/mm/copypage-xscale.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This handles the mini data cache, as found on SA11x0 and XScale
 * processors.  When we copy a user page, we map it in such a way
 * that accesses to this page will not touch the main data cache, but
 * will be cached in the mini data cache.  This prevents us from
 * thrashing the main data cache on page faults.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "mm.h"

/*
 * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
 * specific hacks for copying pages efficiently.
 */
#define COPYPAGE_MINICACHE	0xffff8000
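/*
 * COPYPAGE_MINICACHE is a single page-sized window inside that region.
 * The kernel pte backing it (reached via TOP_PTE()) is repointed at
 * each source page in turn, under minicache_lock, below.
 */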

#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
				  L_PTE_MT_MINICACHE)
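/*
 * The L_PTE_MT_MINICACHE memory type is what steers accesses through
 * this mapping into the mini data cache rather than the main D-cache,
 * as described at the top of this file.
 */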

static DEFINE_SPINLOCK(minicache_lock);

/*
 * XScale mini-dcache optimised copy_user_highpage
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 */
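/*
 * Note: this function is __naked, so no prologue/epilogue is generated
 * for it.  It relies on the AAPCS argument convention: 'from' and 'to'
 * arrive in r0 and r1, which the asm body uses directly, and it
 * returns by restoring pc in the final ldmfd.
 */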
static void __naked
mc_copy_user_page(void *from, void *to)
{
	/*
	 * Strangely enough, best performance is achieved
	 * when prefetching destination as well.  (NP)
	 */
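	/*
	 * Structure of the loop below: lr counts PAGE_SIZE / 64 - 1
	 * iterations.  Label 1 prefetches 64..96 bytes ahead in both
	 * source and destination; label 2 copies one 64-byte chunk as
	 * two 32-byte halves using ldrd/strd pairs.  Each destination
	 * cache line is cleaned and then invalidated (via ip) right
	 * after it has been written, so the stores drain through the
	 * write buffer without leaving the copy resident in the
	 * D-cache.  The final chunk takes the 'beq 2b' path, skipping
	 * the prefetches that would otherwise run past the page.
	 */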
	asm volatile(
	"stmfd	sp!, {r4, r5, lr}		\n\
	mov	lr, %2				\n\
	pld	[r0, #0]			\n\
	pld	[r0, #32]			\n\
	pld	[r1, #0]			\n\
	pld	[r1, #32]			\n\
1:	pld	[r0, #64]			\n\
	pld	[r0, #96]			\n\
	pld	[r1, #64]			\n\
	pld	[r1, #96]			\n\
2:	ldrd	r2, [r0], #8			\n\
	ldrd	r4, [r0], #8			\n\
	mov	ip, r1				\n\
	strd	r2, [r1], #8			\n\
	ldrd	r2, [r0], #8			\n\
	strd	r4, [r1], #8			\n\
	ldrd	r4, [r0], #8			\n\
	strd	r2, [r1], #8			\n\
	strd	r4, [r1], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	ldrd	r2, [r0], #8			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	ldrd	r4, [r0], #8			\n\
	mov	ip, r1				\n\
	strd	r2, [r1], #8			\n\
	ldrd	r2, [r0], #8			\n\
	strd	r4, [r1], #8			\n\
	ldrd	r4, [r0], #8			\n\
	strd	r2, [r1], #8			\n\
	strd	r4, [r1], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	subs	lr, lr, #1			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	bgt	1b				\n\
	beq	2b				\n\
	ldmfd	sp!, {r4, r5, pc}		"
	:
	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1));
}

void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto = kmap_atomic(to, KM_USER1);

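	/*
	 * Make sure the source page's data has been written back from
	 * the cache before we read it through the minicache alias;
	 * PG_dcache_clean records that this has already been done.
	 */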
	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
		__flush_dcache_page(page_mapping(from), from);

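	/*
	 * There is only one minicache window, so serialise its users;
	 * retarget the window's pte at the source page and zap the old
	 * TLB entry so the new mapping becomes visible.
	 */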
	spin_lock(&minicache_lock);

	set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
	flush_tlb_kernel_page(COPYPAGE_MINICACHE);

	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);

	spin_unlock(&minicache_lock);

	kunmap_atomic(kto, KM_USER1);
}

/*
 * XScale optimised clear_user_highpage
 */
void
xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
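	/*
	 * Zero the page one 32-byte cache line per iteration: %2 is
	 * PAGE_SIZE / 32, and each pass stores four doubleword pairs
	 * of zeroes, then cleans and invalidates the line (via ip) so
	 * the zeroes reach memory without the page lingering in the
	 * D-cache.
	 */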
	asm volatile(
	"mov	r1, %2				\n\
	mov	r2, #0				\n\
	mov	r3, #0				\n\
1:	mov	ip, %0				\n\
	strd	r2, [%0], #8			\n\
	strd	r2, [%0], #8			\n\
	strd	r2, [%0], #8			\n\
	strd	r2, [%0], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	subs	r1, r1, #1			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	bne	1b"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 32)
	: "r1", "r2", "r3", "ip");
	kunmap_atomic(kaddr, KM_USER0);
}

struct cpu_user_fns xscale_mc_user_fns __initdata = {
	.cpu_clear_user_highpage = xscale_mc_clear_user_highpage,
	.cpu_copy_user_highpage	= xscale_mc_copy_user_highpage,
};
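/*
 * Wiring sketch (an assumption; the hookup lives outside this file):
 * the XScale proc_info entry is expected to point at
 * xscale_mc_user_fns, and boot code to copy it into the global
 * 'cpu_user', after which the generic page helpers dispatch through
 * it, roughly:
 *
 *	cpu_user.cpu_copy_user_highpage(to, from, vaddr, vma);
 *	cpu_user.cpu_clear_user_highpage(page, vaddr);
 */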