#include <linux/mm.h>
#include <linux/gfp.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)

#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif

gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(PGALLOC_GFP);
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(__userpte_alloc_gfp, 0);
	if (pte)
		pgtable_page_ctor(pte);
	return pte;
}
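
/*
 * A sketch of why the two allocators above differ (assuming
 * CONFIG_HIGHPTE): kernel pte pages must stay in lowmem because the
 * kernel dereferences them directly, while user pte pages may land in
 * highmem and are only mapped transiently around each access:
 *
 *	pte_t *pte = pte_offset_map(pmd, address);  <-- kmap_atomic()
 *	...                                             with HIGHPTE
 *	pte_unmap(pte);                             <-- kunmap_atomic()
 */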

static int __init setup_userpte(char *arg)
{
	if (!arg)
		return -EINVAL;

	/*
	 * "userpte=nohigh" disables allocation of user pagetables in
	 * high memory.
	 */
	if (strcmp(arg, "nohigh") == 0)
		__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
	else
		return -EINVAL;
	return 0;
}
early_param("userpte", setup_userpte);
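
/*
 * Example: booting with "userpte=nohigh" clears __GFP_HIGHMEM from
 * __userpte_alloc_gfp, forcing user pte pages into lowmem; useful when
 * chasing suspected highpte problems on 32-bit highmem machines.
 */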

void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}

#if PAGETABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pmd));
}

#if PAGETABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pud));
}
#endif	/* PAGETABLE_LEVELS > 3 */
#endif	/* PAGETABLE_LEVELS > 2 */
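
/*
 * These ___*_free_tlb() functions sit behind the __pte_free_tlb()
 * family of wrappers in <asm/pgalloc.h>.  A rough sketch of a typical
 * teardown path (argument lists elided, not a verbatim caller):
 *
 *	tlb_gather_mmu(...);
 *	free_pgd_range(tlb, addr, end, floor, ceiling);
 *	tlb_finish_mmu(...);
 *
 * so a pagetable page is handed back to the page allocator only after
 * the mmu_gather has flushed the TLB.
 */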

static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
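
/*
 * Worked example (assuming the usual 32-bit PAE layout): PTRS_PER_PGD
 * is 4 and KERNEL_PGD_BOUNDARY is pgd_index(PAGE_OFFSET), i.e.
 * 0xc0000000 >> 30 == 3, so with SHARED_KERNEL_PMD the three user
 * PDPT entries are private to each pagetable while entry 3 points at
 * a kernel pmd shared by all of them: UNSHARED_PTRS_PER_PGD == 3.
 */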

static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
	virt_to_page(pgd)->index = (pgoff_t)mm;
}

struct mm_struct *pgd_page_get_mm(struct page *page)
{
	return (struct mm_struct *)page->index;
}
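
/*
 * A pgd page never sits in the page cache, so its struct page ->index
 * field is free for reuse; pgd_set_mm() borrows it so that code
 * walking pgd_list can find the owning mm.  Roughly how the 64-bit
 * sync_global_pgds() uses it:
 *
 *	spinlock_t *pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 *	spin_lock(pgt_lock);
 */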

static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    PAGETABLE_LEVELS == 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD) {
		pgd_set_mm(pgd, mm);
		pgd_list_add(pgd);
	}
}

static void pgd_dtor(pgd_t *pgd)
{
	if (SHARED_KERNEL_PMD)
		return;

	spin_lock(&pgd_lock);
	pgd_list_del(pgd);
	spin_unlock(&pgd_lock);
}
/*
 * A list of all pgds is needed for the non-PAE case so that changes to
 * kernel pagetables can be propagated into both cached and uncached
 * pgds; it is not needed for PAE since the kernel pmd is shared there.
 * If PAE did not share the pmd, a similar tactic would be needed.
 * This is essentially codepath-based locking against pageattr.c; it is
 * the unique case in which a valid change of kernel pagetables can't
 * be lazily synchronized by vmalloc faults.  vmalloc faults work
 * because attached pagetables are never freed.
 * -- wli
 */

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update.  Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	flush_tlb_mm(mm);
}
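
/*
 * The cr3-based flush matters because a PAE CPU caches the four PDPT
 * entries in internal registers when cr3 is loaded and may keep using
 * those stale copies; an invlpg of the affected range alone is not
 * guaranteed to refresh them.
 */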
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0

#endif	/* CONFIG_X86_PAE */

static void free_pmds(pmd_t *pmds[])
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++)
		if (pmds[i])
			free_page((unsigned long)pmds[i]);
}

static int preallocate_pmds(pmd_t *pmds[])
{
	int i;
	bool failed = false;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
		if (pmd == NULL)
			failed = true;
		pmds[i] = pmd;
	}

	if (failed) {
		free_pmds(pmds);
		return -ENOMEM;
	}

	return 0;
}
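
/*
 * Preallocation is all-or-nothing: on any failure every page acquired
 * so far is returned through free_pmds(), so pgd_alloc() never sees a
 * partially populated pmds[] array.
 */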

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
		}
	}
}

static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
	pud_t *pud;
	unsigned long addr;
	int i;

	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
		return;

	pud = pud_offset(pgd, 0);

	for (addr = i = 0; i < PREALLOCATED_PMDS;
	     i++, pud++, addr += PUD_SIZE) {
		pmd_t *pmd = pmds[i];

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *pmds[PREALLOCATED_PMDS];

	pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);

	if (pgd == NULL)
		goto out;

	mm->pgd = pgd;

	if (preallocate_pmds(pmds) != 0)
		goto out_free_pgd;

	if (paravirt_pgd_alloc(mm) != 0)
		goto out_free_pmds;

	/*
	 * Make sure that pre-populating the pmds is atomic with
	 * respect to anything walking the pgd_list, so that they
	 * never see a partially populated pgd.
	 */
	spin_lock(&pgd_lock);

	pgd_ctor(mm, pgd);
	pgd_prepopulate_pmd(mm, pgd, pmds);

	spin_unlock(&pgd_lock);

	return pgd;

out_free_pmds:
	free_pmds(pmds);
out_free_pgd:
	free_page((unsigned long)pgd);
out:
	return NULL;
}
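
/*
 * Lifecycle sketch (assuming the usual fork/exit paths): mm_init()
 * reaches pgd_alloc() via mm_alloc_pgd(), and __mmdrop() undoes it via
 * mm_free_pgd()/pgd_free().  Holding pgd_lock across ctor and
 * prepopulation above means a pgd_list walker can never observe a pgd
 * whose kernel mappings are cloned but whose pmds are still missing.
 */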

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	paravirt_pgd_free(mm, pgd);
	free_page((unsigned long)pgd);
}

int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty) {
		*ptep = entry;
		pte_update_defer(vma->vm_mm, address, ptep);
		flush_tlb_page(vma, address);
	}

	return changed;
}
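
/*
 * Caller contract (see the fault paths in mm/memory.c): this runs with
 * the pte lock held once a fault has computed the new entry; the TLB
 * flush is only needed when the entry actually changed, since the CPU
 * may still hold the old, more restrictive version in its TLB.
 */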

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (changed && dirty) {
		*pmdp = entry;
		pmd_update_defer(vma->vm_mm, address, pmdp);
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}

	return changed;
}
#endif

int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	if (ret)
		pte_update(vma->vm_mm, addr, ptep);

	return ret;
}
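
/*
 * Note that no TLB flush happens here: a stale Accessed bit in a TLB
 * entry is harmless for approximate page aging.  Callers that want a
 * subsequent access to be guaranteed to set the bit again use
 * ptep_clear_flush_young() below, which pairs this clear with a
 * single-page flush.
 */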

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pmd_t *pmdp)
{
	int ret = 0;

	if (pmd_young(*pmdp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pmdp);

	if (ret)
		pmd_update(vma->vm_mm, addr, pmdp);

	return ret;
}
#endif

int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;

	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);

	return young;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	return young;
}

void pmdp_splitting_flush(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp)
{
	int set;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	set = !test_and_set_bit(_PAGE_BIT_SPLITTING,
				(unsigned long *)pmdp);
	if (set) {
		pmd_update(vma->vm_mm, address, pmdp);
		/* need tlb flush only to serialize against gup-fast */
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
}
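
/*
 * A sketch of the serialization the flush above buys: the lockless
 * get_user_pages_fast() path walks pagetables with irqs disabled and
 * backs off from a pmd that tests pmd_trans_splitting().  Since
 * flush_tlb_range() works by IPI, it cannot complete until every CPU
 * still inside gup-fast has re-enabled irqs, so on return no lockless
 * walker can still be traversing the old huge pmd.
 */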
#endif

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
	BUG_ON(fixmaps_set > 0);
	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
	       (int)-reserve);
	__FIXADDR_TOP = -reserve - PAGE_SIZE;
#endif
}
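
/*
 * Example caller: a 32-bit paravirt guest such as lguest invokes
 * reserve_top_address(lguest_data.reserve_mem) very early in boot, so
 * that the hypervisor's pages end up above the (now lowered) fixmap.
 */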

int fixmaps_set;

void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_vaddr(address, pte);
	fixmaps_set++;
}

void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
		       pgprot_t flags)
{
	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}
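
/*
 * Usage sketch: fixmap consumers hand in a compile-time slot and a
 * physical address, e.g.
 *
 *	set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
 *
 * which reaches this function (natively, or through pv_mmu_ops) and
 * boils down to one set_pte_vaddr() on the slot's fixed virtual
 * address.
 */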