/*
 * linux/include/asm-xtensa/page.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_PAGE_H
#define _XTENSA_PAGE_H

#ifdef __KERNEL__

#include <asm/processor.h>
#include <linux/config.h>

/*
 * PAGE_SHIFT determines the page size
 * PAGE_ALIGN(x) aligns the pointer to the (next) page boundary
 */

#define PAGE_SHIFT		XCHAL_MMU_MIN_PTE_PAGE_SIZE
#define PAGE_SIZE		(1 << PAGE_SHIFT)
#define PAGE_MASK		(~(PAGE_SIZE-1))
#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE - 1) & PAGE_MASK)
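
/*
 * Worked example (a sketch, assuming the common 4 KiB minimum page,
 * i.e. PAGE_SHIFT == 12): PAGE_SIZE is 0x1000, PAGE_MASK is
 * 0xfffff000, PAGE_ALIGN(0x1234) rounds up to 0x2000, and
 * PAGE_ALIGN(0x2000) is already aligned and left unchanged.
 */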

#define DCACHE_WAY_SIZE		(XCHAL_DCACHE_SIZE / XCHAL_DCACHE_WAYS)
#define PAGE_OFFSET		XCHAL_KSEG_CACHED_VADDR

#ifdef __ASSEMBLY__

#define __pgprot(x)	(x)

#else

/*
 * These are used to make use of C type-checking..
 */

typedef struct { unsigned long pte; } pte_t;		/* page table entry */
typedef struct { unsigned long pgd; } pgd_t;		/* PGD table entry */
typedef struct { unsigned long pgprot; } pgprot_t;

#define pte_val(x)	((x).pte)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )
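
/*
 * Example of the type checking these wrappers buy (a sketch): pte_t is
 * a distinct struct type, so passing a bare unsigned long where a pte_t
 * is expected fails to compile; values must be converted explicitly,
 * e.g. pte_val(__pte(0x7)) == 0x7.
 */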

/*
 * Pure 2^n version of get_order
 */

extern __inline__ int get_order(unsigned long size)
{
	int order;
#ifndef XCHAL_HAVE_NSU
	unsigned long x1, x2, x4, x8, x16;

	/* Without the NSAU instruction, derive log2 of the page count by
	 * testing bit groups; exact only for power-of-two sizes, as noted
	 * above. */
	size = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	x1  = size & 0xAAAAAAAA;
	x2  = size & 0xCCCCCCCC;
	x4  = size & 0xF0F0F0F0;
	x8  = size & 0xFF00FF00;
	x16 = size & 0xFFFF0000;
	order = x2 ? 2 : 0;
	order += (x16 != 0) * 16;
	order += (x8 != 0) * 8;
	order += (x4 != 0) * 4;
	order += (x1 != 0);

	return order;
#else
	/* NSAU returns the number of leading zero bits, so 32 - NSAU(x)
	 * is one more than the index of the highest set bit of x. */
	size = (size - 1) >> PAGE_SHIFT;
	asm ("nsau %0, %1" : "=r" (order) : "r" (size));
	return 32 - order;
#endif
}
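
/*
 * Usage sketch (illustrative, assuming 4 KiB pages): get_order() yields
 * the allocation order covering a byte count, e.g. get_order(PAGE_SIZE)
 * == 0 and get_order(4 * PAGE_SIZE) == 2, as a caller might do in
 * __get_free_pages(GFP_KERNEL, get_order(len)).
 */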


struct page;
extern void clear_page(void *page);
extern void copy_page(void *to, void *from);

/*
 * If we have cache aliasing and writeback caches, we might have to do
 * some extra work.
 */

#if (DCACHE_WAY_SIZE > PAGE_SIZE)
void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *page);
#else
# define clear_user_page(page, vaddr, pg)	clear_page(page)
# define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
#endif
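
/*
 * Illustration of the aliasing case (example numbers only): with a
 * 16 KiB, 2-way data cache, DCACHE_WAY_SIZE is 8 KiB.  If PAGE_SIZE is
 * 4 KiB, two virtual mappings of the same physical page can index
 * different cache sets, so the out-of-line helpers above must deal
 * with the alias; when DCACHE_WAY_SIZE <= PAGE_SIZE no aliases can
 * exist and plain clear_page()/copy_page() suffice.
 */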

/*
 * This handles the memory map.  We handle pages at
 * XCHAL_KSEG_CACHED_VADDR for kernels with a 32-bit address space.
 * These macros are for the conversion of kernel addresses, not user
 * addresses.
 */

#define __pa(x)			((unsigned long) (x) - PAGE_OFFSET)
#define __va(x)			((void *)((unsigned long) (x) + PAGE_OFFSET))
#define pfn_valid(pfn)		((unsigned long)(pfn) < max_mapnr)
#ifndef CONFIG_DISCONTIGMEM
# define pfn_to_page(pfn)	(mem_map + (pfn))
# define page_to_pfn(page)	((unsigned long)((page) - mem_map))
#else
# error CONFIG_DISCONTIGMEM not supported
#endif

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_virt(page)	__va(page_to_pfn(page) << PAGE_SHIFT)
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
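
/*
 * Worked example (a sketch, assuming the usual cached KSEG base of
 * 0xd0000000): __pa(0xd0001000) gives physical address 0x00001000 and
 * __va(0x00001000) gives back the cached kernel address 0xd0001000.
 * Only addresses inside the linear kernel mapping may be converted
 * with these macros.
 */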

#define WANT_PAGE_VIRTUAL


#endif /* __ASSEMBLY__ */

#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#endif /* __KERNEL__ */
#endif /* _XTENSA_PAGE_H */