blob: 1ddea5afba09344ba8e807e6ce8f5edc5c26241f [file] [log] [blame]
David Howellsb920de12008-02-08 04:19:31 -08001/* MN10300 Virtual kernel memory mappings for high memory
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 * - Derived from include/asm-i386/highmem.h
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public Licence
9 * as published by the Free Software Foundation; either version
10 * 2 of the Licence, or (at your option) any later version.
11 */
12#ifndef _ASM_HIGHMEM_H
13#define _ASM_HIGHMEM_H
14
15#ifdef __KERNEL__
16
17#include <linux/init.h>
18#include <linux/interrupt.h>
Akinobu Mita7ca43e72009-03-31 15:23:25 -070019#include <linux/highmem.h>
David Howellsb920de12008-02-08 04:19:31 -080020#include <asm/kmap_types.h>
21#include <asm/pgtable.h>
22
23/* undef for production */
24#undef HIGHMEM_DEBUG
25
26/* declarations for highmem.c */
27extern unsigned long highstart_pfn, highend_pfn;
28
29extern pte_t *kmap_pte;
30extern pgprot_t kmap_prot;
31extern pte_t *pkmap_page_table;
32
33extern void __init kmap_init(void);
34
35/*
36 * Right now we initialize only a single pte table. It can be extended
37 * easily, subsequent pte tables have to be allocated in one physical
38 * chunk of RAM.
39 */
#define PKMAP_BASE	0xfe000000UL
#define LAST_PKMAP	1024
#define LAST_PKMAP_MASK	(LAST_PKMAP - 1)
/* Parenthesize the macro argument so an expression argument such as
 * "base + off" groups correctly before the subtraction/shift. */
#define PKMAP_NR(virt)	(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)	(PKMAP_BASE + ((nr) << PAGE_SHIFT))
45
Harvey Harrison21534302008-02-13 15:03:17 -080046extern unsigned long kmap_high(struct page *page);
47extern void kunmap_high(struct page *page);
David Howellsb920de12008-02-08 04:19:31 -080048
49static inline unsigned long kmap(struct page *page)
50{
51 if (in_interrupt())
52 BUG();
53 if (page < highmem_start_page)
54 return page_address(page);
55 return kmap_high(page);
56}
57
58static inline void kunmap(struct page *page)
59{
60 if (in_interrupt())
61 BUG();
62 if (page < highmem_start_page)
63 return;
64 kunmap_high(page);
65}
66
67/*
68 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
69 * gives a more generic (and caching) interface. But kmap_atomic can
70 * be used in IRQ contexts, so in some (very limited) cases we need
71 * it.
72 */
/*
 * Atomically map a page into one of this CPU's fixmap kmap slots and
 * return its virtual address.  Usable from IRQ context (unlike kmap()),
 * but the caller must not sleep before the matching __kunmap_atomic():
 * preemption and pagefaults are disabled here and only re-enabled
 * there.
 */
static inline void *kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	int idx, type;

	/* The slot is per-CPU, so the task must not migrate, and a fault
	 * while it is held could recurse into kmap_atomic(). */
	preempt_disable();
	pagefault_disable();
	/* Lowmem pages already have a permanent mapping; note the early
	 * return still leaves preempt/pagefaults disabled for
	 * __kunmap_atomic() to undo. */
	if (page < highmem_start_page)
		return page_address(page);

	/* Claim the next free atomic-kmap slot for this context. */
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#if HIGHMEM_DEBUG
	/* The slot we just claimed should not hold a live pte. */
	if (!pte_none(*(kmap_pte - idx)))
		BUG();
#endif
	set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
	/* Flush any stale translation for this slot before handing the
	 * address out. */
	local_flush_tlb_one(vaddr);

	return (void *)vaddr;
}
95
/*
 * Tear down a mapping set up by kmap_atomic() and re-enable pagefaults
 * and preemption.  Slots are managed as a stack
 * (kmap_atomic_idx_push/pop), so calls must nest in reverse order of
 * the kmap_atomic() calls.
 */
static inline void __kunmap_atomic(unsigned long vaddr)
{
	int type;

	/* Addresses below the fixmap region came from the lowmem
	 * short-circuit in kmap_atomic(): no slot to release, just undo
	 * the disables.  (FIXME inherited with the original code.) */
	if (vaddr < FIXADDR_START) { /* FIXME */
		pagefault_enable();
		preempt_enable();
		return;
	}

	/* Index of the innermost (most recently pushed) slot. */
	type = kmap_atomic_idx();

#if HIGHMEM_DEBUG
	{
		unsigned int idx;
		idx = type + KM_TYPE_NR * smp_processor_id();

		/* The address being unmapped must belong to the slot we
		 * are about to pop. */
		if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
			BUG();

		/*
		 * force other mappings to Oops if they'll try to access
		 * this pte without first remap it
		 */
		pte_clear(kmap_pte - idx);
		local_flush_tlb_one(vaddr);
	}
#endif

	/* Release the slot, then lift the restrictions taken in
	 * kmap_atomic(), in reverse order of acquisition. */
	kmap_atomic_idx_pop();
	pagefault_enable();
	preempt_enable();
}
David Howellsb920de12008-02-08 04:19:31 -0800129#endif /* __KERNEL__ */
130
131#endif /* _ASM_HIGHMEM_H */