/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#ifndef _ASM_MICROBLAZE_TLBFLUSH_H
#define _ASM_MICROBLAZE_TLBFLUSH_H

#ifdef CONFIG_MMU

#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/processor.h> /* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgalloc.h>

/* Low-level helpers: _tlbie() invalidates the TLB entry that maps the given
 * virtual address, _tlbia() invalidates the entire TLB. */
extern void _tlbie(unsigned long address);
extern void _tlbia(void);

/* The whole-TLB flush runs with preemption disabled. */
#define __tlbia() { preempt_disable(); _tlbia(); preempt_enable(); }
#define __tlbie(x) { _tlbie(x); }

static inline void local_flush_tlb_all(void)
	{ __tlbia(); }
static inline void local_flush_tlb_mm(struct mm_struct *mm)
	{ __tlbia(); }
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
				unsigned long vmaddr)
	{ __tlbie(vmaddr); }
static inline void local_flush_tlb_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
	{ __tlbia(); }

#define flush_tlb_kernel_range(start, end) do { } while (0)

#define update_mmu_cache(vma, addr, ptep) do { } while (0)

#define flush_tlb_all local_flush_tlb_all
#define flush_tlb_mm local_flush_tlb_mm
#define flush_tlb_page local_flush_tlb_page
#define flush_tlb_range local_flush_tlb_range

/*
 * This is called in munmap when we have freed up some page-table
 * pages. We don't need to do anything here, there's nothing special
 * about our page-table pages. -- paulus
 */
static inline void flush_tlb_pgtables(struct mm_struct *mm,
		unsigned long start, unsigned long end) { }

#else /* CONFIG_MMU */

#define flush_tlb() BUG()
#define flush_tlb_all() BUG()
#define flush_tlb_mm(mm) BUG()
#define flush_tlb_page(vma, addr) BUG()
#define flush_tlb_range(mm, start, end) BUG()
#define flush_tlb_pgtables(mm, start, end) BUG()
#define flush_tlb_kernel_range(start, end) BUG()

#endif /* CONFIG_MMU */

#endif /* _ASM_MICROBLAZE_TLBFLUSH_H */