/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: May 2011
 *  -Refactored get_new_mmu_context( ) to only handle live-mm.
 *   retiring-mm handled in other hooks
 *
 * Vineetg: March 25th, 2008: Bug #92690
 *  -Major rewrite of Core ASID allocation routine get_new_mmu_context
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_MMU_CONTEXT_H
#define _ASM_ARC_MMU_CONTEXT_H

#include <asm/arcregs.h>
#include <asm/tlb.h>

#include <asm-generic/mm_hooks.h>

/* ARC700 ASID Management
 *
 * The ARC MMU provides an 8-bit ASID (0..255) to TAG TLB entries, allowing
 * entries with the same vaddr (from different tasks) to co-exist. This
 * provides for a "Fast Context Switch", i.e. no TLB flush on context switch.
 *
 * Linux assigns each task a unique ASID. A simple round-robin allocation
 * of H/w ASID is done using the software tracker @asid_cache.
 * When it reaches the max of 255, the allocation cycle starts afresh by
 * flushing the entire TLB and wrapping the ASID back to zero.
 *
 * A new allocation cycle, post rollover, could potentially reassign an ASID
 * to a different task, so the rule is that a task must refresh its ASID in
 * a new cycle. The lower 8 bits of the 32-bit @asid_cache (and of
 * mm->context.asid) hold the MMU PID while the upper 24 bits serve as a
 * cycle/generation indicator; natural 32-bit unsigned math automagically
 * increments the generation when the lower 8 bits roll over.
 */

#define MM_CTXT_ASID_MASK	0x000000ff /* MMU PID reg :8 bit PID */
#define MM_CTXT_CYCLE_MASK	(~MM_CTXT_ASID_MASK)

#define MM_CTXT_FIRST_CYCLE	(MM_CTXT_ASID_MASK + 1)
#define MM_CTXT_NO_ASID		0UL
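
/*
 * Worked example of the split above (illustrative values only):
 * asid_cache == 0x000001ff -> cycle 0x1, h/w PID 0xff (last of that cycle);
 * the next allocation bumps it to 0x00000200 -> the low 8 bits rolled over,
 * so get_new_mmu_context() flushes the TLB and cycle 0x2 begins with PID 0.
 * Only if all 32 bits wrap back to 0 is asid_cache forced to
 * MM_CTXT_FIRST_CYCLE, so a live context is never confused with
 * MM_CTXT_NO_ASID.
 */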

#define hw_pid(mm)		((mm)->context.asid & MM_CTXT_ASID_MASK)

extern unsigned int asid_cache;

/*
 * Get a new ASID if the task doesn't have a valid one (unallocated or from a
 * prior allocation cycle). Also set the MMU PID register to the
 * existing/updated ASID.
 */
static inline void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long flags;

	local_irq_save(flags);

	/*
	 * Move to a new ASID if the current one was not from the current
	 * alloc-cycle/generation. This is checked by ensuring that the
	 * generation bits in both mm->context.asid and the CPU's ASID
	 * counter (@asid_cache) are exactly the same.
	 *
	 * Note: Callers needing a new ASID unconditionally, independent of
	 *	 generation, e.g. local_flush_tlb_mm() for the forking parent,
	 *	 first need to destroy the context, setting it to the invalid
	 *	 value.
	 */
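	/*
	 * For illustration (made-up numbers): mm->context.asid == 0x205 and
	 * asid_cache == 0x2c0 share generation 0x2, so the masked XOR below
	 * is zero and ASID 0x05 is simply reused; were asid_cache == 0x301,
	 * the generation bits would differ and a fresh ASID from cycle 0x3
	 * would be handed out instead.
	 */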
	if (!((mm->context.asid ^ asid_cache) & MM_CTXT_CYCLE_MASK))
		goto set_hw;

	/* move to new ASID and handle rollover */
	if (unlikely(!(++asid_cache & MM_CTXT_ASID_MASK))) {

		flush_tlb_all();

		/*
		 * The above check detects rollover of the 8-bit ASID within
		 * its 32-bit container. If the container itself wrapped
		 * around, set it to a non-zero "generation" to distinguish
		 * it from no-context.
		 */
		if (!asid_cache)
			asid_cache = MM_CTXT_FIRST_CYCLE;
	}

	/* Assign new ASID to tsk */
	mm->context.asid = asid_cache;

set_hw:
	write_aux_reg(ARC_REG_PID, hw_pid(mm) | MMU_ENABLE);

	local_irq_restore(flags);
}

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context.asid = MM_CTXT_NO_ASID;
	return 0;
}

/*
 * Prepare the MMU for the task: set up the PID reg with the allocated ASID.
 * If the task doesn't have an ASID (never allocated, or stolen), get a new
 * one.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
#ifndef CONFIG_SMP
	/* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
	write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
#endif

	get_new_mmu_context(next);
}

/*
 * Called at the time of execve() to get a new ASID.
 * Note the subtlety here: get_new_mmu_context() behaves differently here
 * vs. in switch_mm(). Here it always assigns a new ASID, because mm still
 * has the unallocated "initial" value, while in the latter an mm keeps its
 * ASID as long as it is from the current allocation cycle.
 */
#define activate_mm(prev, next)		switch_mm(prev, next, NULL)
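
/*
 * To spell out the above (values are just for exposition): right after
 * execve(), init_new_context() has left mm->context.asid at MM_CTXT_NO_ASID
 * (0). Its generation bits (0) can never equal the live generation in
 * @asid_cache, which is always >= MM_CTXT_FIRST_CYCLE, so activate_mm()
 * unconditionally ends up with a fresh ASID.
 */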

static inline void destroy_context(struct mm_struct *mm)
{
	mm->context.asid = MM_CTXT_NO_ASID;
}

/*
 * It seemed that deactivate_mm() is a reasonable place to do book-keeping
 * for a retiring mm. However destroy_context() still needs to do that,
 * because between
 *	mm_release() => deactivate_mm() and
 *	mmput() => .. => __mmdrop() => destroy_context()
 * there is a good chance that the task gets sched-out/in, making its ASID
 * valid again (this teased me for a whole day).
 */
#define deactivate_mm(tsk, mm)		do { } while (0)

#define enter_lazy_tlb(mm, tsk)

#endif	/* _ASM_ARC_MMU_CONTEXT_H */