/*
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#ifdef CONFIG_ISA_ARCV2

/*
 * ARCv2 based HS38 cores are in-order issue, but still weakly ordered
 * due to micro-arch buffering/queuing of loads/stores, cache hit vs. miss ...
 *
 * An explicit barrier is provided by the DMB instruction:
 *  - Its operand supports fine grained load / store / load+store semantics
 *  - It ensures that memory operations of the selected type issued before it
 *    complete before any subsequent memory operation of the same type
 *  - DMB guarantees SMP as well as local (UP) barrier semantics
 *    (asm-generic/barrier.h provides sane smp_*mb() if not defined here, i.e.
 *    UP: barrier(), SMP: smp_*mb() == *mb())
 *  - DSYNC provides DMB plus completion of cache/BPU maintenance ops, hence it
 *    is not needed in the general case. Also, it provides only a full barrier.
 */

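/*
 * DMB operand encoding, as used below: 1 orders loads only (rmb), 2 orders
 * stores only (wmb), 3 orders both (full mb).
 */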
#define mb()	asm volatile("dmb 3\n" : : : "memory")
#define rmb()	asm volatile("dmb 1\n" : : : "memory")
#define wmb()	asm volatile("dmb 2\n" : : : "memory")

#endif

#ifdef CONFIG_ISA_ARCOMPACT

/*
 * ARCompact based cores (ARC700) only have the SYNC instruction, which is
 * super heavy weight as it also flushes the pipeline.
 * There are no real SMP implementations of such cores.
 */

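/*
 * Only a full mb() is defined here; asm-generic/barrier.h (included below)
 * falls back to rmb()/wmb() == mb() when read/write-only variants are absent.
 */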
#define mb()	asm volatile("sync\n" : : : "memory")
#endif

#include <asm-generic/barrier.h>
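
/*
 * Illustrative sketch only (not part of this header): a typical
 * producer/consumer pairing of the write and read barriers defined above
 * (or derived from mb() by asm-generic/barrier.h). The names 'data',
 * 'ready', compute() and use() are hypothetical.
 *
 *	static int data;
 *	static int ready;
 *
 *	void producer(void)
 *	{
 *		data = compute();	// hypothetical payload
 *		wmb();			// order the store to 'data' before 'ready'
 *		WRITE_ONCE(ready, 1);
 *	}
 *
 *	void consumer(void)
 *	{
 *		while (!READ_ONCE(ready))
 *			cpu_relax();
 *		rmb();			// order the load of 'ready' before 'data'
 *		use(data);		// hypothetical consumer of the payload
 *	}
 *
 * On SMP kernels the smp_wmb()/smp_rmb() variants provided via
 * asm-generic/barrier.h would normally be preferred for CPU-CPU ordering.
 */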

#endif