#ifndef ASM_EDAC_H
#define ASM_EDAC_H

#include <asm/compiler.h>

/* ECC atomic, DMA, SMP and interrupt safe scrub function */

/*
 * atomic_scrub - atomically rewrite a memory range in place.
 * @va:   virtual address of the region to scrub (assumed word-aligned —
 *        NOTE(review): ll/sc fault on unaligned addresses; confirm callers
 *        always pass aligned buffers)
 * @size: length of the region in bytes; any trailing partial word is
 *        left untouched (loop runs size / sizeof(unsigned long) times)
 *
 * Reading a word and storing it back unmodified causes the memory
 * controller to write fresh (corrected) ECC check bits, clearing a
 * correctable error.  The read-modify-write must be atomic so that a
 * concurrent store from another CPU, an interrupt handler or a DMA
 * engine is not lost between our load and our store.
 */
static inline void atomic_scrub(void *va, u32 size)
{
	unsigned long *virt_addr = va;
	unsigned long temp;
	u32 i;

	for (i = 0; i < size / sizeof(unsigned long); i++) {
		/*
		 * Very carefully read and write to memory atomically
		 * so we are interrupt, DMA and SMP safe.
		 *
		 * Intel: asm("lock; addl $0, %0"::"m"(*virt_addr));
		 */

		/*
		 * Load-linked/store-conditional retry loop:
		 *   ll    reads the word and opens a link on the location;
		 *   addu %0, $0 adds zero, i.e. keeps the value unchanged
		 *         (the write-back itself is the scrub);
		 *   sc    stores only if no other agent touched the word,
		 *         writing 1/0 success status back into %0;
		 *   beqz  retries from 1: if the store-conditional failed.
		 * ".set mips2" temporarily raises the assembler ISA level so
		 * ll/sc assemble even when building for a MIPS I target;
		 * ".set mips0" restores the original ISA selection.
		 * GCC_OFF_SMALL_ASM() (from <asm/compiler.h>) yields the
		 * memory-operand constraint ll/sc require (a small offset,
		 * since ll/sc take a limited immediate displacement).
		 */
		__asm__ __volatile__ (
		" .set mips2 \n"
		"1: ll %0, %1 # atomic_scrub \n"
		" addu %0, $0 \n"
		" sc %0, %1 \n"
		" beqz %0, 1b \n"
		" .set mips0 \n"
		: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*virt_addr)
		: GCC_OFF_SMALL_ASM() (*virt_addr));

		virt_addr++;
	}
}

#endif