#ifndef _ASM_X86_STRING_64_H
#define _ASM_X86_STRING_64_H

#ifdef __KERNEL__
#include <linux/jump_label.h>

/* Written 2002 by Andi Kleen */

/* Only used for special circumstances. Stolen from i386/string.h */
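/*
 * Copies n/4 dwords with "rep movsl", then tests bits 1 ($2) and 0 ($1)
 * of the original byte count (%b4 is the low byte of operand 4, the raw
 * length n) to decide whether a trailing "movsw" and/or "movsb" is still
 * needed for the remainder.
 */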
static __always_inline void *__inline_memcpy(void *to, const void *from, size_t n)
{
	unsigned long d0, d1, d2;
	asm volatile("rep ; movsl\n\t"
		     "testb $2,%b4\n\t"
		     "je 1f\n\t"
		     "movsw\n"
		     "1:\ttestb $1,%b4\n\t"
		     "je 2f\n\t"
		     "movsb\n"
		     "2:"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from)
		     : "memory");
	return to;
}

/* Even with __builtin_ the compiler may decide to use the out-of-line
   function. */

#define __HAVE_ARCH_MEMCPY 1
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);

#ifndef CONFIG_FORTIFY_SOURCE
#ifndef CONFIG_KMEMCHECK
#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
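/*
 * For these old gcc versions: a compile-time-constant size of 64 bytes
 * or more goes straight to the out-of-line __memcpy(); everything else
 * is left to __builtin_memcpy(), which may inline the copy.
 */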
#define memcpy(dst, src, len)					\
({								\
	size_t __len = (len);					\
	void *__ret;						\
	if (__builtin_constant_p(len) && __len >= 64)		\
		__ret = __memcpy((dst), (src), __len);		\
	else							\
		__ret = __builtin_memcpy((dst), (src), __len);	\
	__ret;							\
})
#endif
#else
/*
 * kmemcheck becomes very happy if we use the REP instructions unconditionally,
 * because it means that we know both memory operands in advance.
 */
#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len))
#endif
#endif /* !CONFIG_FORTIFY_SOURCE */

#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
void *__memset(void *s, int c, size_t n);

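/*
 * memset16/32/64() fill memory with a repeating 16/32/64-bit pattern
 * using "rep stos{w,l,q}". Note that n counts elements of the given
 * width, not bytes.
 */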
#define __HAVE_ARCH_MEMSET16
static inline void *memset16(uint16_t *s, uint16_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosw"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET32
static inline void *memset32(uint32_t *s, uint32_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosl"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET64
static inline void *memset64(uint64_t *s, uint64_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosq"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}
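
/*
 * Example (illustrative only; "line", "pixel" and "xres" are hypothetical
 * names): filling a 16bpp framebuffer scanline with one RGB565 pixel value
 *
 *	memset16(line, pixel, xres);
 *
 * stores xres 16-bit elements, i.e. 2 * xres bytes.
 */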

#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t count);
void *__memmove(void *dest, const void *src, size_t count);

int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);

#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)

/*
 * For files that are not instrumented (e.g. mm/slub.c) we
 * should use the non-instrumented versions of the mem* functions.
 */

#undef memcpy
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)

#ifndef __NO_FORTIFY
#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
#endif

#endif

#define __HAVE_ARCH_MEMCPY_MCSAFE 1
__must_check int memcpy_mcsafe_unrolled(void *dst, const void *src, size_t cnt);
DECLARE_STATIC_KEY_FALSE(mcsafe_key);

/**
 * memcpy_mcsafe - copy memory with indication if a machine check happened
 *
 * @dst: destination address
 * @src: source address
 * @cnt: number of bytes to copy
 *
 * Low level memory copy function that catches machine checks.
 * We only call into the "safe" function on systems that can
 * actually do machine check recovery. Everyone else can just
 * use memcpy().
 *
 * Return 0 for success, -EFAULT on failure.
 */
static __always_inline __must_check int
memcpy_mcsafe(void *dst, const void *src, size_t cnt)
{
#ifdef CONFIG_X86_MCE
	if (static_branch_unlikely(&mcsafe_key))
		return memcpy_mcsafe_unrolled(dst, src, cnt);
	else
#endif
		memcpy(dst, src, cnt);
	return 0;
}
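
/*
 * Illustrative use (hypothetical "buf", "pmem_addr" and "len"): a driver
 * reading from persistent memory that may contain poisoned cachelines can
 * turn a machine check into an error return instead of a crash:
 *
 *	if (memcpy_mcsafe(buf, pmem_addr, len))
 *		return -EIO;
 */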

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
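/*
 * Copy that guarantees the destination bytes are not left dirty in the
 * CPU cache on return, so stores to persistent memory become durable
 * without a separate flush pass.
 */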
void memcpy_flushcache(void *dst, const void *src, size_t cnt);
#endif

#endif /* __KERNEL__ */

#endif /* _ASM_X86_STRING_64_H */