#ifndef _ASM_X86_STRING_64_H
#define _ASM_X86_STRING_64_H

#ifdef __KERNEL__
#include <linux/jump_label.h>

/* Written 2002 by Andi Kleen */

/* Only used for special circumstances. Stolen from i386/string.h */
static __always_inline void *__inline_memcpy(void *to, const void *from, size_t n)
{
	unsigned long d0, d1, d2;
	/*
	 * Copy n/4 dwords with rep movsl, then test bits 1 and 0 of n
	 * (%b4 is the low byte of n) to copy any remaining word and byte.
	 */
	asm volatile("rep ; movsl\n\t"
		     "testb $2,%b4\n\t"
		     "je 1f\n\t"
		     "movsw\n"
		     "1:\ttestb $1,%b4\n\t"
		     "je 2f\n\t"
		     "movsb\n"
		     "2:"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from)
		     : "memory");
	return to;
}

/* Even with __builtin_ the compiler may decide to use the out-of-line
   function. */

#define __HAVE_ARCH_MEMCPY 1
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);

#ifndef CONFIG_FORTIFY_SOURCE
#ifndef CONFIG_KMEMCHECK
#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
#define memcpy(dst, src, len)					\
({								\
	size_t __len = (len);					\
	void *__ret;						\
	if (__builtin_constant_p(len) && __len >= 64)		\
		__ret = __memcpy((dst), (src), __len);		\
	else							\
		__ret = __builtin_memcpy((dst), (src), __len);	\
	__ret;							\
})
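/*
 * Illustration (not part of the original header): with the macro above, a
 * copy whose length is a compile-time constant of 64 bytes or more, e.g.
 *
 *	memcpy(dst, src, 256);
 *
 * resolves to the out-of-line __memcpy(), while small or variable-length
 * copies fall through to __builtin_memcpy() so the compiler may expand
 * them inline.
 */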
#endif
#else
/*
 * kmemcheck becomes very happy if we use the REP instructions unconditionally,
 * because it means that we know both memory operands in advance.
 */
#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len))
#endif
#endif /* !CONFIG_FORTIFY_SOURCE */

#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
void *__memset(void *s, int c, size_t n);

#define __HAVE_ARCH_MEMSET16
/* Fill the first n 16-bit words at s with the value v using rep stosw. */
static inline void *memset16(uint16_t *s, uint16_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosw"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET32
/* Fill the first n 32-bit words at s with the value v using rep stosl. */
static inline void *memset32(uint32_t *s, uint32_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosl"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET64
/* Fill the first n 64-bit words at s with the value v using rep stosq. */
static inline void *memset64(uint64_t *s, uint64_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosq"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}
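/*
 * Usage sketch (illustrative, not part of the original header): memset32()
 * is a natural fit for filling one scanline of a 32bpp framebuffer with a
 * solid color; "pixels" and "width" here are hypothetical names.
 *
 *	u32 *pixels = line_start;
 *	memset32(pixels, 0x00ff00ff, width);	// fills width dwords
 */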

#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t count);
void *__memmove(void *dest, const void *src, size_t count);

int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);

#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)

/*
 * For files that are not instrumented (e.g. mm/slub.c) we
 * should use the uninstrumented versions of the mem* functions.
 */

#undef memcpy
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)

#ifndef __NO_FORTIFY
#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
#endif

#endif
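/*
 * Note (illustrative, not part of the original header): a file opts out of
 * KASAN instrumentation from its Makefile, e.g. mm/Makefile has
 *
 *	KASAN_SANITIZE_slub.o := n
 *
 * Such files are built without -fsanitize=kernel-address, the compiler does
 * not define __SANITIZE_ADDRESS__, and the raw __mem* aliases above apply.
 */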

#define __HAVE_ARCH_MEMCPY_MCSAFE 1
__must_check int memcpy_mcsafe_unrolled(void *dst, const void *src, size_t cnt);
DECLARE_STATIC_KEY_FALSE(mcsafe_key);

/**
 * memcpy_mcsafe - copy memory with indication if a machine check happened
 *
 * @dst:	destination address
 * @src:	source address
 * @cnt:	number of bytes to copy
 *
 * Low level memory copy function that catches machine checks.
 * We only call into the "safe" function on systems that can
 * actually do machine check recovery. Everyone else can just
 * use memcpy().
 *
 * Return 0 for success, -EFAULT for failure.
 */
static __always_inline __must_check int
memcpy_mcsafe(void *dst, const void *src, size_t cnt)
{
#ifdef CONFIG_X86_MCE
	if (static_branch_unlikely(&mcsafe_key))
		return memcpy_mcsafe_unrolled(dst, src, cnt);
	else
#endif
		memcpy(dst, src, cnt);
	return 0;
}
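/*
 * Usage sketch (illustrative, not part of the original header): callers that
 * read from media that may contain poison, such as the pmem driver, check
 * the return value instead of assuming the copy completed:
 *
 *	if (memcpy_mcsafe(buf, pmem_addr, len))
 *		return -EIO;
 *
 * Without CONFIG_X86_MCE, or with mcsafe_key disabled, this is a plain
 * memcpy() that always returns 0.
 */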

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void memcpy_flushcache(void *dst, const void *src, size_t cnt);
#endif
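/*
 * Note (illustrative, not part of the original header): memcpy_flushcache()
 * copies and then flushes the destination from the CPU cache, for writes to
 * persistent memory where data must reach the persistence domain rather
 * than linger in cache.
 */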

#endif /* __KERNEL__ */

#endif /* _ASM_X86_STRING_64_H */