#ifndef _ASM_X86_XOR_64_H
#define _ASM_X86_XOR_64_H
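/*
 * Template for the plain SSE xor routines (xor_sse_2 ... xor_sse_5,
 * provided by <asm/xor.h>, which includes this header).  The
 * xor_block_template structure itself is declared in <linux/raid/xor.h>.
 */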
static struct xor_block_template xor_block_sse = {
	.name = "generic_sse",
	.do_2 = xor_sse_2,
	.do_3 = xor_sse_3,
	.do_4 = xor_sse_4,
	.do_5 = xor_sse_5,
};


/* Also try the AVX routines */
#include <asm/xor_avx.h>

/* We force the use of the SSE xor block because it can write around L2:
   its non-temporal stores bypass the cache.  We may also be able to load
   into L1 only, depending on how the cpu deals with a load to a line
   that is being prefetched. */
#undef XOR_TRY_TEMPLATES
#define XOR_TRY_TEMPLATES			\
do {						\
	AVX_XOR_SPEED;				\
	xor_speed(&xor_block_sse_pf64);		\
	xor_speed(&xor_block_sse);		\
} while (0)
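
/*
 * XOR_TRY_TEMPLATES is expanded by calibrate_xor_blocks() in crypto/xor.c:
 * each xor_speed() call benchmarks one candidate and the fastest template
 * is selected for use by xor_blocks().  AVX_XOR_SPEED (from <asm/xor_avx.h>)
 * benchmarks the AVX routines only when the CPU actually supports AVX;
 * xor_block_sse_pf64 is the SSE variant that adds explicit prefetching.
 */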

#endif /* _ASM_X86_XOR_64_H */