/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_XOR_64_H
#define _ASM_X86_XOR_64_H

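/*
 * SSE-based xor template for x86-64. The xor_sse_{2..5} routines it
 * points at (and xor_block_sse_pf64 below) are expected to be provided
 * by <asm/xor.h>, which includes this header.
 */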
static struct xor_block_template xor_block_sse = {
	.name = "generic_sse",
	.do_2 = xor_sse_2,
	.do_3 = xor_sse_3,
	.do_4 = xor_sse_4,
	.do_5 = xor_sse_5,
};

/* Also try the AVX routines */
#include <asm/xor_avx.h>

/*
 * We force the use of the SSE xor block because it can write around L2.
 * We may also be able to load only into the L1 cache, depending on how
 * the CPU deals with a load to a line that is being prefetched.
 */
#undef XOR_TRY_TEMPLATES
#define XOR_TRY_TEMPLATES			\
do {						\
	AVX_XOR_SPEED;				\
	xor_speed(&xor_block_sse_pf64);		\
	xor_speed(&xor_block_sse);		\
} while (0)
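
/*
 * XOR_TRY_TEMPLATES is expanded by the generic xor benchmarking code
 * (calibrate_xor_blocks() in crypto/xor.c): each xor_speed() call times
 * the named template so the fastest one can be picked for RAID xor.
 */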

#endif /* _ASM_X86_XOR_64_H */