/*
 * lz4defs.h -- architecture specific defines
 *
 * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

11/*
12 * Detects 64 bits mode
13 */
14#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) \
15 || defined(__ppc64__) || defined(__LP64__))
16#define LZ4_ARCH64 1
17#else
18#define LZ4_ARCH64 0
19#endif
20
21/*
22 * Architecture-specific macros
23 */
24#define BYTE u8
25#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) \
26 || defined(CONFIG_ARM) && __LINUX_ARM_ARCH__ >= 6 \
27 && defined(ARM_EFFICIENT_UNALIGNED_ACCESS)
28typedef struct _U32_S { u32 v; } U32_S;
29typedef struct _U64_S { u64 v; } U64_S;
30
31#define A32(x) (((U32_S *)(x))->v)
32#define A64(x) (((U64_S *)(x))->v)
33
34#define PUT4(s, d) (A32(d) = A32(s))
35#define PUT8(s, d) (A64(d) = A64(s))
36#else /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
37
38#define PUT4(s, d) \
39 put_unaligned(get_unaligned((const u32 *) s), (u32 *) d)
40#define PUT8(s, d) \
41 put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)
42#endif
44#define COPYLENGTH 8
45#define ML_BITS 4
46#define ML_MASK ((1U << ML_BITS) - 1)
47#define RUN_BITS (8 - ML_BITS)
48#define RUN_MASK ((1U << RUN_BITS) - 1)
50#if LZ4_ARCH64/* 64-bit */
51#define STEPSIZE 8
52
53#define LZ4_COPYSTEP(s, d) \
54 do { \
55 PUT8(s, d); \
56 d += 8; \
57 s += 8; \
58 } while (0)
59
60#define LZ4_COPYPACKET(s, d) LZ4_COPYSTEP(s, d)
61
62#define LZ4_SECURECOPY(s, d, e) \
63 do { \
64 if (d < e) { \
65 LZ4_WILDCOPY(s, d, e); \
66 } \
67 } while (0)
68
69#else /* 32-bit */
70#define STEPSIZE 4
71
72#define LZ4_COPYSTEP(s, d) \
73 do { \
74 PUT4(s, d); \
75 d += 4; \
76 s += 4; \
77 } while (0)
78
79#define LZ4_COPYPACKET(s, d) \
80 do { \
81 LZ4_COPYSTEP(s, d); \
82 LZ4_COPYSTEP(s, d); \
83 } while (0)
84
85#define LZ4_SECURECOPY LZ4_WILDCOPY
86#endif
88#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
89 (d = s - get_unaligned_le16(p))
90
91#define LZ4_WILDCOPY(s, d, e) \
92 do { \
93 LZ4_COPYPACKET(s, d); \
94 } while (d < e)