#ifndef __LZ4DEFS_H__
#define __LZ4DEFS_H__

/*
 * lz4defs.h -- common and architecture specific defines for the kernel usage
 *
 * LZ4 - Fast LZ compression algorithm
 * Copyright (C) 2011-2016, Yann Collet.
 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *	* Redistributions of source code must retain the above copyright
 *	  notice, this list of conditions and the following disclaimer.
 *	* Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following disclaimer
 *	  in the documentation and/or other materials provided with the
 *	  distribution.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * You can contact the author at :
 *	- LZ4 homepage : http://www.lz4.org
 *	- LZ4 source repository : https://github.com/lz4/lz4
 *
 * Changed for kernel usage by:
 *	Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
 */

#include <asm/unaligned.h>
#include <linux/string.h>	/* memset, memcpy */

#define FORCE_INLINE __always_inline

/*-************************************
 * Basic Types
 **************************************/
#include <linux/types.h>

typedef uint8_t BYTE;
typedef uint16_t U16;
typedef uint32_t U32;
typedef int32_t S32;
typedef uint64_t U64;
typedef uintptr_t uptrval;

/*-************************************
 * Architecture specifics
 **************************************/
#if defined(CONFIG_64BIT)
#define LZ4_ARCH64 1
#else
#define LZ4_ARCH64 0
#endif

#if defined(__LITTLE_ENDIAN)
#define LZ4_LITTLE_ENDIAN 1
#else
#define LZ4_LITTLE_ENDIAN 0
#endif

/*-************************************
 * Constants
 **************************************/
#define MINMATCH 4

#define WILDCOPYLENGTH 8
#define LASTLITERALS 5
#define MFLIMIT (WILDCOPYLENGTH + MINMATCH)

/* Increasing this value makes compression run slower on incompressible data */
#define LZ4_SKIPTRIGGER 6

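/*
 * Illustrative sketch, not part of the original header: the compressor
 * derives its forward search step from a probe counter shifted right by
 * LZ4_SKIPTRIGGER, so the step stays at 1 for the first 2^LZ4_SKIPTRIGGER
 * failed match attempts and then grows, skipping faster over incompressible
 * regions. The helper and its name below are hypothetical, mirroring the
 * arithmetic used by the compression loop.
 */
static FORCE_INLINE size_t LZ4_exampleSearchStep(U32 searchMatchNb)
{
	/*
	 * With searchMatchNb starting at (1 << LZ4_SKIPTRIGGER) and
	 * incremented per failed attempt, this yields step 1 for the
	 * first 64 attempts, 2 for the next 64, and so on.
	 */
	return (size_t)(searchMatchNb >> LZ4_SKIPTRIGGER);
}
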
#define HASH_UNIT sizeof(size_t)

#define KB (1 << 10)
#define MB (1 << 20)
#define GB (1U << 30)

#define MAXD_LOG 16
#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
#define STEPSIZE sizeof(size_t)

#define ML_BITS 4
#define ML_MASK ((1U << ML_BITS) - 1)
#define RUN_BITS (8 - ML_BITS)
#define RUN_MASK ((1U << RUN_BITS) - 1)

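/*
 * Illustrative sketch, not part of the original header: an LZ4 sequence
 * token packs the literal-run length into the upper RUN_BITS and
 * (matchLength - MINMATCH) into the lower ML_BITS; a field equal to its
 * mask (RUN_MASK or ML_MASK) signals that extra length bytes follow.
 * The hypothetical helper below shows the packing for the short case
 * where both fields fit in the token.
 */
static FORCE_INLINE BYTE LZ4_exampleToken(U32 litLen, U32 matchLen)
{
	/* valid only for litLen < RUN_MASK and matchLen - MINMATCH < ML_MASK */
	return (BYTE)((litLen << ML_BITS) | (matchLen - MINMATCH));
}
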
/*-************************************
 * Reading and writing into memory
 **************************************/
static FORCE_INLINE U16 LZ4_read16(const void *ptr)
{
	return get_unaligned((const U16 *)ptr);
}

static FORCE_INLINE U32 LZ4_read32(const void *ptr)
{
	return get_unaligned((const U32 *)ptr);
}

static FORCE_INLINE size_t LZ4_read_ARCH(const void *ptr)
{
	return get_unaligned((const size_t *)ptr);
}

static FORCE_INLINE void LZ4_write16(void *memPtr, U16 value)
{
	put_unaligned(value, (U16 *)memPtr);
}

static FORCE_INLINE void LZ4_write32(void *memPtr, U32 value)
{
	put_unaligned(value, (U32 *)memPtr);
}

static FORCE_INLINE U16 LZ4_readLE16(const void *memPtr)
{
	return get_unaligned_le16(memPtr);
}

static FORCE_INLINE void LZ4_writeLE16(void *memPtr, U16 value)
{
	put_unaligned_le16(value, memPtr);
}

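/*
 * Illustrative sketch, not part of the original header: the LZ4 block
 * format stores the 2-byte match offset little-endian, which is why the
 * LE16 helpers above exist. A hypothetical round-trip:
 */
static FORCE_INLINE int LZ4_exampleOffsetRoundTrip(void)
{
	BYTE buf[2];

	LZ4_writeLE16(buf, 0x1234);
	/* buf[0] == 0x34, buf[1] == 0x12 on every architecture */
	return LZ4_readLE16(buf) == 0x1234;
}
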
static FORCE_INLINE void LZ4_copy8(void *dst, const void *src)
{
#if LZ4_ARCH64
	U64 a = get_unaligned((const U64 *)src);

	put_unaligned(a, (U64 *)dst);
#else
	U32 a = get_unaligned((const U32 *)src);
	U32 b = get_unaligned((const U32 *)src + 1);

	put_unaligned(a, (U32 *)dst);
	put_unaligned(b, (U32 *)dst + 1);
#endif
}

/*
 * customized variant of memcpy,
 * which can overwrite up to 7 bytes beyond dstEnd
 */
static FORCE_INLINE void LZ4_wildCopy(void *dstPtr,
	const void *srcPtr, void *dstEnd)
{
	BYTE *d = (BYTE *)dstPtr;
	const BYTE *s = (const BYTE *)srcPtr;
	BYTE *const e = (BYTE *)dstEnd;

	do {
		LZ4_copy8(d, s);
		d += 8;
		s += 8;
	} while (d < e);
}

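/*
 * Illustrative sketch, not part of the original header: because
 * LZ4_wildCopy() rounds the copy up to whole 8-byte blocks, the caller
 * must guarantee WILDCOPYLENGTH - 1 spare bytes past dstEnd (the codec
 * reserves them via MFLIMIT/LASTLITERALS). Hypothetical usage:
 */
static FORCE_INLINE void LZ4_exampleWildCopy(BYTE *dst, const BYTE *src,
	size_t len)
{
	/* dst must point into a buffer with at least len + 7 writable bytes */
	LZ4_wildCopy(dst, src, dst + len);
}
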
/*
 * Given a non-zero XOR of two words, return the number of bytes (in
 * memory order) that were equal before the first difference.
 */
static FORCE_INLINE unsigned int LZ4_NbCommonBytes(register size_t val)
{
#if LZ4_LITTLE_ENDIAN
	return __ffs(val) >> 3;
#else
	return (BITS_PER_LONG - 1 - __fls(val)) >> 3;
#endif
}

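/*
 * Illustrative sketch, not part of the original header: on little-endian,
 * the first differing byte of two words is the lowest non-zero byte of
 * their XOR, so __ffs(diff) >> 3 counts the equal leading bytes. With the
 * hypothetical values below, a ^ b == 0x00400000, and on a little-endian
 * machine this returns 2 (bytes 0xef and 0xbe match, byte 2 differs).
 */
static FORCE_INLINE unsigned int LZ4_exampleCommonBytes(void)
{
	size_t const a = (size_t)0x00adbeef;
	size_t const b = (size_t)0x00edbeef;

	return LZ4_NbCommonBytes(a ^ b);
}
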
/*
 * Count how many bytes match between pIn and pMatch,
 * reading no further than pInLimit.
 */
static FORCE_INLINE unsigned int LZ4_count(
	const BYTE *pIn,
	const BYTE *pMatch,
	const BYTE *pInLimit)
{
	const BYTE *const pStart = pIn;

	while (likely(pIn < pInLimit - (STEPSIZE - 1))) {
		size_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);

		if (!diff) {
			pIn += STEPSIZE;
			pMatch += STEPSIZE;
			continue;
		}

		pIn += LZ4_NbCommonBytes(diff);

		return (unsigned int)(pIn - pStart);
	}

#if LZ4_ARCH64
	if ((pIn < (pInLimit - 3))
		&& (LZ4_read32(pMatch) == LZ4_read32(pIn))) {
		pIn += 4;
		pMatch += 4;
	}
#endif

	if ((pIn < (pInLimit - 1))
		&& (LZ4_read16(pMatch) == LZ4_read16(pIn))) {
		pIn += 2;
		pMatch += 2;
	}

	if ((pIn < pInLimit) && (*pMatch == *pIn))
		pIn++;

	return (unsigned int)(pIn - pStart);
}

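/*
 * Illustrative sketch, not part of the original header: LZ4_count()
 * measures how far a candidate match extends. With the hypothetical
 * buffer below, bytes at in+6 repeat the six bytes at in+0, then 'X'
 * differs from 'a', so the call returns 6.
 */
static FORCE_INLINE unsigned int LZ4_exampleMatchLen(void)
{
	static const BYTE in[] = "abcdefabcdefXY";

	return LZ4_count(in + 6, in, in + sizeof(in) - 1);
}
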
/* compressor directives */
typedef enum { noLimit = 0, limitedOutput = 1 } limitedOutput_directive;
typedef enum { byPtr, byU32, byU16 } tableType_t;

typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;

/* decompressor directives */
typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
typedef enum { full = 0, partial = 1 } earlyEnd_directive;

#endif