/*
 * Fill Window with SSE2-optimized hash shifting
 *
 * Copyright (C) 2013 Intel Corporation
 * Authors:
 *  Arjan van de Ven    <arjan@linux.intel.com>
 *  Jim Kukunas         <james.t.kukunas@linux.intel.com>
 *
 * For conditions of distribution and use, see copyright notice in zlib.h
 */

#include <immintrin.h>
#include "deflate.h"

#define UPDATE_HASH(s,h,i) \
    {\
        if (s->level < 6) { \
            h = (3483  * (s->window[i])   +\
                 23081 * (s->window[i+1]) +\
                 6954  * (s->window[i+2]) +\
                 20947 * (s->window[i+3])) & s->hash_mask;\
        } else {\
            h = (25881 * (s->window[i])   +\
                 24674 * (s->window[i+1]) +\
                 25811 * (s->window[i+2])) & s->hash_mask;\
        }\
    }\

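/* Unlike stock zlib's rolling UPDATE_HASH (roughly
 *     h = ((h << s->hash_shift) ^ c) & s->hash_mask,
 * paraphrased here for comparison only), this macro recomputes a
 * multiplicative hash from scratch at position i: four window bytes feed
 * the hash at compression levels below 6, three bytes at level 6 and above.
 */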
/* Reads new input into the window; defined in deflate.c. */
extern int deflate_read_buf OF((z_streamp strm, Bytef *buf, unsigned size));

void fill_window_sse(deflate_state *s)
{
    const __m128i xmm_wsize = _mm_set1_epi16(s->w_size);

    register unsigned n;
    register Posf *p;
    unsigned more;    /* Amount of free space at the end of the window. */
    uInt wsize = s->w_size;

    Assert(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead");

    do {
        more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);

        /* Deal with !@#$% 64K limit: */
        if (sizeof(int) <= 2) {
            if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
                more = wsize;

            } else if (more == (unsigned)(-1)) {
                /* Very unlikely, but possible on 16 bit machine if
                 * strstart == 0 && lookahead == 1 (input done a byte at time)
                 */
                more--;
            }
        }

        /* If the window is almost full and there is insufficient lookahead,
         * move the upper half to the lower one to make room in the upper half.
         */
        if (s->strstart >= wsize+MAX_DIST(s)) {

            zmemcpy(s->window, s->window+wsize, (unsigned)wsize);
            s->match_start -= wsize;
            s->strstart    -= wsize; /* we now have strstart >= MAX_DIST */
            s->block_start -= (long) wsize;

            /* Slide the hash table (could be avoided with 32 bit values
               at the expense of memory usage). We slide even when level == 0
               to keep the hash table consistent if we switch back to level > 0
               later. (Using level 0 permanently is not an optimal usage of
               zlib, so we don't care about this pathological case.)
             */
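            /* The two SSE2 loops below subtract wsize from every head[] and
             * prev[] entry using saturating unsigned subtraction, so entries
             * that pointed below the slid region clamp to 0 (NIL) instead of
             * wrapping.  A rough scalar equivalent, paraphrasing stock zlib's
             * fill_window() for reference only:
             *
             *     n = s->hash_size;
             *     p = &s->head[n];
             *     do {
             *         unsigned m = *--p;
             *         *p = (Pos)(m >= wsize ? m - wsize : NIL);
             *     } while (--n);
             *
             * _mm_subs_epu16() performs that clamp branch-free on eight
             * 16-bit entries per iteration.
             */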
            n = s->hash_size;
            p = &s->head[n];
            p -= 8;
            do {
                __m128i value, result;

                value = _mm_loadu_si128((__m128i *)p);
                result = _mm_subs_epu16(value, xmm_wsize);
                _mm_storeu_si128((__m128i *)p, result);

                p -= 8;
                n -= 8;
            } while (n > 0);

            n = wsize;
#ifndef FASTEST
            p = &s->prev[n];
            p -= 8;
            do {
                __m128i value, result;

                value = _mm_loadu_si128((__m128i *)p);
                result = _mm_subs_epu16(value, xmm_wsize);
                _mm_storeu_si128((__m128i *)p, result);

                p -= 8;
                n -= 8;
            } while (n > 0);
#endif
            more += wsize;
        }
        if (s->strm->avail_in == 0) break;

        /* If there was no sliding:
         *    strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
         *    more == window_size - lookahead - strstart
         * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
         * => more >= window_size - 2*WSIZE + 2
         * In the BIG_MEM or MMAP case (not yet supported),
         *   window_size == input_size + MIN_LOOKAHEAD  &&
         *   strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
         * Otherwise, window_size == 2*WSIZE so more >= 2.
         * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
         */
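        /* Concretely, for the default windowBits = 15 (assumed here only for
         * illustration): WSIZE = 32768, window_size = 65536, MIN_LOOKAHEAD =
         * MAX_MATCH + MIN_MATCH + 1 = 262 and MAX_DIST = WSIZE - MIN_LOOKAHEAD
         * = 32506, so without sliding
         *     more >= 65536 - 261 - (32768 + 32506 - 1) = 2.
         */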
        Assert(more >= 2, "more < 2");

        n = deflate_read_buf(s->strm,
                             s->window + s->strstart + s->lookahead,
                             more);
        s->lookahead += n;

        /* Initialize the hash value now that we have some input: */
        if (s->lookahead >= MIN_MATCH) {
            uInt str = s->strstart;
            s->ins_h = s->window[str];
            if (str >= 1)
                UPDATE_HASH(s, s->ins_h, str + 1 - (MIN_MATCH-1));
#if MIN_MATCH != 3
            Call UPDATE_HASH() MIN_MATCH-3 more times
#endif
        }
        /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
         * but this is not important since only literal bytes will be emitted.
         */

    } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);

    /* If the WIN_INIT bytes after the end of the current data have never been
     * written, then zero those bytes in order to avoid memory check reports of
     * the use of uninitialized (or uninitialised as Julian writes) bytes by
     * the longest match routines.  Update the high water mark for the next
     * time through here.  WIN_INIT is set to MAX_MATCH since the longest match
     * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead.
     */
    if (s->high_water < s->window_size) {
        ulg curr = s->strstart + (ulg)(s->lookahead);
        ulg init;

        if (s->high_water < curr) {
            /* Previous high water mark below current data -- zero WIN_INIT
             * bytes or up to end of window, whichever is less.
             */
            init = s->window_size - curr;
            if (init > WIN_INIT)
                init = WIN_INIT;
            zmemzero(s->window + curr, (unsigned)init);
            s->high_water = curr + init;
        }
        else if (s->high_water < (ulg)curr + WIN_INIT) {
            /* High water mark at or above current data, but below current data
             * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up
             * to end of window, whichever is less.
             */
            init = (ulg)curr + WIN_INIT - s->high_water;
            if (init > s->window_size - s->high_water)
                init = s->window_size - s->high_water;
            zmemzero(s->window + s->high_water, (unsigned)init);
            s->high_water += init;
        }
    }

    Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,
           "not enough room for search");
}
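
/* How a caller might select this routine over the portable fill_window()
 * (a sketch; the DEFLATE_FILL_WINDOW_SSE2 macro, the x86_cpu_enable_simd
 * flag and fill_window_c() are illustrative assumptions, not part of this
 * file):
 *
 *     local void fill_window(deflate_state *s)
 *     {
 *     #ifdef DEFLATE_FILL_WINDOW_SSE2
 *         if (x86_cpu_enable_simd) {
 *             fill_window_sse(s);
 *             return;
 *         }
 *     #endif
 *         fill_window_c(s);    // portable scalar implementation
 *     }
 */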