/*
   LZ4 - Fast LZ compression algorithm
   Copyright (C) 2011-present, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
    - LZ4 homepage : http://www.lz4.org
    - LZ4 source repository : https://github.com/lz4/lz4
*/

/*-************************************
*  Tuning parameters
**************************************/
/*
 * LZ4_HEAPMODE :
 * Select how default compression functions will allocate memory for their hash table,
 * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).
 */
#ifndef LZ4_HEAPMODE
#  define LZ4_HEAPMODE 0
#endif

/*
 * LZ4_ACCELERATION_DEFAULT :
 * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0
 */
#define LZ4_ACCELERATION_DEFAULT 1
/*
 * LZ4_ACCELERATION_MAX :
 * Any "acceleration" value higher than this threshold
 * gets treated as LZ4_ACCELERATION_MAX instead (fix #876)
 */
#define LZ4_ACCELERATION_MAX 65537
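
/* Editor's note: the sketch below is illustrative and not part of upstream
 * lz4.c. It shows how the two macros above shape the `acceleration` argument
 * of LZ4_compress_fast(): values <= 0 fall back to LZ4_ACCELERATION_DEFAULT,
 * and anything above LZ4_ACCELERATION_MAX is clamped. Buffer names are
 * hypothetical. */
#if 0
static int demo_acceleration(const char* src, int srcSize, char* dst, int dstCapacity)
{
    /* acceleration 0 behaves like LZ4_ACCELERATION_DEFAULT (1) */
    int const n1 = LZ4_compress_fast(src, dst, srcSize, dstCapacity, 0);
    /* an oversized value is silently clamped to LZ4_ACCELERATION_MAX (65537) */
    int const n2 = LZ4_compress_fast(src, dst, srcSize, dstCapacity, 1 << 30);
    return (n1 > 0 && n2 > 0) ? 0 : -1;
}
#endif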


/*-************************************
*  CPU Feature Detection
**************************************/
/* LZ4_FORCE_MEMORY_ACCESS
 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The switch below allows selecting a different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : `__packed` statement. It depends on compiler extensions (i.e., not portable).
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method is portable but violates the C standard.
 *            It can generate buggy code on targets whose assembly generation depends on alignment.
 *            But in some circumstances, it's the only known way to get the most performance (e.g., GCC + ARMv6).
 * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
 * Prefer these methods in priority order (0 > 1 > 2)
 */
#ifndef LZ4_FORCE_MEMORY_ACCESS   /* can be defined externally */
#  if defined(__GNUC__) && \
    ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \
   || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
#    define LZ4_FORCE_MEMORY_ACCESS 2
#  elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__)
#    define LZ4_FORCE_MEMORY_ACCESS 1
#  endif
#endif

/*
 * LZ4_FORCE_SW_BITCOUNT
 * Define this parameter if your target system or compiler does not support hardware bit count
 */
#if defined(_MSC_VER) && defined(_WIN32_WCE)   /* Visual Studio for WinCE doesn't support Hardware bit count */
#  undef  LZ4_FORCE_SW_BITCOUNT  /* avoid double def */
#  define LZ4_FORCE_SW_BITCOUNT
#endif


/*-************************************
*  Dependency
**************************************/
/*
 * LZ4_SRC_INCLUDED:
 * Amalgamation flag, whether lz4.c is included
 */
#ifndef LZ4_SRC_INCLUDED
#  define LZ4_SRC_INCLUDED 1
#endif

#ifndef LZ4_STATIC_LINKING_ONLY
#define LZ4_STATIC_LINKING_ONLY
#endif

#ifndef LZ4_DISABLE_DEPRECATE_WARNINGS
#define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */
#endif

#define LZ4_STATIC_LINKING_ONLY  /* LZ4_DISTANCE_MAX */
#include "lz4.h"
/* see also "memory routines" below */


/*-************************************
*  Compiler Options
**************************************/
#if defined(_MSC_VER) && (_MSC_VER >= 1400)   /* Visual Studio 2005+ */
#  include <intrin.h>                /* only present in VS2005+ */
#  pragma warning(disable : 4127)    /* disable: C4127: conditional expression is constant */
#endif  /* _MSC_VER */

#ifndef LZ4_FORCE_INLINE
#  ifdef _MSC_VER    /* Visual Studio */
#    define LZ4_FORCE_INLINE static __forceinline
#  else
#    if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
#      ifdef __GNUC__
#        define LZ4_FORCE_INLINE static inline __attribute__((always_inline))
#      else
#        define LZ4_FORCE_INLINE static inline
#      endif
#    else
#      define LZ4_FORCE_INLINE static
#    endif /* __STDC_VERSION__ */
#  endif  /* _MSC_VER */
#endif /* LZ4_FORCE_INLINE */

/* LZ4_FORCE_O2 and LZ4_FORCE_INLINE
 * gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy8,
 * together with a simple 8-byte copy loop as a fall-back path.
 * However, this optimization hurts the decompression speed by >30%,
 * because the execution does not go to the optimized loop
 * for typical compressible data, and all of the preamble checks
 * before going to the fall-back path become useless overhead.
 * This optimization happens only with the -O3 flag, and -O2 generates
 * a simple 8-byte copy loop.
 * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy8
 * functions are annotated with __attribute__((optimize("O2"))),
 * and also LZ4_wildCopy8 is forcibly inlined, so that the O2 attribute
 * of LZ4_wildCopy8 does not affect the compression speed.
 */
#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__) && !defined(__clang__)
#  define LZ4_FORCE_O2  __attribute__((optimize("O2")))
#  undef LZ4_FORCE_INLINE
#  define LZ4_FORCE_INLINE  static __inline __attribute__((optimize("O2"),always_inline))
#else
#  define LZ4_FORCE_O2
#endif

#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)
#  define expect(expr,value)    (__builtin_expect ((expr),(value)) )
#else
#  define expect(expr,value)    (expr)
#endif

#ifndef likely
#define likely(expr)     expect((expr) != 0, 1)
#endif
#ifndef unlikely
#define unlikely(expr)   expect((expr) != 0, 0)
#endif

/* Should the alignment test prove unreliable, for some reason,
 * it can be disabled by setting LZ4_ALIGN_TEST to 0 */
#ifndef LZ4_ALIGN_TEST  /* can be externally provided */
#  define LZ4_ALIGN_TEST 1
#endif

/*-************************************
*  Memory routines
**************************************/
#ifdef LZ4_USER_MEMORY_FUNCTIONS
/* memory management functions can be customized by user project.
 * The functions below must exist somewhere in the project
 * and be available at link time */
void* LZ4_malloc(size_t s);
void* LZ4_calloc(size_t n, size_t s);
void  LZ4_free(void* p);
#  define ALLOC(s)          LZ4_malloc(s)
#  define ALLOC_AND_ZERO(s) LZ4_calloc(1,s)
#  define FREEMEM(p)        LZ4_free(p)
#else
#  include <stdlib.h>   /* malloc, calloc, free */
#  define ALLOC(s)          malloc(s)
#  define ALLOC_AND_ZERO(s) calloc(1,s)
#  define FREEMEM(p)        free(p)
#endif
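
/* Editor's note: illustrative sketch, not part of upstream lz4.c. A project
 * built with -DLZ4_USER_MEMORY_FUNCTIONS must provide these three symbols at
 * link time; the pass-through bodies below are an assumed minimal example. */
#if 0
#include <stdlib.h>
void* LZ4_malloc(size_t s)           { return malloc(s); }
void* LZ4_calloc(size_t n, size_t s) { return calloc(n, s); }
void  LZ4_free(void* p)              { free(p); }
#endif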

#include <string.h>   /* memset, memcpy */
#define MEM_INIT(p,v,s)   memset((p),(v),(s))


/*-************************************
*  Common Constants
**************************************/
#define MINMATCH 4

#define WILDCOPYLENGTH 8
#define LASTLITERALS   5   /* see ../doc/lz4_Block_format.md#parsing-restrictions */
#define MFLIMIT       12   /* see ../doc/lz4_Block_format.md#parsing-restrictions */
#define MATCH_SAFEGUARD_DISTANCE  ((2*WILDCOPYLENGTH) - MINMATCH)   /* ensure it's possible to write 2 x wildcopyLength without overflowing output buffer */
#define FASTLOOP_SAFE_DISTANCE 64
static const int LZ4_minLength = (MFLIMIT+1);

#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)

#define LZ4_DISTANCE_ABSOLUTE_MAX 65535
#if (LZ4_DISTANCE_MAX > LZ4_DISTANCE_ABSOLUTE_MAX)   /* max supported by LZ4 format */
#  error "LZ4_DISTANCE_MAX is too big : must be <= 65535"
#endif

#define ML_BITS  4
#define ML_MASK  ((1U<<ML_BITS)-1)
#define RUN_BITS (8-ML_BITS)
#define RUN_MASK ((1U<<RUN_BITS)-1)
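
/* Editor's note: worked example, not part of upstream lz4.c. A sequence token
 * packs the literal count in its high RUN_BITS and (matchLength - MINMATCH)
 * in its low ML_BITS. For 5 literals followed by an 8-byte match:
 *     token = (5 << ML_BITS) | (8 - MINMATCH) = 0x54
 * A field that reaches its mask (RUN_MASK or ML_MASK) spills into extra
 * bytes of value 255; e.g. 20 literals encode as field 15 plus one extra
 * length byte of 20 - 15 = 5. */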


/*-************************************
*  Error detection
**************************************/
#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1)
#  include <assert.h>
#else
#  ifndef assert
#    define assert(condition) ((void)0)
#  endif
#endif

#define LZ4_STATIC_ASSERT(c)   { enum { LZ4_static_assert = 1/(int)(!!(c)) }; }   /* use after variable declarations */

#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)
#  include <stdio.h>
   static int g_debuglog_enable = 1;
#  define DEBUGLOG(l, ...) {                          \
       if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) {   \
           fprintf(stderr, __FILE__ ": ");            \
           fprintf(stderr, __VA_ARGS__);              \
           fprintf(stderr, " \n");                    \
   }   }
#else
#  define DEBUGLOG(l, ...) {}   /* disabled */
#endif

static int LZ4_isAligned(const void* ptr, size_t alignment)
{
    return ((size_t)ptr & (alignment -1)) == 0;
}

/*-************************************
*  Types
**************************************/
#include <limits.h>
#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#  include <stdint.h>
   typedef  uint8_t  BYTE;
   typedef uint16_t  U16;
   typedef uint32_t  U32;
   typedef  int32_t  S32;
   typedef uint64_t  U64;
   typedef uintptr_t uptrval;
#else
#  if UINT_MAX != 4294967295UL
#    error "LZ4 code (when not C++ or C99) assumes that sizeof(int) == 4"
#  endif
   typedef unsigned char       BYTE;
   typedef unsigned short      U16;
   typedef unsigned int        U32;
   typedef   signed int        S32;
   typedef unsigned long long  U64;
   typedef size_t              uptrval;   /* generally true, except OpenVMS-64 */
#endif

#if defined(__x86_64__)
  typedef U64    reg_t;   /* 64-bits in x32 mode */
#else
  typedef size_t reg_t;   /* 32-bits in x32 mode */
#endif

typedef enum {
    notLimited = 0,
    limitedOutput = 1,
    fillOutput = 2
} limitedOutput_directive;


/*-************************************
*  Reading and writing into memory
**************************************/

/*
 * LZ4 relies on memcpy with a constant size being inlined. In freestanding
 * environments, the compiler can't assume the implementation of memcpy() is
 * standard compliant, so it can't apply its specialized memcpy() inlining
 * logic. When possible, use __builtin_memcpy() to tell the compiler to
 * analyze memcpy() as if it were standard compliant, so it can inline it
 * in freestanding environments. This is needed when decompressing the
 * Linux Kernel, for example.
 */
#if defined(__GNUC__) && (__GNUC__ >= 4)
#define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)
#else
#define LZ4_memcpy(dst, src, size) memcpy(dst, src, size)
#endif

static unsigned LZ4_isLittleEndian(void)
{
    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental */
    return one.c[0];
}


#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)
/* lie to the compiler about data alignment; use with caution */

static U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; }
static U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; }
static reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; }

static void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }

#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)

/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
typedef union { U16 u16; U32 u32; reg_t uArch; } __attribute__((packed)) unalign;

static U16 LZ4_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
static U32 LZ4_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
static reg_t LZ4_read_ARCH(const void* ptr) { return ((const unalign*)ptr)->uArch; }

static void LZ4_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
static void LZ4_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; }

#else  /* safe and portable access using memcpy() */

static U16 LZ4_read16(const void* memPtr)
{
    U16 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
}

static U32 LZ4_read32(const void* memPtr)
{
    U32 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
}

static reg_t LZ4_read_ARCH(const void* memPtr)
{
    reg_t val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
}

static void LZ4_write16(void* memPtr, U16 value)
{
    LZ4_memcpy(memPtr, &value, sizeof(value));
}

static void LZ4_write32(void* memPtr, U32 value)
{
    LZ4_memcpy(memPtr, &value, sizeof(value));
}

#endif /* LZ4_FORCE_MEMORY_ACCESS */


static U16 LZ4_readLE16(const void* memPtr)
{
    if (LZ4_isLittleEndian()) {
        return LZ4_read16(memPtr);
    } else {
        const BYTE* p = (const BYTE*)memPtr;
        return (U16)((U16)p[0] + (p[1]<<8));
    }
}

static void LZ4_writeLE16(void* memPtr, U16 value)
{
    if (LZ4_isLittleEndian()) {
        LZ4_write16(memPtr, value);
    } else {
        BYTE* p = (BYTE*)memPtr;
        p[0] = (BYTE) value;
        p[1] = (BYTE)(value>>8);
    }
}

/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
LZ4_FORCE_INLINE
void LZ4_wildCopy8(void* dstPtr, const void* srcPtr, void* dstEnd)
{
    BYTE* d = (BYTE*)dstPtr;
    const BYTE* s = (const BYTE*)srcPtr;
    BYTE* const e = (BYTE*)dstEnd;

    do { LZ4_memcpy(d,s,8); d+=8; s+=8; } while (d<e);
}
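
/* Editor's note: illustrative sketch, not part of upstream lz4.c. Because
 * LZ4_wildCopy8() rounds the copy up to whole 8-byte steps, the destination
 * must own slack past dstEnd. Hypothetical buffers: */
#if 0
static void demo_wildCopy8(void)
{
    BYTE src[24] = {0};
    BYTE dst[13 + 8];                    /* 13 payload bytes + 8 bytes of slack */
    LZ4_wildCopy8(dst, src, dst + 13);   /* two 8-byte steps: may write dst[0..15] */
}
#endif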

static const unsigned inc32table[8] = {0, 1, 2,  1,  0,  4, 4, 4};
static const int      dec64table[8] = {0, 0, 0, -1, -4,  1, 2, 3};


#ifndef LZ4_FAST_DEC_LOOP
#  if defined __i386__ || defined _M_IX86 || defined __x86_64__ || defined _M_X64
#    define LZ4_FAST_DEC_LOOP 1
#  elif defined(__aarch64__) && !defined(__clang__)
     /* On aarch64, we disable this optimization for clang because on certain
      * mobile chipsets, performance is reduced with clang. For information
      * refer to https://github.com/lz4/lz4/pull/707 */
#    define LZ4_FAST_DEC_LOOP 1
#  else
#    define LZ4_FAST_DEC_LOOP 0
#  endif
#endif

#if LZ4_FAST_DEC_LOOP

LZ4_FORCE_INLINE void
LZ4_memcpy_using_offset_base(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
{
    assert(srcPtr + offset == dstPtr);
    if (offset < 8) {
        LZ4_write32(dstPtr, 0);   /* silence an msan warning when offset==0 */
        dstPtr[0] = srcPtr[0];
        dstPtr[1] = srcPtr[1];
        dstPtr[2] = srcPtr[2];
        dstPtr[3] = srcPtr[3];
        srcPtr += inc32table[offset];
        LZ4_memcpy(dstPtr+4, srcPtr, 4);
        srcPtr -= dec64table[offset];
        dstPtr += 8;
    } else {
        LZ4_memcpy(dstPtr, srcPtr, 8);
        dstPtr += 8;
        srcPtr += 8;
    }

    LZ4_wildCopy8(dstPtr, srcPtr, dstEnd);
}


/* customized variant of memcpy, which can overwrite up to 32 bytes beyond dstEnd;
 * this version copies 16 bytes twice (instead of 32 bytes once)
 * because it must be compatible with offsets >= 16. */
LZ4_FORCE_INLINE void
LZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd)
{
    BYTE* d = (BYTE*)dstPtr;
    const BYTE* s = (const BYTE*)srcPtr;
    BYTE* const e = (BYTE*)dstEnd;

    do { LZ4_memcpy(d,s,16); LZ4_memcpy(d+16,s+16,16); d+=32; s+=32; } while (d<e);
}

/* LZ4_memcpy_using_offset() presumes :
 * - dstEnd >= dstPtr + MINMATCH
 * - there are at least 8 bytes available to write after dstEnd */
LZ4_FORCE_INLINE void
LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
{
    BYTE v[8];

    assert(dstEnd >= dstPtr + MINMATCH);

    switch(offset) {
    case 1:
        MEM_INIT(v, *srcPtr, 8);
        break;
    case 2:
        LZ4_memcpy(v, srcPtr, 2);
        LZ4_memcpy(&v[2], srcPtr, 2);
        LZ4_memcpy(&v[4], v, 4);
        break;
    case 4:
        LZ4_memcpy(v, srcPtr, 4);
        LZ4_memcpy(&v[4], srcPtr, 4);
        break;
    default:
        LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset);
        return;
    }

    LZ4_memcpy(dstPtr, v, 8);
    dstPtr += 8;
    while (dstPtr < dstEnd) {
        LZ4_memcpy(dstPtr, v, 8);
        dstPtr += 8;
    }
}
#endif
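
/* Editor's note: illustrative sketch, not part of upstream lz4.c. For small
 * offsets the match overlaps its own output, so the copy degenerates into
 * pattern replication: offset 1 smears a single byte (run-length expansion),
 * while offsets 2 and 4 replicate a 2- or 4-byte pattern via the v[8]
 * staging buffer above. Hypothetical illustration of the offset==2 case: */
#if 0
static void demo_offset2(void)
{
    BYTE buf[24] = { 'a', 'b' };                /* decoded so far: "ab"      */
    /* a match with offset 2 starting at buf+2 must extend the "ab" pattern */
    LZ4_memcpy_using_offset(buf + 2, buf, buf + 10, 2);
    /* buf now begins "ababababab" */
}
#endif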


/*-************************************
*  Common functions
**************************************/
static unsigned LZ4_NbCommonBytes (reg_t val)
{
    assert(val != 0);
    if (LZ4_isLittleEndian()) {
        if (sizeof(val) == 8) {
#       if defined(_MSC_VER) && (_MSC_VER >= 1800) && defined(_M_AMD64) && !defined(LZ4_FORCE_SW_BITCOUNT)
            /* x64 CPUs without BMI support interpret `TZCNT` as `REP BSF` */
            return (unsigned)_tzcnt_u64(val) >> 3;
#       elif defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
            unsigned long r = 0;
            _BitScanForward64(&r, (U64)val);
            return (unsigned)r >> 3;
#       elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
                            ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
                                        !defined(LZ4_FORCE_SW_BITCOUNT)
            return (unsigned)__builtin_ctzll((U64)val) >> 3;
#       else
            const U64 m = 0x0101010101010101ULL;
            val ^= val - 1;
            return (unsigned)(((U64)((val & (m - 1)) * m)) >> 56);
#       endif
        } else /* 32 bits */ {
#       if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT)
            unsigned long r;
            _BitScanForward(&r, (U32)val);
            return (unsigned)r >> 3;
#       elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
                            ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
                        !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)
            return (unsigned)__builtin_ctz((U32)val) >> 3;
#       else
            const U32 m = 0x01010101;
            return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24;
#       endif
        }
    } else   /* Big Endian CPU */ {
        if (sizeof(val)==8) {
#       if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
                            ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
                        !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)
            return (unsigned)__builtin_clzll((U64)val) >> 3;
#       else
#if 1
            /* this method is probably faster,
             * but adds a 128 bytes lookup table */
            static const unsigned char ctz7_tab[128] = {
                7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
                4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
                5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
                4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
                6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
                4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
                5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
                4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
            };
            U64 const mask = 0x0101010101010101ULL;
            U64 const t = (((val >> 8) - mask) | val) & mask;
            return ctz7_tab[(t * 0x0080402010080402ULL) >> 57];
#else
            /* this method doesn't consume memory space like the previous one,
             * but it contains several branches,
             * that may end up slowing execution */
            static const U32 by32 = sizeof(val)*4;   /* 32 on 64 bits (goal), 16 on 32 bits.
                Just to avoid some static analyzer complaining about shift by 32 on 32-bits target.
                Note that this code path is never triggered in 32-bits mode. */
            unsigned r;
            if (!(val>>by32)) { r=4; } else { r=0; val>>=by32; }
            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
            r += (!val);
            return r;
#endif
#       endif
        } else   /* 32 bits */ {
#       if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
                            ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
                                        !defined(LZ4_FORCE_SW_BITCOUNT)
            return (unsigned)__builtin_clz((U32)val) >> 3;
#       else
            val >>= 8;
            val = ((((val + 0x00FFFF00) | 0x00FFFFFF) + val) |
              (val + 0x00FF0000)) >> 24;
            return (unsigned)val ^ 3;
#       endif
        }
    }
}
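
/* Editor's note: illustrative sketch, not part of upstream lz4.c.
 * LZ4_NbCommonBytes() takes the XOR of two machine words and reports how many
 * of their lowest-addressed bytes agree (trailing zero bytes on little-endian
 * targets, leading zero bytes on big-endian ones). */
#if 0
static void demo_nbCommonBytes(void)
{
    const BYTE a[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
    const BYTE b[8] = { 1, 2, 3, 9, 9, 9, 9, 9 };   /* first 3 bytes agree */
    reg_t const diff = LZ4_read_ARCH(a) ^ LZ4_read_ARCH(b);
    unsigned const n = LZ4_NbCommonBytes(diff);     /* n == 3 */
    (void)n;
}
#endif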


#define STEPSIZE sizeof(reg_t)

LZ4_FORCE_INLINE
unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
{
    const BYTE* const pStart = pIn;

    if (likely(pIn < pInLimit-(STEPSIZE-1))) {
        reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
        if (!diff) {
            pIn+=STEPSIZE; pMatch+=STEPSIZE;
        } else {
            return LZ4_NbCommonBytes(diff);
    }   }

    while (likely(pIn < pInLimit-(STEPSIZE-1))) {
        reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
        if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
        pIn += LZ4_NbCommonBytes(diff);
        return (unsigned)(pIn - pStart);
    }

    if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }
    if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }
    if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
    return (unsigned)(pIn - pStart);
}
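
/* Editor's note: illustrative sketch, not part of upstream lz4.c. LZ4_count()
 * measures a match length: how many bytes starting at pIn equal the bytes
 * starting at pMatch, never advancing pIn to or past pInLimit. */
#if 0
static void demo_count(void)
{
    const BYTE* const s = (const BYTE*)"abcabcabcXYZ";
    /* compare the run at s+3 against the one at s: 6 equal bytes ("abcabc") */
    unsigned const len = LZ4_count(s + 3, s, s + 9);
    (void)len;   /* len == 6 */
}
#endif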


#ifndef LZ4_COMMONDEFS_ONLY
/*-************************************
*  Local Constants
**************************************/
static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT-1));
static const U32 LZ4_skipTrigger = 6;   /* Increase this value ==> compression runs slower on incompressible data */


/*-************************************
*  Local Structures and types
**************************************/
typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t;

/*
 * This enum distinguishes several different modes of accessing previous
 * content in the stream.
 *
 * - noDict        : There is no preceding content.
 * - withPrefix64k : Table entries up to ctx->dictSize before the current blob
 *                   being compressed are valid and refer to the preceding
 *                   content (of length ctx->dictSize), which is available
 *                   contiguously preceding in memory the content currently
 *                   being compressed.
 * - usingExtDict  : Like withPrefix64k, but the preceding content is somewhere
 *                   else in memory, starting at ctx->dictionary with length
 *                   ctx->dictSize.
 * - usingDictCtx  : Everything concerning the preceding content is in a
 *                   separate context, pointed to by ctx->dictCtx.
 *                   ctx->dictCtx->dictionary and ctx->dictCtx->dictSize
 *                   describe the location and size of the preceding content,
 *                   and matches are found by looking in
 *                   ctx->dictCtx->hashTable.
 */
typedef enum {
    noDict = 0,
    withPrefix64k,
    usingExtDict,
    usingDictCtx
} dict_directive;
typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;


/*-************************************
*  Local Utils
**************************************/
int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }
const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }
int LZ4_compressBound(int isize)  { return LZ4_COMPRESSBOUND(isize); }
int LZ4_sizeofState(void) { return LZ4_STREAMSIZE; }


/*-************************************
*  Internal Definitions used in Tests
**************************************/
#if defined (__cplusplus)
extern "C" {
#endif

int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize);

int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
                                     int compressedSize, int maxOutputSize,
                                     const void* dictStart, size_t dictSize);

#if defined (__cplusplus)
}
#endif

/*-******************************
*  Compression functions
********************************/
LZ4_FORCE_INLINE U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
{
    if (tableType == byU16)
        return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
    else
        return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
}

LZ4_FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
{
    const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG;
    if (LZ4_isLittleEndian()) {
        const U64 prime5bytes = 889523592379ULL;
        return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
    } else {
        const U64 prime8bytes = 11400714785074694791ULL;
        return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
    }
}

LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
{
    if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);
    return LZ4_hash4(LZ4_read32(p), tableType);
}

LZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType)
{
    switch (tableType)
    {
    default: /* fallthrough */
    case clearedTable: { /* illegal! */ assert(0); return; }
    case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = NULL; return; }
    case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = 0; return; }
    case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = 0; return; }
    }
}

LZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType)
{
    switch (tableType)
    {
    default: /* fallthrough */
    case clearedTable: /* fallthrough */
    case byPtr: { /* illegal! */ assert(0); return; }
    case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = idx; return; }
    case byU16: { U16* hashTable = (U16*) tableBase; assert(idx < 65536); hashTable[h] = (U16)idx; return; }
    }
}

LZ4_FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h,
                                  void* tableBase, tableType_t const tableType,
                            const BYTE* srcBase)
{
    switch (tableType)
    {
    case clearedTable: { /* illegal! */ assert(0); return; }
    case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; }
    case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; }
    case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; }
    }
}

LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
    U32 const h = LZ4_hashPosition(p, tableType);
    LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
}

/* LZ4_getIndexOnHash() :
 * Index of match position registered in hash table.
 * hash position must be calculated by using base+index, or dictBase+index.
 * Assumption 1 : only valid if tableType == byU32 or byU16.
 * Assumption 2 : h is presumed valid (within limits of hash table)
 */
LZ4_FORCE_INLINE U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType)
{
    LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2);
    if (tableType == byU32) {
        const U32* const hashTable = (const U32*) tableBase;
        assert(h < (1U << (LZ4_MEMORY_USAGE-2)));
        return hashTable[h];
    }
    if (tableType == byU16) {
        const U16* const hashTable = (const U16*) tableBase;
        assert(h < (1U << (LZ4_MEMORY_USAGE-1)));
        return hashTable[h];
    }
    assert(0); return 0;   /* forbidden case */
}
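
/* Editor's note: illustrative sketch, not part of upstream lz4.c. The
 * compressor hashes the next few input bytes and uses the hash as a slot in
 * hashTable, storing/retrieving candidate match positions as indexes: */
#if 0
static void demo_hashTable(const BYTE* ip, LZ4_stream_t_internal* ctx, const BYTE* base)
{
    U32 const h = LZ4_hashPosition(ip, byU32);     /* hash of bytes at ip */
    LZ4_putIndexOnHash((U32)(ip - base), h, ctx->hashTable, byU32);
    {   U32 const candidate = LZ4_getIndexOnHash(h, ctx->hashTable, byU32);
        (void)candidate;                           /* == (U32)(ip - base) */
    }
}
#endif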

static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
    if (tableType == byPtr) { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; }
    if (tableType == byU32) { const U32* const hashTable = (const U32*) tableBase; return hashTable[h] + srcBase; }
    { const U16* const hashTable = (const U16*) tableBase; return hashTable[h] + srcBase; }   /* default, to ensure a return */
}

LZ4_FORCE_INLINE const BYTE*
LZ4_getPosition(const BYTE* p,
                const void* tableBase, tableType_t tableType,
                const BYTE* srcBase)
{
    U32 const h = LZ4_hashPosition(p, tableType);
    return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
}

LZ4_FORCE_INLINE void
LZ4_prepareTable(LZ4_stream_t_internal* const cctx,
                 const int inputSize,
                 const tableType_t tableType) {
    /* If the table hasn't been used, it's guaranteed to be zeroed out, and is
     * therefore safe to use no matter what mode we're in. Otherwise, we figure
     * out if it's safe to leave as is or whether it needs to be reset.
     */
    if ((tableType_t)cctx->tableType != clearedTable) {
        assert(inputSize >= 0);
        if ((tableType_t)cctx->tableType != tableType
          || ((tableType == byU16) && cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU)
          || ((tableType == byU32) && cctx->currentOffset > 1 GB)
          || tableType == byPtr
          || inputSize >= 4 KB)
        {
            DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", cctx);
            MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);
            cctx->currentOffset = 0;
            cctx->tableType = (U32)clearedTable;
        } else {
            DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)");
        }
    }

    /* Adding a gap, so all previous entries are > LZ4_DISTANCE_MAX back, is faster
     * than compressing without a gap. However, compressing with
     * currentOffset == 0 is faster still, so we preserve that case.
     */
    if (cctx->currentOffset != 0 && tableType == byU32) {
        DEBUGLOG(5, "LZ4_prepareTable: adding 64KB to currentOffset");
        cctx->currentOffset += 64 KB;
    }

    /* Finally, clear history */
    cctx->dictCtx = NULL;
    cctx->dictionary = NULL;
    cctx->dictSize = 0;
}

/** LZ4_compress_generic() :
 *  inlined, to ensure branches are decided at compilation time.
 *  Presumed already validated at this stage:
 *  - source != NULL
 *  - inputSize > 0
 */
LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
                 LZ4_stream_t_internal* const cctx,
                 const char* const source,
                 char* const dest,
                 const int inputSize,
                 int *inputConsumed, /* only written when outputDirective == fillOutput */
                 const int maxOutputSize,
                 const limitedOutput_directive outputDirective,
                 const tableType_t tableType,
                 const dict_directive dictDirective,
                 const dictIssue_directive dictIssue,
                 const int acceleration)
{
    int result;
    const BYTE* ip = (const BYTE*) source;

    U32 const startIndex = cctx->currentOffset;
    const BYTE* base = (const BYTE*) source - startIndex;
    const BYTE* lowLimit;

    const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx;
    const BYTE* const dictionary =
        dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary;
    const U32 dictSize =
        dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize;
    const U32 dictDelta = (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0;   /* make indexes in dictCtx comparable with index in current context */

    int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx);
    U32 const prefixIdxLimit = startIndex - dictSize;   /* used when dictDirective == dictSmall */
    const BYTE* const dictEnd = dictionary ? dictionary + dictSize : dictionary;
    const BYTE* anchor = (const BYTE*) source;
    const BYTE* const iend = ip + inputSize;
    const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1;
    const BYTE* const matchlimit = iend - LASTLITERALS;

    /* the dictCtx currentOffset is indexed on the start of the dictionary,
     * while a dictionary in the current context precedes the currentOffset */
    const BYTE* dictBase = !dictionary ? NULL : (dictDirective == usingDictCtx) ?
                            dictionary + dictSize - dictCtx->currentOffset :
                            dictionary + dictSize - startIndex;

    BYTE* op = (BYTE*) dest;
    BYTE* const olimit = op + maxOutputSize;

    U32 offset = 0;
    U32 forwardH;

    DEBUGLOG(5, "LZ4_compress_generic_validated: srcSize=%i, tableType=%u", inputSize, tableType);
    assert(ip != NULL);
    /* If init conditions are not met, we don't have to mark stream
     * as having dirty context, since no action was taken yet */
    if (outputDirective == fillOutput && maxOutputSize < 1) { return 0; }   /* Impossible to store anything */
    if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) { return 0; }    /* Size too large (not within 64K limit) */
    if (tableType==byPtr) assert(dictDirective==noDict);    /* only supported use case with byPtr */
    assert(acceleration >= 1);

    lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? dictSize : 0);

    /* Update context state */
    if (dictDirective == usingDictCtx) {
        /* Subsequent linked blocks can't use the dictionary. */
        /* Instead, they use the block we just compressed. */
        cctx->dictCtx = NULL;
        cctx->dictSize = (U32)inputSize;
    } else {
        cctx->dictSize += (U32)inputSize;
    }
    cctx->currentOffset += (U32)inputSize;
    cctx->tableType = (U32)tableType;

    if (inputSize<LZ4_minLength) goto _last_literals;   /* Input too small, no compression (all literals) */

    /* First Byte */
    LZ4_putPosition(ip, cctx->hashTable, tableType, base);
    ip++; forwardH = LZ4_hashPosition(ip, tableType);

    /* Main Loop */
    for ( ; ; ) {
        const BYTE* match;
        BYTE* token;
        const BYTE* filledIp;

        /* Find a match */
        if (tableType == byPtr) {
            const BYTE* forwardIp = ip;
            int step = 1;
            int searchMatchNb = acceleration << LZ4_skipTrigger;
            do {
                U32 const h = forwardH;
                ip = forwardIp;
                forwardIp += step;
                step = (searchMatchNb++ >> LZ4_skipTrigger);

                if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
                assert(ip < mflimitPlusOne);

                match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
                forwardH = LZ4_hashPosition(forwardIp, tableType);
                LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);

            } while ( (match+LZ4_DISTANCE_MAX < ip)
                   || (LZ4_read32(match) != LZ4_read32(ip)) );

        } else {   /* byU32, byU16 */

            const BYTE* forwardIp = ip;
            int step = 1;
            int searchMatchNb = acceleration << LZ4_skipTrigger;
            do {
                U32 const h = forwardH;
                U32 const current = (U32)(forwardIp - base);
                U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
                assert(matchIndex <= current);
                assert(forwardIp - base < (ptrdiff_t)(2 GB - 1));
                ip = forwardIp;
                forwardIp += step;
                step = (searchMatchNb++ >> LZ4_skipTrigger);

                if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
                assert(ip < mflimitPlusOne);

                if (dictDirective == usingDictCtx) {
                    if (matchIndex < startIndex) {
                        /* there was no match, try the dictionary */
                        assert(tableType == byU32);
                        matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
                        match = dictBase + matchIndex;
                        matchIndex += dictDelta;   /* make dictCtx index comparable with current context */
                        lowLimit = dictionary;
                    } else {
                        match = base + matchIndex;
                        lowLimit = (const BYTE*)source;
                    }
                } else if (dictDirective==usingExtDict) {
                    if (matchIndex < startIndex) {
                        DEBUGLOG(7, "extDict candidate: matchIndex=%5u  <  startIndex=%5u", matchIndex, startIndex);
                        assert(startIndex - matchIndex >= MINMATCH);
                        match = dictBase + matchIndex;
                        lowLimit = dictionary;
                    } else {
                        match = base + matchIndex;
                        lowLimit = (const BYTE*)source;
                    }
                } else {   /* single continuous memory segment */
                    match = base + matchIndex;
                }
                forwardH = LZ4_hashPosition(forwardIp, tableType);
                LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);

                DEBUGLOG(7, "candidate at pos=%u (offset=%u)\n", matchIndex, current - matchIndex);
                if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) { continue; }   /* match outside of valid area */
                assert(matchIndex < current);
                if ( ((tableType != byU16) || (LZ4_DISTANCE_MAX < LZ4_DISTANCE_ABSOLUTE_MAX))
                  && (matchIndex+LZ4_DISTANCE_MAX < current)) {
                    continue;
                }   /* too far */
                assert((current - matchIndex) <= LZ4_DISTANCE_MAX);   /* match now expected within distance */

                if (LZ4_read32(match) == LZ4_read32(ip)) {
                    if (maybe_extMem) offset = current - matchIndex;
                    break;   /* match found */
                }

            } while(1);
        }

        /* Catch up */
        filledIp = ip;
        while (((ip>anchor) & (match > lowLimit)) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }

        /* Encode Literals */
        {   unsigned const litLength = (unsigned)(ip - anchor);
            token = op++;
            if ((outputDirective == limitedOutput) &&   /* Check output buffer overflow */
                (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)) ) {
                return 0;   /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
            }
            if ((outputDirective == fillOutput) &&
                (unlikely(op + (litLength+240)/255 /* litlen */ + litLength /* literals */ + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit))) {
                op--;
                goto _last_literals;
            }
            if (litLength >= RUN_MASK) {
                int len = (int)(litLength - RUN_MASK);
                *token = (RUN_MASK<<ML_BITS);
                for(; len >= 255 ; len-=255) *op++ = 255;
                *op++ = (BYTE)len;
            }
            else *token = (BYTE)(litLength<<ML_BITS);

            /* Copy Literals */
            LZ4_wildCopy8(op, anchor, op+litLength);
            op+=litLength;
            DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
                        (int)(anchor-(const BYTE*)source), litLength, (int)(ip-(const BYTE*)source));
        }

_next_match:
        /* at this stage, the following variables must be correctly set :
         * - ip : at start of LZ operation
         * - match : at start of previous pattern occurrence; can be within current prefix, or within extDict
         * - offset : if maybe_ext_memSegment==1 (constant)
         * - lowLimit : must be == dictionary to mean "match is within extDict"; must be == source otherwise
         * - token and *token : position to write 4-bits for match length; higher 4-bits for literal length supposed already written
         */

        if ((outputDirective == fillOutput) &&
            (op + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit)) {
            /* the match was too close to the end, rewind and go to last literals */
            op = token;
            goto _last_literals;
        }

        /* Encode Offset */
        if (maybe_extMem) {   /* static test */
            DEBUGLOG(6, "             with offset=%u  (ext if > %i)", offset, (int)(ip - (const BYTE*)source));
            assert(offset <= LZ4_DISTANCE_MAX && offset > 0);
            LZ4_writeLE16(op, (U16)offset); op+=2;
        } else {
            DEBUGLOG(6, "             with offset=%u  (same segment)", (U32)(ip - match));
            assert(ip-match <= LZ4_DISTANCE_MAX);
            LZ4_writeLE16(op, (U16)(ip - match)); op+=2;
        }

        /* Encode MatchLength */
        {   unsigned matchCode;

            if ( (dictDirective==usingExtDict || dictDirective==usingDictCtx)
              && (lowLimit==dictionary) /* match within extDict */ ) {
                const BYTE* limit = ip + (dictEnd-match);
                assert(dictEnd > match);
                if (limit > matchlimit) limit = matchlimit;
                matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
                ip += (size_t)matchCode + MINMATCH;
                if (ip==limit) {
                    unsigned const more = LZ4_count(limit, (const BYTE*)source, matchlimit);
                    matchCode += more;
                    ip += more;
                }
                DEBUGLOG(6, "             with matchLength=%u starting in extDict", matchCode+MINMATCH);
            } else {
                matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
                ip += (size_t)matchCode + MINMATCH;
                DEBUGLOG(6, "             with matchLength=%u", matchCode+MINMATCH);
            }

            if ((outputDirective) &&   /* Check output buffer overflow */
                (unlikely(op + (1 + LASTLITERALS) + (matchCode+240)/255 > olimit)) ) {
                if (outputDirective == fillOutput) {
                    /* Match description too long : reduce it */
                    U32 newMatchCode = 15 /* in token */ - 1 /* to avoid needing a zero byte */ + ((U32)(olimit - op) - 1 - LASTLITERALS) * 255;
                    ip -= matchCode - newMatchCode;
                    assert(newMatchCode < matchCode);
                    matchCode = newMatchCode;
                    if (unlikely(ip <= filledIp)) {
                        /* We have already filled up to filledIp so if ip ends up less than filledIp
                         * we have positions in the hash table beyond the current position. This is
                         * a problem if we reuse the hash table. So we have to remove these positions
                         * from the hash table.
                         */
                        const BYTE* ptr;
                        DEBUGLOG(5, "Clearing %u positions", (U32)(filledIp - ip));
                        for (ptr = ip; ptr <= filledIp; ++ptr) {
                            U32 const h = LZ4_hashPosition(ptr, tableType);
                            LZ4_clearHash(h, cctx->hashTable, tableType);
                        }
                    }
                } else {
                    assert(outputDirective == limitedOutput);
                    return 0;   /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
                }
            }
            if (matchCode >= ML_MASK) {
                *token += ML_MASK;
                matchCode -= ML_MASK;
                LZ4_write32(op, 0xFFFFFFFF);
                while (matchCode >= 4*255) {
                    op+=4;
                    LZ4_write32(op, 0xFFFFFFFF);
                    matchCode -= 4*255;
                }
                op += matchCode / 255;
                *op++ = (BYTE)(matchCode % 255);
            } else
                *token += (BYTE)(matchCode);
        }
        /* Ensure we have enough space for the last literals. */
        assert(!(outputDirective == fillOutput && op + 1 + LASTLITERALS > olimit));

        anchor = ip;

        /* Test end of chunk */
        if (ip >= mflimitPlusOne) break;

        /* Fill table */
        LZ4_putPosition(ip-2, cctx->hashTable, tableType, base);

        /* Test next position */
        if (tableType == byPtr) {

            match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
            LZ4_putPosition(ip, cctx->hashTable, tableType, base);
            if ( (match+LZ4_DISTANCE_MAX >= ip)
              && (LZ4_read32(match) == LZ4_read32(ip)) )
            { token=op++; *token=0; goto _next_match; }

        } else {   /* byU32, byU16 */

            U32 const h = LZ4_hashPosition(ip, tableType);
            U32 const current = (U32)(ip-base);
            U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
            assert(matchIndex < current);
            if (dictDirective == usingDictCtx) {
                if (matchIndex < startIndex) {
                    /* there was no match, try the dictionary */
                    matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
                    match = dictBase + matchIndex;
                    lowLimit = dictionary;   /* required for match length counter */
                    matchIndex += dictDelta;
                } else {
                    match = base + matchIndex;
                    lowLimit = (const BYTE*)source;   /* required for match length counter */
                }
            } else if (dictDirective==usingExtDict) {
                if (matchIndex < startIndex) {
                    match = dictBase + matchIndex;
                    lowLimit = dictionary;   /* required for match length counter */
                } else {
                    match = base + matchIndex;
                    lowLimit = (const BYTE*)source;   /* required for match length counter */
                }
            } else {   /* single memory segment */
                match = base + matchIndex;
            }
            LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
            assert(matchIndex < current);
            if ( ((dictIssue==dictSmall) ? (matchIndex >= prefixIdxLimit) : 1)
              && (((tableType==byU16) && (LZ4_DISTANCE_MAX == LZ4_DISTANCE_ABSOLUTE_MAX)) ? 1 : (matchIndex+LZ4_DISTANCE_MAX >= current))
              && (LZ4_read32(match) == LZ4_read32(ip)) ) {
                token=op++;
                *token=0;
                if (maybe_extMem) offset = current - matchIndex;
                DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
                            (int)(anchor-(const BYTE*)source), 0, (int)(ip-(const BYTE*)source));
                goto _next_match;
            }
        }

        /* Prepare next loop */
        forwardH = LZ4_hashPosition(++ip, tableType);

    }

_last_literals:
    /* Encode Last Literals */
    {   size_t lastRun = (size_t)(iend - anchor);
        if ( (outputDirective) &&   /* Check output buffer overflow */
            (op + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > olimit)) {
            if (outputDirective == fillOutput) {
                /* adapt lastRun to fill 'dst' */
                assert(olimit >= op);
                lastRun  = (size_t)(olimit-op) - 1/*token*/;
                lastRun -= (lastRun + 256 - RUN_MASK) / 256;   /*additional length tokens*/
            } else {
                assert(outputDirective == limitedOutput);
                return 0;   /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
            }
        }
        DEBUGLOG(6, "Final literal run : %i literals", (int)lastRun);
        if (lastRun >= RUN_MASK) {
            size_t accumulator = lastRun - RUN_MASK;
            *op++ = RUN_MASK << ML_BITS;
            for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
            *op++ = (BYTE) accumulator;
        } else {
            *op++ = (BYTE)(lastRun<<ML_BITS);
        }
        LZ4_memcpy(op, anchor, lastRun);
        ip = anchor + lastRun;
        op += lastRun;
    }

    if (outputDirective == fillOutput) {
        *inputConsumed = (int) (((const char*)ip)-source);
    }
    result = (int)(((char*)op) - dest);
    assert(result > 0);
    DEBUGLOG(5, "LZ4_compress_generic: compressed %i bytes into %i bytes", inputSize, result);
    return result;
}

/** LZ4_compress_generic() :
 *  inlined, to ensure branches are decided at compilation time;
 *  takes care of src == (NULL, 0)
 *  and forwards the rest to LZ4_compress_generic_validated */
LZ4_FORCE_INLINE int LZ4_compress_generic(
                 LZ4_stream_t_internal* const cctx,
                 const char* const src,
                 char* const dst,
                 const int srcSize,
                 int *inputConsumed, /* only written when outputDirective == fillOutput */
                 const int dstCapacity,
                 const limitedOutput_directive outputDirective,
                 const tableType_t tableType,
                 const dict_directive dictDirective,
                 const dictIssue_directive dictIssue,
                 const int acceleration)
{
    DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, dstCapacity=%i",
                srcSize, dstCapacity);

    if ((U32)srcSize > (U32)LZ4_MAX_INPUT_SIZE) { return 0; }   /* Unsupported srcSize, too large (or negative) */
    if (srcSize == 0) {   /* src == NULL supported if srcSize == 0 */
        if (outputDirective != notLimited && dstCapacity <= 0) return 0;   /* no output, can't write anything */
        DEBUGLOG(5, "Generating an empty block");
        assert(outputDirective == notLimited || dstCapacity >= 1);
        assert(dst != NULL);
        dst[0] = 0;
        if (outputDirective == fillOutput) {
            assert (inputConsumed != NULL);
            *inputConsumed = 0;
        }
        return 1;
    }
    assert(src != NULL);

    return LZ4_compress_generic_validated(cctx, src, dst, srcSize,
                inputConsumed, /* only written into if outputDirective == fillOutput */
                dstCapacity, outputDirective,
                tableType, dictDirective, dictIssue, acceleration);
}


int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
    LZ4_stream_t_internal* const ctx = & LZ4_initStream(state, sizeof(LZ4_stream_t)) -> internal_donotuse;
    assert(ctx != NULL);
    if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
    if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
    if (maxOutputSize >= LZ4_compressBound(inputSize)) {
        if (inputSize < LZ4_64Klimit) {
            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, byU16, noDict, noDictIssue, acceleration);
        } else {
            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
        }
    } else {
        if (inputSize < LZ4_64Klimit) {
            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
        } else {
            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration);
        }
    }
}

/**
 * LZ4_compress_fast_extState_fastReset() :
 * A variant of LZ4_compress_fast_extState().
 *
 * Using this variant avoids an expensive initialization step. It is only safe
 * to call if the state buffer is known to be correctly initialized already
 * (see comment in lz4.h on LZ4_resetStream_fast() for a definition of
 * "correctly initialized").
 */
int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)
{
    LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
    if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
    if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;

    if (dstCapacity >= LZ4_compressBound(srcSize)) {
        if (srcSize < LZ4_64Klimit) {
            const tableType_t tableType = byU16;
            LZ4_prepareTable(ctx, srcSize, tableType);
            if (ctx->currentOffset) {
                return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, dictSmall, acceleration);
            } else {
                return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
            }
        } else {
            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
            LZ4_prepareTable(ctx, srcSize, tableType);
            return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
        }
    } else {
        if (srcSize < LZ4_64Klimit) {
            const tableType_t tableType = byU16;
            LZ4_prepareTable(ctx, srcSize, tableType);
            if (ctx->currentOffset) {
                return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, dictSmall, acceleration);
            } else {
                return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
            }
        } else {
            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
            LZ4_prepareTable(ctx, srcSize, tableType);
            return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
        }
    }
}


int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
    int result;
#if (LZ4_HEAPMODE)
    LZ4_stream_t* ctxPtr = ALLOC(sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
    if (ctxPtr == NULL) return 0;
#else
    LZ4_stream_t ctx;
    LZ4_stream_t* const ctxPtr = &ctx;
#endif
    result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration);

#if (LZ4_HEAPMODE)
    FREEMEM(ctxPtr);
#endif
    return result;
}
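
/* Editor's note: illustrative sketch, not part of upstream lz4.c. Typical
 * one-shot use of the function above; sizing dst with LZ4_compressBound()
 * guarantees compression cannot fail for lack of space. */
#if 0
static int demo_compress(const char* src, int srcSize)
{
    int const bound = LZ4_compressBound(srcSize);
    char* const dst = (char*)ALLOC((size_t)bound);
    int csize = 0;
    if (dst != NULL) {
        csize = LZ4_compress_fast(src, dst, srcSize, bound, 1);
        FREEMEM(dst);
    }
    return csize;   /* > 0 on success */
}
#endif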


int LZ4_compress_default(const char* src, char* dst, int srcSize, int maxOutputSize)
{
    return LZ4_compress_fast(src, dst, srcSize, maxOutputSize, 1);
}


/* Note!: This function leaves the stream in an unclean/broken state!
 * It is not safe to subsequently use the same state with a _fastReset() or
 * _continue() call without resetting it. */
static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize)
{
    void* const s = LZ4_initStream(state, sizeof (*state));
    assert(s != NULL); (void)s;

    if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) {   /* compression success is guaranteed */
        return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);
    } else {
        if (*srcSizePtr < LZ4_64Klimit) {
            return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, 1);
        } else {
            tableType_t const addrMode = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
            return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, 1);
    }   }
}


int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
{
#if (LZ4_HEAPMODE)
    LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
    if (ctx == NULL) return 0;
#else
    LZ4_stream_t ctxBody;
    LZ4_stream_t* ctx = &ctxBody;
#endif

    int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize);

#if (LZ4_HEAPMODE)
    FREEMEM(ctx);
#endif
    return result;
}
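
/* Editor's note: illustrative sketch, not part of upstream lz4.c.
 * LZ4_compress_destSize() reverses the usual contract: the output budget is
 * fixed, and *srcSizePtr is updated to report how much input actually fit. */
#if 0
static void demo_destSize(const char* src, int srcSize, char* dst, int dstCapacity)
{
    int consumed = srcSize;
    int const written = LZ4_compress_destSize(src, dst, &consumed, dstCapacity);
    /* `written` bytes of compressed data now represent the first `consumed`
     * bytes of `src`; any remaining input awaits another block. */
    (void)written;
}
#endif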



/*-******************************
*  Streaming functions
********************************/

LZ4_stream_t* LZ4_createStream(void)
{
    LZ4_stream_t* const lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));
    LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal));   /* A compilation error here means LZ4_STREAMSIZE is not large enough */
    DEBUGLOG(4, "LZ4_createStream %p", lz4s);
    if (lz4s == NULL) return NULL;
    LZ4_initStream(lz4s, sizeof(*lz4s));
    return lz4s;
}

static size_t LZ4_stream_t_alignment(void)
{
#if LZ4_ALIGN_TEST
    typedef struct { char c; LZ4_stream_t t; } t_a;
    return sizeof(t_a) - sizeof(LZ4_stream_t);
#else
    return 1;   /* effectively disabled */
#endif
}

LZ4_stream_t* LZ4_initStream (void* buffer, size_t size)
{
    DEBUGLOG(5, "LZ4_initStream");
    if (buffer == NULL) { return NULL; }
    if (size < sizeof(LZ4_stream_t)) { return NULL; }
    if (!LZ4_isAligned(buffer, LZ4_stream_t_alignment())) { return NULL; }
    MEM_INIT(buffer, 0, sizeof(LZ4_stream_t_internal));
    return (LZ4_stream_t*)buffer;
}

/* resetStream is now deprecated,
 * prefer initStream() which is more general */
void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
{
    DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream);
    MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t_internal));
}

void LZ4_resetStream_fast(LZ4_stream_t* ctx) {
    LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32);
}

int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
{
    if (!LZ4_stream) return 0;   /* support free on NULL */
    DEBUGLOG(5, "LZ4_freeStream %p", LZ4_stream);
    FREEMEM(LZ4_stream);
    return (0);
}


#define HASH_UNIT sizeof(reg_t)
int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
{
    LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse;
    const tableType_t tableType = byU32;
    const BYTE* p = (const BYTE*)dictionary;
    const BYTE* const dictEnd = p + dictSize;
    const BYTE* base;

    DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict);

    /* It's necessary to reset the context,
     * and not just continue it with prepareTable()
     * to avoid any risk of generating overflowing matchIndex
     * when compressing using this dictionary */
    LZ4_resetStream(LZ4_dict);

    /* We always increment the offset by 64 KB, since, if the dict is longer,
     * we truncate it to the last 64k, and if it's shorter, we still want to
     * advance by a whole window length so we can provide the guarantee that
     * there are only valid offsets in the window, which allows an optimization
     * in LZ4_compress_fast_continue() where it uses noDictIssue even when the
     * dictionary isn't a full 64k. */
    dict->currentOffset += 64 KB;

    if (dictSize < (int)HASH_UNIT) {
        return 0;
    }

    if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
    base = dictEnd - dict->currentOffset;
    dict->dictionary = p;
    dict->dictSize = (U32)(dictEnd - p);
    dict->tableType = (U32)tableType;

    while (p <= dictEnd-HASH_UNIT) {
        LZ4_putPosition(p, dict->hashTable, tableType, base);
        p+=3;
    }

    return (int)dict->dictSize;
}
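
/* Editor's note: illustrative sketch, not part of upstream lz4.c. Loading a
 * dictionary before compressing small, similar messages: */
#if 0
static int demo_loadDict(const char* dictBuf, int dictSize,
                         const char* msg, int msgSize, char* dst, int dstCapacity)
{
    LZ4_stream_t ls;
    LZ4_initStream(&ls, sizeof(ls));
    LZ4_loadDict(&ls, dictBuf, dictSize);   /* keeps at most the last 64 KB */
    return LZ4_compress_fast_continue(&ls, msg, dst, msgSize, dstCapacity, 1);
}
#endif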

void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream) {
    const LZ4_stream_t_internal* dictCtx = dictionaryStream == NULL ? NULL :
        &(dictionaryStream->internal_donotuse);

    DEBUGLOG(4, "LZ4_attach_dictionary (%p, %p, size %u)",
             workingStream, dictionaryStream,
             dictCtx != NULL ? dictCtx->dictSize : 0);

    if (dictCtx != NULL) {
        /* If the current offset is zero, we will never look in the
         * external dictionary context, since there is no value a table
         * entry can take that indicates a miss. In that case, we need
         * to bump the offset to something non-zero.
         */
        if (workingStream->internal_donotuse.currentOffset == 0) {
            workingStream->internal_donotuse.currentOffset = 64 KB;
        }

        /* Don't actually attach an empty dictionary.
         */
        if (dictCtx->dictSize == 0) {
            dictCtx = NULL;
        }
    }
    workingStream->internal_donotuse.dictCtx = dictCtx;
}
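
/* Editor's note: illustrative sketch, not part of upstream lz4.c.
 * LZ4_attach_dictionary() lets many short-lived working streams share one
 * prepared dictionary context instead of re-running LZ4_loadDict() each time: */
#if 0
static int demo_attach(const LZ4_stream_t* sharedDict,
                       const char* msg, int msgSize, char* dst, int dstCapacity)
{
    LZ4_stream_t work;
    LZ4_initStream(&work, sizeof(work));
    LZ4_attach_dictionary(&work, sharedDict);   /* cheap, done per message */
    return LZ4_compress_fast_continue(&work, msg, dst, msgSize, dstCapacity, 1);
}
#endif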


static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize)
{
    assert(nextSize >= 0);
    if (LZ4_dict->currentOffset + (unsigned)nextSize > 0x80000000) {   /* potential ptrdiff_t overflow (32-bits mode) */
        /* rescale hash table */
        U32 const delta = LZ4_dict->currentOffset - 64 KB;
        const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
        int i;
        DEBUGLOG(4, "LZ4_renormDictT");
        for (i=0; i<LZ4_HASH_SIZE_U32; i++) {
            if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;
            else LZ4_dict->hashTable[i] -= delta;
        }
        LZ4_dict->currentOffset = 64 KB;
        if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;
        LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
    }
}

1564 
1565 int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,
1566  const char* source, char* dest,
1567  int inputSize, int maxOutputSize,
1568  int acceleration)
1569 {
1570  const tableType_t tableType = byU32;
1571  LZ4_stream_t_internal* streamPtr = &LZ4_stream->internal_donotuse;
1572  const BYTE* dictEnd = streamPtr->dictionary + streamPtr->dictSize;
1573 
1574  DEBUGLOG(5, "LZ4_compress_fast_continue (inputSize=%i)", inputSize);
1575 
1576  LZ4_renormDictT(streamPtr, inputSize); /* avoid index overflow */
1577  if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
1578  if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
1579 
1580  /* invalidate tiny dictionaries */
1581  if ( (streamPtr->dictSize-1 < 4-1) /* intentional underflow */
1582  && (dictEnd != (const BYTE*)source) ) {
1583  DEBUGLOG(5, "LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small", streamPtr->dictSize, streamPtr->dictionary);
1584  streamPtr->dictSize = 0;
1585  streamPtr->dictionary = (const BYTE*)source;
1586  dictEnd = (const BYTE*)source;
1587  }
1588 
1589  /* Check overlapping input/dictionary space */
1590  { const BYTE* sourceEnd = (const BYTE*) source + inputSize;
1591  if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd)) {
1592  streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
1593  if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
1594  if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;
1595  streamPtr->dictionary = dictEnd - streamPtr->dictSize;
1596  }
1597  }
1598 
1599  /* prefix mode : source data follows dictionary */
1600  if (dictEnd == (const BYTE*)source) {
1601  if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
1602  return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, dictSmall, acceleration);
1603  else
1604  return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, noDictIssue, acceleration);
1605  }
1606 
1607  /* external dictionary mode */
1608  { int result;
1609  if (streamPtr->dictCtx) {
1610  /* We depend here on the fact that dictCtx'es (produced by
1611  * LZ4_loadDict) guarantee that their tables contain no references
1612  * to offsets between dictCtx->currentOffset - 64 KB and
1613  * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe
1614  * to use noDictIssue even when the dict isn't a full 64 KB.
1615  */
1616  if (inputSize > 4 KB) {
1617  /* For compressing large blobs, it is faster to pay the setup
1618  * cost to copy the dictionary's tables into the active context,
1619  * so that the compression loop is only looking into one table.
1620  */
1621  LZ4_memcpy(streamPtr, streamPtr->dictCtx, sizeof(*streamPtr));
1622  result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
1623  } else {
1624  result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration);
1625  }
1626  } else {
1627  if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
1628  result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, dictSmall, acceleration);
1629  } else {
1630  result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
1631  }
1632  }
1633  streamPtr->dictionary = (const BYTE*)source;
1634  streamPtr->dictSize = (U32)inputSize;
1635  return result;
1636  }
1637 }
1638 
1639 
1640 /* Hidden debug function, to force-test external dictionary mode */
1641 int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize)
1642 {
1643  LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse;
1644  int result;
1645 
1646  LZ4_renormDictT(streamPtr, srcSize);
1647 
1648  if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
1649  result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, dictSmall, 1);
1650  } else {
1651  result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);
1652  }
1653 
1654  streamPtr->dictionary = (const BYTE*)source;
1655  streamPtr->dictSize = (U32)srcSize;
1656 
1657  return result;
1658 }
1659 
1660 
1668 int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
1669 {
1670  LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
1671  const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;
1672 
1673  if ((U32)dictSize > 64 KB) { dictSize = 64 KB; } /* useless to define a dictionary > 64 KB */
1674  if ((U32)dictSize > dict->dictSize) { dictSize = (int)dict->dictSize; }
1675 
1676  if (safeBuffer == NULL) assert(dictSize == 0);
1677  if (dictSize > 0)
1678  memmove(safeBuffer, previousDictEnd - dictSize, dictSize);
1679 
1680  dict->dictionary = (const BYTE*)safeBuffer;
1681  dict->dictSize = (U32)dictSize;
1682 
1683  return dictSize;
1684 }
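/* [Editor's note: illustrative sketch, not part of lz4.c] Between blocks, when
 * the previous output may be overwritten (e.g. a reused buffer), LZ4_saveDict()
 * moves the last <= 64 KB of history into a stable scratch buffer so later
 * blocks can still match against it. `scratch64k` is a hypothetical buffer of
 * at least 64 KB.
 */
static int save_dict_example(LZ4_stream_t* stream, char* scratch64k)
{
    /* returns the number of trailing history bytes preserved (<= 64 KB) */
    return LZ4_saveDict(stream, scratch64k, 64 * 1024);
}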
1685 
1686 
1687 
1688 /*-*******************************
1689  * Decompression functions
1690  ********************************/
1691 
1692 typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
1693 typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;
1694 
1695 #undef MIN
1696 #define MIN(a,b) ( (a) < (b) ? (a) : (b) )
1697 
1698 /* Read the variable-length literal or match length.
1699  *
1700  * ip - pointer to use as input.
1701  * lencheck - end ip. Return an error if ip advances >= lencheck.
1702  * loop_check - check ip >= lencheck in body of loop. Returns loop_error if so.
1703  * initial_check - check ip >= lencheck before start of loop. Returns initial_error if so.
1704  * error (output) - error code. Should be set to 0 before call.
1705  */
1706 typedef enum { loop_error = -2, initial_error = -1, ok = 0 } variable_length_error;
1707 LZ4_FORCE_INLINE unsigned
1708 read_variable_length(const BYTE**ip, const BYTE* lencheck,
1709  int loop_check, int initial_check,
1710  variable_length_error* error)
1711 {
1712  U32 length = 0;
1713  U32 s;
1714  if (initial_check && unlikely((*ip) >= lencheck)) { /* overflow detection */
1715  *error = initial_error;
1716  return length;
1717  }
1718  do {
1719  s = **ip;
1720  (*ip)++;
1721  length += s;
1722  if (loop_check && unlikely((*ip) >= lencheck)) { /* overflow detection */
1723  *error = loop_error;
1724  return length;
1725  }
1726  } while (s==255);
1727 
1728  return length;
1729 }
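/* [Editor's worked example] The encoding consumed by the loop above: each
 * extension byte adds 0..255, and any byte below 255 terminates the run.
 * For instance, a literal length of 300 is encoded as token nibble 15
 * (RUN_MASK) followed by the bytes 255 and 30, since 15 + 255 + 30 = 300.
 */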
1730 
1737 LZ4_FORCE_INLINE int
1738 LZ4_decompress_generic(
1739  const char* const src,
1740  char* const dst,
1741  int srcSize,
1742  int outputSize, /* If endOnInput==endOnInputSize, this value is `dstCapacity` */
1743 
1744  endCondition_directive endOnInput, /* endOnOutputSize, endOnInputSize */
1745  earlyEnd_directive partialDecoding, /* full, partial */
1746  dict_directive dict, /* noDict, withPrefix64k, usingExtDict */
1747  const BYTE* const lowPrefix, /* always <= dst, == dst when no prefix */
1748  const BYTE* const dictStart, /* only if dict==usingExtDict */
1749  const size_t dictSize /* note : = 0 if noDict */
1750  )
1751 {
1752  if (src == NULL) { return -1; }
1753 
1754  { const BYTE* ip = (const BYTE*) src;
1755  const BYTE* const iend = ip + srcSize;
1756 
1757  BYTE* op = (BYTE*) dst;
1758  BYTE* const oend = op + outputSize;
1759  BYTE* cpy;
1760 
1761  const BYTE* const dictEnd = (dictStart == NULL) ? NULL : dictStart + dictSize;
1762 
1763  const int safeDecode = (endOnInput==endOnInputSize);
1764  const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB)));
1765 
1766 
1767  /* Set up the "end" pointers for the shortcut. */
1768  const BYTE* const shortiend = iend - (endOnInput ? 14 : 8) /*maxLL*/ - 2 /*offset*/;
1769  const BYTE* const shortoend = oend - (endOnInput ? 14 : 8) /*maxLL*/ - 18 /*maxML*/;
1770 
1771  const BYTE* match;
1772  size_t offset;
1773  unsigned token;
1774  size_t length;
1775 
1776 
1777  DEBUGLOG(5, "LZ4_decompress_generic (srcSize:%i, dstSize:%i)", srcSize, outputSize);
1778 
1779  /* Special cases */
1780  assert(lowPrefix <= op);
1781  if ((endOnInput) && (unlikely(outputSize==0))) {
1782  /* Empty output buffer */
1783  if (partialDecoding) return 0;
1784  return ((srcSize==1) && (*ip==0)) ? 0 : -1;
1785  }
1786  if ((!endOnInput) && (unlikely(outputSize==0))) { return (*ip==0 ? 1 : -1); }
1787  if ((endOnInput) && unlikely(srcSize==0)) { return -1; }
1788 
1789  /* Currently the fast loop shows a regression on qualcomm arm chips. */
1790 #if LZ4_FAST_DEC_LOOP
1791  if ((oend - op) < FASTLOOP_SAFE_DISTANCE) {
1792  DEBUGLOG(6, "skip fast decode loop");
1793  goto safe_decode;
1794  }
1795 
1796  /* Fast loop : decode sequences as long as output < iend-FASTLOOP_SAFE_DISTANCE */
1797  while (1) {
1798  /* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */
1799  assert(oend - op >= FASTLOOP_SAFE_DISTANCE);
1800  if (endOnInput) { assert(ip < iend); }
1801  token = *ip++;
1802  length = token >> ML_BITS; /* literal length */
1803 
1804  assert(!endOnInput || ip <= iend); /* ip < iend before the increment */
1805 
1806  /* decode literal length */
1807  if (length == RUN_MASK) {
1808  variable_length_error error = ok;
1809  length += read_variable_length(&ip, iend-RUN_MASK, (int)endOnInput, (int)endOnInput, &error);
1810  if (error == initial_error) { goto _output_error; }
1811  if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
1812  if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
1813 
1814  /* copy literals */
1815  cpy = op+length;
1816  LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
1817  if (endOnInput) { /* LZ4_decompress_safe() */
1818  if ((cpy>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; }
1819  LZ4_wildCopy32(op, ip, cpy);
1820  } else { /* LZ4_decompress_fast() */
1821  if (cpy>oend-8) { goto safe_literal_copy; }
1822  LZ4_wildCopy8(op, ip, cpy); /* LZ4_decompress_fast() cannot copy more than 8 bytes at a time :
1823  * it doesn't know input length, and only relies on end-of-block properties */
1824  }
1825  ip += length; op = cpy;
1826  } else {
1827  cpy = op+length;
1828  if (endOnInput) { /* LZ4_decompress_safe() */
1829  DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe", (unsigned)length);
1830  /* We don't need to check oend, since we check it once for each loop below */
1831  if (ip > iend-(16 + 1/*max lit + offset + nextToken*/)) { goto safe_literal_copy; }
1832  /* Literals can only be <= 14, but hope compilers optimize if we copy by a register size */
1833  LZ4_memcpy(op, ip, 16);
1834  } else { /* LZ4_decompress_fast() */
1835  /* LZ4_decompress_fast() cannot copy more than 8 bytes at a time :
1836  * it doesn't know input length, and relies on end-of-block properties */
1837  LZ4_memcpy(op, ip, 8);
1838  if (length > 8) { LZ4_memcpy(op+8, ip+8, 8); }
1839  }
1840  ip += length; op = cpy;
1841  }
1842 
1843  /* get offset */
1844  offset = LZ4_readLE16(ip); ip+=2;
1845  match = op - offset;
1846  assert(match <= op);
1847 
1848  /* get matchlength */
1849  length = token & ML_MASK;
1850 
1851  if (length == ML_MASK) {
1852  variable_length_error error = ok;
1853  if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
1854  length += read_variable_length(&ip, iend - LASTLITERALS + 1, (int)endOnInput, 0, &error);
1855  if (error != ok) { goto _output_error; }
1856  if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */
1857  length += MINMATCH;
1858  if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
1859  goto safe_match_copy;
1860  }
1861  } else {
1862  length += MINMATCH;
1863  if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
1864  goto safe_match_copy;
1865  }
1866 
1867  /* Fastpath check: Avoids a branch in LZ4_wildCopy32 if true */
1868  if ((dict == withPrefix64k) || (match >= lowPrefix)) {
1869  if (offset >= 8) {
1870  assert(match >= lowPrefix);
1871  assert(match <= op);
1872  assert(op + 18 <= oend);
1873 
1874  LZ4_memcpy(op, match, 8);
1875  LZ4_memcpy(op+8, match+8, 8);
1876  LZ4_memcpy(op+16, match+16, 2);
1877  op += length;
1878  continue;
1879  } } }
1880 
1881  if (checkOffset && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
1882  /* match starting within external dictionary */
1883  if ((dict==usingExtDict) && (match < lowPrefix)) {
1884  if (unlikely(op+length > oend-LASTLITERALS)) {
1885  if (partialDecoding) {
1886  DEBUGLOG(7, "partialDecoding: dictionary match, close to dstEnd");
1887  length = MIN(length, (size_t)(oend-op));
1888  } else {
1889  goto _output_error; /* end-of-block condition violated */
1890  } }
1891 
1892  if (length <= (size_t)(lowPrefix-match)) {
1893  /* match fits entirely within external dictionary : just copy */
1894  memmove(op, dictEnd - (lowPrefix-match), length);
1895  op += length;
1896  } else {
1897  /* match stretches into both external dictionary and current block */
1898  size_t const copySize = (size_t)(lowPrefix - match);
1899  size_t const restSize = length - copySize;
1900  LZ4_memcpy(op, dictEnd - copySize, copySize);
1901  op += copySize;
1902  if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */
1903  BYTE* const endOfMatch = op + restSize;
1904  const BYTE* copyFrom = lowPrefix;
1905  while (op < endOfMatch) { *op++ = *copyFrom++; }
1906  } else {
1907  LZ4_memcpy(op, lowPrefix, restSize);
1908  op += restSize;
1909  } }
1910  continue;
1911  }
1912 
1913  /* copy match within block */
1914  cpy = op + length;
1915 
1916  assert((op <= oend) && (oend-op >= 32));
1917  if (unlikely(offset<16)) {
1918  LZ4_memcpy_using_offset(op, match, cpy, offset);
1919  } else {
1920  LZ4_wildCopy32(op, match, cpy);
1921  }
1922 
1923  op = cpy; /* wildcopy correction */
1924  }
1925  safe_decode:
1926 #endif
1927 
1928  /* Main Loop : decode remaining sequences where output < FASTLOOP_SAFE_DISTANCE */
1929  while (1) {
1930  token = *ip++;
1931  length = token >> ML_BITS; /* literal length */
1932 
1933  assert(!endOnInput || ip <= iend); /* ip < iend before the increment */
1934 
1935  /* A two-stage shortcut for the most common case:
1936  * 1) If the literal length is 0..14, and there is enough space,
1937  * enter the shortcut and copy 16 bytes on behalf of the literals
1938  * (in the fast mode, only 8 bytes can be safely copied this way).
1939  * 2) Further if the match length is 4..18, copy 18 bytes in a similar
1940  * manner; but we ensure that there's enough space in the output for
1941  * those 18 bytes earlier, upon entering the shortcut (in other words,
1942  * there is a combined check for both stages).
1943  */
1944  if ( (endOnInput ? length != RUN_MASK : length <= 8)
1945  /* strictly "less than" on input, to re-enter the loop with at least one byte */
1946  && likely((endOnInput ? ip < shortiend : 1) & (op <= shortoend)) ) {
1947  /* Copy the literals */
1948  LZ4_memcpy(op, ip, endOnInput ? 16 : 8);
1949  op += length; ip += length;
1950 
1951  /* The second stage: prepare for match copying, decode full info.
1952  * If it doesn't work out, the info won't be wasted. */
1953  length = token & ML_MASK; /* match length */
1954  offset = LZ4_readLE16(ip); ip += 2;
1955  match = op - offset;
1956  assert(match <= op); /* check overflow */
1957 
1958  /* Do not deal with overlapping matches. */
1959  if ( (length != ML_MASK)
1960  && (offset >= 8)
1961  && (dict==withPrefix64k || match >= lowPrefix) ) {
1962  /* Copy the match. */
1963  LZ4_memcpy(op + 0, match + 0, 8);
1964  LZ4_memcpy(op + 8, match + 8, 8);
1965  LZ4_memcpy(op +16, match +16, 2);
1966  op += length + MINMATCH;
1967  /* Both stages worked, load the next token. */
1968  continue;
1969  }
1970 
1971  /* The second stage didn't work out, but the info is ready.
1972  * Propel it right to the point of match copying. */
1973  goto _copy_match;
1974  }
1975 
1976  /* decode literal length */
1977  if (length == RUN_MASK) {
1978  variable_length_error error = ok;
1979  length += read_variable_length(&ip, iend-RUN_MASK, (int)endOnInput, (int)endOnInput, &error);
1980  if (error == initial_error) { goto _output_error; }
1981  if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
1982  if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
1983  }
1984 
1985  /* copy literals */
1986  cpy = op+length;
1987 #if LZ4_FAST_DEC_LOOP
1988  safe_literal_copy:
1989 #endif
1990  LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
1991  if ( ((endOnInput) && ((cpy>oend-MFLIMIT) || (ip+length>iend-(2+1+LASTLITERALS))) )
1992  || ((!endOnInput) && (cpy>oend-WILDCOPYLENGTH)) )
1993  {
1994  /* We've either hit the input parsing restriction or the output parsing restriction.
1995  * In the normal scenario, decoding a full block, it must be the last sequence,
1996  * otherwise it's an error (invalid input or dimensions).
1997  * In partialDecoding scenario, it's necessary to ensure there is no buffer overflow.
1998  */
1999  if (partialDecoding) {
2000  /* Since we are partial decoding we may be in this block because of the output parsing
2001  * restriction, which is not valid since the output buffer is allowed to be undersized.
2002  */
2003  assert(endOnInput);
2004  DEBUGLOG(7, "partialDecoding: copying literals, close to input or output end")
2005  DEBUGLOG(7, "partialDecoding: literal length = %u", (unsigned)length);
2006  DEBUGLOG(7, "partialDecoding: remaining space in dstBuffer : %i", (int)(oend - op));
2007  DEBUGLOG(7, "partialDecoding: remaining space in srcBuffer : %i", (int)(iend - ip));
2008  /* Finishing in the middle of a literals segment,
2009  * due to lack of input.
2010  */
2011  if (ip+length > iend) {
2012  length = (size_t)(iend-ip);
2013  cpy = op + length;
2014  }
2015  /* Finishing in the middle of a literals segment,
2016  * due to lack of output space.
2017  */
2018  if (cpy > oend) {
2019  cpy = oend;
2020  assert(op<=oend);
2021  length = (size_t)(oend-op);
2022  }
2023  } else {
2024  /* We must be on the last sequence because of the parsing limitations so check
2025  * that we exactly regenerate the original size (must be exact when !endOnInput).
2026  */
2027  if ((!endOnInput) && (cpy != oend)) { goto _output_error; }
2028  /* We must be on the last sequence (or invalid) because of the parsing limitations
2029  * so check that we exactly consume the input and don't overrun the output buffer.
2030  */
2031  if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) {
2032  DEBUGLOG(6, "should have been last run of literals")
2033  DEBUGLOG(6, "ip(%p) + length(%i) = %p != iend (%p)", ip, (int)length, ip+length, iend);
2034  DEBUGLOG(6, "or cpy(%p) > oend(%p)", cpy, oend);
2035  goto _output_error;
2036  }
2037  }
2038  memmove(op, ip, length); /* supports overlapping memory regions; only matters for in-place decompression scenarios */
2039  ip += length;
2040  op += length;
2041  /* Necessarily EOF when !partialDecoding.
2042  * When partialDecoding, it is EOF if we've either
2043  * filled the output buffer or
2044  * can't proceed with reading an offset for following match.
2045  */
2046  if (!partialDecoding || (cpy == oend) || (ip >= (iend-2))) {
2047  break;
2048  }
2049  } else {
2050  LZ4_wildCopy8(op, ip, cpy); /* may overwrite up to WILDCOPYLENGTH beyond cpy */
2051  ip += length; op = cpy;
2052  }
2053 
2054  /* get offset */
2055  offset = LZ4_readLE16(ip); ip+=2;
2056  match = op - offset;
2057 
2058  /* get matchlength */
2059  length = token & ML_MASK;
2060 
2061  _copy_match:
2062  if (length == ML_MASK) {
2063  variable_length_error error = ok;
2064  length += read_variable_length(&ip, iend - LASTLITERALS + 1, (int)endOnInput, 0, &error);
2065  if (error != ok) goto _output_error;
2066  if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error; /* overflow detection */
2067  }
2068  length += MINMATCH;
2069 
2070 #if LZ4_FAST_DEC_LOOP
2071  safe_match_copy:
2072 #endif
2073  if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error; /* Error : offset outside buffers */
2074  /* match starting within external dictionary */
2075  if ((dict==usingExtDict) && (match < lowPrefix)) {
2076  if (unlikely(op+length > oend-LASTLITERALS)) {
2077  if (partialDecoding) length = MIN(length, (size_t)(oend-op));
2078  else goto _output_error; /* doesn't respect parsing restriction */
2079  }
2080 
2081  if (length <= (size_t)(lowPrefix-match)) {
2082  /* match fits entirely within external dictionary : just copy */
2083  memmove(op, dictEnd - (lowPrefix-match), length);
2084  op += length;
2085  } else {
2086  /* match stretches into both external dictionary and current block */
2087  size_t const copySize = (size_t)(lowPrefix - match);
2088  size_t const restSize = length - copySize;
2089  LZ4_memcpy(op, dictEnd - copySize, copySize);
2090  op += copySize;
2091  if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */
2092  BYTE* const endOfMatch = op + restSize;
2093  const BYTE* copyFrom = lowPrefix;
2094  while (op < endOfMatch) *op++ = *copyFrom++;
2095  } else {
2096  LZ4_memcpy(op, lowPrefix, restSize);
2097  op += restSize;
2098  } }
2099  continue;
2100  }
2101  assert(match >= lowPrefix);
2102 
2103  /* copy match within block */
2104  cpy = op + length;
2105 
2106  /* partialDecoding : may end anywhere within the block */
2107  assert(op<=oend);
2108  if (partialDecoding && (cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {
2109  size_t const mlen = MIN(length, (size_t)(oend-op));
2110  const BYTE* const matchEnd = match + mlen;
2111  BYTE* const copyEnd = op + mlen;
2112  if (matchEnd > op) { /* overlap copy */
2113  while (op < copyEnd) { *op++ = *match++; }
2114  } else {
2115  LZ4_memcpy(op, match, mlen);
2116  }
2117  op = copyEnd;
2118  if (op == oend) { break; }
2119  continue;
2120  }
2121 
2122  if (unlikely(offset<8)) {
2123  LZ4_write32(op, 0); /* silence msan warning when offset==0 */
2124  op[0] = match[0];
2125  op[1] = match[1];
2126  op[2] = match[2];
2127  op[3] = match[3];
2128  match += inc32table[offset];
2129  LZ4_memcpy(op+4, match, 4);
2130  match -= dec64table[offset];
2131  } else {
2132  LZ4_memcpy(op, match, 8);
2133  match += 8;
2134  }
2135  op += 8;
2136 
2137  if (unlikely(cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {
2138  BYTE* const oCopyLimit = oend - (WILDCOPYLENGTH-1);
2139  if (cpy > oend-LASTLITERALS) { goto _output_error; } /* Error : last LASTLITERALS bytes must be literals (uncompressed) */
2140  if (op < oCopyLimit) {
2141  LZ4_wildCopy8(op, match, oCopyLimit);
2142  match += oCopyLimit - op;
2143  op = oCopyLimit;
2144  }
2145  while (op < cpy) { *op++ = *match++; }
2146  } else {
2147  LZ4_memcpy(op, match, 8);
2148  if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); }
2149  }
2150  op = cpy; /* wildcopy correction */
2151  }
2152 
2153  /* end of decoding */
2154  if (endOnInput) {
2155  DEBUGLOG(5, "decoded %i bytes", (int) (((char*)op)-dst));
2156  return (int) (((char*)op)-dst); /* Nb of output bytes decoded */
2157  } else {
2158  return (int) (((const char*)ip)-src); /* Nb of input bytes read */
2159  }
2160 
2161  /* Overflow error detected */
2162  _output_error:
2163  return (int) (-(((const char*)ip)-src))-1;
2164  }
2165 }
2166 
2167 
2168 /*===== Instantiate the API decoding functions. =====*/
2169 
2170 LZ4_FORCE_O2
2171 int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)
2172 {
2173  return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize,
2174  endOnInputSize, decode_full_block, noDict,
2175  (BYTE*)dest, NULL, 0);
2176 }
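/* [Editor's note: illustrative sketch, not part of lz4.c] A basic round trip
 * through the safe decoder instantiated above. Buffers are hypothetical.
 */
static int round_trip_example(const char* src, int srcSize,
                              char* comp, int compCapacity,
                              char* out, int outCapacity)
{
    int const csize = LZ4_compress_default(src, comp, srcSize, compCapacity);
    if (csize <= 0) return -1;
    /* returns srcSize on success, a negative value on corrupt input */
    return LZ4_decompress_safe(comp, out, csize, outCapacity);
}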
2177 
2178 LZ4_FORCE_O2
2179 int LZ4_decompress_safe_partial(const char* src, char* dst, int compressedSize, int targetOutputSize, int dstCapacity)
2180 {
2181  dstCapacity = MIN(targetOutputSize, dstCapacity);
2182  return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,
2183  endOnInputSize, partial_decode,
2184  noDict, (BYTE*)dst, NULL, 0);
2185 }
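/* [Editor's note: illustrative sketch, not part of lz4.c] Partial decoding
 * stops once `want` bytes have been regenerated, e.g. to peek at a header
 * without decoding the whole block. Parameters are hypothetical.
 */
static int peek_prefix_example(const char* comp, int csize,
                               char* out, int outCapacity, int want)
{
    /* returns <= want decoded bytes, or a negative error code */
    return LZ4_decompress_safe_partial(comp, out, csize, want, outCapacity);
}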
2186 
2187 LZ4_FORCE_O2
2188 int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
2189 {
2190  return LZ4_decompress_generic(source, dest, 0, originalSize,
2191  endOnOutputSize, decode_full_block, withPrefix64k,
2192  (BYTE*)dest - 64 KB, NULL, 0);
2193 }
2194 
2195 /*===== Instantiate a few more decoding cases, used more than once. =====*/
2196 
2197 LZ4_FORCE_O2 /* Exported, an obsolete API function. */
2198 int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
2199 {
2200  return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
2201  endOnInputSize, decode_full_block, withPrefix64k,
2202  (BYTE*)dest - 64 KB, NULL, 0);
2203 }
2204 
2205 /* Another obsolete API function, paired with the previous one. */
2206 int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)
2207 {
2208  /* LZ4_decompress_fast doesn't validate match offsets,
2209  * and thus serves well with any prefixed dictionary. */
2210  return LZ4_decompress_fast(source, dest, originalSize);
2211 }
2212 
2213 LZ4_FORCE_O2
2214 static int LZ4_decompress_safe_withSmallPrefix(const char* source, char* dest, int compressedSize, int maxOutputSize,
2215  size_t prefixSize)
2216 {
2217  return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
2218  endOnInputSize, decode_full_block, noDict,
2219  (BYTE*)dest-prefixSize, NULL, 0);
2220 }
2221 
2222 LZ4_FORCE_O2
2223 int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
2224  int compressedSize, int maxOutputSize,
2225  const void* dictStart, size_t dictSize)
2226 {
2227  return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
2228  endOnInputSize, decode_full_block, usingExtDict,
2229  (BYTE*)dest, (const BYTE*)dictStart, dictSize);
2230 }
2231 
2232 LZ4_FORCE_O2
2233 static int LZ4_decompress_fast_extDict(const char* source, char* dest, int originalSize,
2234  const void* dictStart, size_t dictSize)
2235 {
2236  return LZ4_decompress_generic(source, dest, 0, originalSize,
2237  endOnOutputSize, decode_full_block, usingExtDict,
2238  (BYTE*)dest, (const BYTE*)dictStart, dictSize);
2239 }
2240 
2241 /* The "double dictionary" mode, for use with e.g. ring buffers: the first part
2242  * of the dictionary is passed as prefix, and the second via dictStart + dictSize.
2243  * These routines are used only once, in LZ4_decompress_*_continue().
2244  */
2245 LZ4_FORCE_INLINE
2246 int LZ4_decompress_safe_doubleDict(const char* source, char* dest, int compressedSize, int maxOutputSize,
2247  size_t prefixSize, const void* dictStart, size_t dictSize)
2248 {
2249  return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
2250  endOnInputSize, decode_full_block, usingExtDict,
2251  (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize);
2252 }
2253 
2254 LZ4_FORCE_INLINE
2255 int LZ4_decompress_fast_doubleDict(const char* source, char* dest, int originalSize,
2256  size_t prefixSize, const void* dictStart, size_t dictSize)
2257 {
2258  return LZ4_decompress_generic(source, dest, 0, originalSize,
2259  endOnOutputSize, decode_full_block, usingExtDict,
2260  (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize);
2261 }
2262 
2263 /*===== streaming decompression functions =====*/
2264 
2265 LZ4_streamDecode_t* LZ4_createStreamDecode(void)
2266 {
2267  LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t));
2268  LZ4_STATIC_ASSERT(LZ4_STREAMDECODESIZE >= sizeof(LZ4_streamDecode_t_internal)); /* A compilation error here means LZ4_STREAMDECODESIZE is not large enough */
2269  return lz4s;
2270 }
2271 
2272 int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)
2273 {
2274  if (LZ4_stream == NULL) { return 0; } /* support free on NULL */
2275  FREEMEM(LZ4_stream);
2276  return 0;
2277 }
2278 
2285 int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)
2286 {
2287  LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
2288  lz4sd->prefixSize = (size_t) dictSize;
2289  lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize;
2290  lz4sd->externalDict = NULL;
2291  lz4sd->extDictSize = 0;
2292  return 1;
2293 }
2294 
2306 int LZ4_decoderRingBufferSize(int maxBlockSize)
2307 {
2308  if (maxBlockSize < 0) return 0;
2309  if (maxBlockSize > LZ4_MAX_INPUT_SIZE) return 0;
2310  if (maxBlockSize < 16) maxBlockSize = 16;
2311  return LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize);
2312 }
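/* [Editor's note: illustrative sketch, not part of lz4.c] Sizing a decode ring
 * buffer for blocks of at most 4 KB; the margin added by the macro keeps bytes
 * still addressable as dictionary from being overwritten on wrap-around.
 */
#define EXAMPLE_MAX_BLOCK 4096
static char example_ring[LZ4_DECODER_RING_BUFFER_SIZE(EXAMPLE_MAX_BLOCK)];
/* LZ4_decoderRingBufferSize(EXAMPLE_MAX_BLOCK) returns the same size at runtime */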
2313 
2314 /*
2315 *_continue() :
2316  These decoding functions allow decompression of multiple blocks in "streaming" mode.
2317  Previously decoded blocks must still be available at the memory position where they were decoded.
2318  If that is not possible, save the relevant part of the decoded data into a safe buffer,
2319  and indicate where it now resides using LZ4_setStreamDecode()
2320 */
2321 LZ4_FORCE_O2
2322 int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)
2323 {
2324  LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
2325  int result;
2326 
2327  if (lz4sd->prefixSize == 0) {
2328  /* The first call, no dictionary yet. */
2329  assert(lz4sd->extDictSize == 0);
2330  result = LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
2331  if (result <= 0) return result;
2332  lz4sd->prefixSize = (size_t)result;
2333  lz4sd->prefixEnd = (BYTE*)dest + result;
2334  } else if (lz4sd->prefixEnd == (BYTE*)dest) {
2335  /* They're rolling the current segment. */
2336  if (lz4sd->prefixSize >= 64 KB - 1)
2337  result = LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);
2338  else if (lz4sd->extDictSize == 0)
2339  result = LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize,
2340  lz4sd->prefixSize);
2341  else
2342  result = LZ4_decompress_safe_doubleDict(source, dest, compressedSize, maxOutputSize,
2343  lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
2344  if (result <= 0) return result;
2345  lz4sd->prefixSize += (size_t)result;
2346  lz4sd->prefixEnd += result;
2347  } else {
2348  /* The buffer wraps around, or they're switching to another buffer. */
2349  lz4sd->extDictSize = lz4sd->prefixSize;
2350  lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
2351  result = LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize,
2352  lz4sd->externalDict, lz4sd->extDictSize);
2353  if (result <= 0) return result;
2354  lz4sd->prefixSize = (size_t)result;
2355  lz4sd->prefixEnd = (BYTE*)dest + result;
2356  }
2357 
2358  return result;
2359 }
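/* [Editor's note: illustrative sketch, not part of lz4.c] A typical streaming
 * decode loop over a ring buffer. Block framing (read_block) and the data sink
 * (consume) are hypothetical application callbacks.
 */
extern int  read_block(char* dst, int capacity); /* hypothetical framing */
extern void consume(const char* p, int n);       /* hypothetical sink */
static void stream_decode_example(void)
{
    enum { MAX_BLOCK = 4096 };
    static char ring[LZ4_DECODER_RING_BUFFER_SIZE(MAX_BLOCK)];
    char comp[LZ4_COMPRESSBOUND(MAX_BLOCK)];
    LZ4_streamDecode_t sd;
    int pos = 0;
    LZ4_setStreamDecode(&sd, NULL, 0);
    for (;;) {
        int const csize = read_block(comp, (int)sizeof(comp));
        if (csize <= 0) break;
        if (pos + MAX_BLOCK > (int)sizeof(ring)) pos = 0; /* wrap; prior data stays valid */
        {   int const n = LZ4_decompress_safe_continue(&sd, comp, ring + pos,
                                                       csize, MAX_BLOCK);
            if (n < 0) break; /* corrupt input */
            consume(ring + pos, n);
            pos += n;
        }
    }
}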
2360 
2361 LZ4_FORCE_O2
2362 int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize)
2363 {
2364  LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
2365  int result;
2366  assert(originalSize >= 0);
2367 
2368  if (lz4sd->prefixSize == 0) {
2369  assert(lz4sd->extDictSize == 0);
2370  result = LZ4_decompress_fast(source, dest, originalSize);
2371  if (result <= 0) return result;
2372  lz4sd->prefixSize = (size_t)originalSize;
2373  lz4sd->prefixEnd = (BYTE*)dest + originalSize;
2374  } else if (lz4sd->prefixEnd == (BYTE*)dest) {
2375  if (lz4sd->prefixSize >= 64 KB - 1 || lz4sd->extDictSize == 0)
2376  result = LZ4_decompress_fast(source, dest, originalSize);
2377  else
2378  result = LZ4_decompress_fast_doubleDict(source, dest, originalSize,
2379  lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
2380  if (result <= 0) return result;
2381  lz4sd->prefixSize += (size_t)originalSize;
2382  lz4sd->prefixEnd += originalSize;
2383  } else {
2384  lz4sd->extDictSize = lz4sd->prefixSize;
2385  lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
2386  result = LZ4_decompress_fast_extDict(source, dest, originalSize,
2387  lz4sd->externalDict, lz4sd->extDictSize);
2388  if (result <= 0) return result;
2389  lz4sd->prefixSize = (size_t)originalSize;
2390  lz4sd->prefixEnd = (BYTE*)dest + originalSize;
2391  }
2392 
2393  return result;
2394 }
2395 
2396 
2397 /*
2398 Advanced decoding functions :
2399 *_usingDict() :
2400  These decoding functions work the same as the "_continue" ones,
2401  except that the dictionary must be explicitly provided as a parameter
2402 */
2403 
2404 int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
2405 {
2406  if (dictSize==0)
2407  return LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
2408  if (dictStart+dictSize == dest) {
2409  if (dictSize >= 64 KB - 1) {
2410  return LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);
2411  }
2412  assert(dictSize >= 0);
2413  return LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, (size_t)dictSize);
2414  }
2415  assert(dictSize >= 0);
2416  return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, dictStart, (size_t)dictSize);
2417 }
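/* [Editor's note: illustrative sketch, not part of lz4.c] One-shot decompression
 * against an explicit dictionary: the function above dispatches to prefix or
 * external-dictionary mode depending on whether the dictionary ends exactly at
 * `dest`. Buffers are hypothetical.
 */
static int using_dict_example(const char* dict, int dictLen,
                              const char* comp, int csize,
                              char* out, int outCapacity)
{
    return LZ4_decompress_safe_usingDict(comp, out, csize, outCapacity,
                                         dict, dictLen);
}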
2418 
2419 int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
2420 {
2421  if (dictSize==0 || dictStart+dictSize == dest)
2422  return LZ4_decompress_fast(source, dest, originalSize);
2423  assert(dictSize >= 0);
2424  return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart, (size_t)dictSize);
2425 }
2426 
2427 
2428 /*=*************************************************
2429 * Obsolete Functions
2430 ***************************************************/
2431 /* obsolete compression functions */
2432 int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
2433 {
2434  return LZ4_compress_default(source, dest, inputSize, maxOutputSize);
2435 }
2436 int LZ4_compress(const char* src, char* dest, int srcSize)
2437 {
2438  return LZ4_compress_default(src, dest, srcSize, LZ4_compressBound(srcSize));
2439 }
2440 int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize)
2441 {
2442  return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1);
2443 }
2444 int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize)
2445 {
2446  return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1);
2447 }
2448 int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int dstCapacity)
2449 {
2450  return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, dstCapacity, 1);
2451 }
2452 int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize)
2453 {
2454  return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1);
2455 }
2456 
2457 /*
2458 These decompression functions are deprecated and should no longer be used.
2459 They are only provided here for compatibility with older user programs.
2460 - LZ4_uncompress is totally equivalent to LZ4_decompress_fast
2461 - LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
2462 */
2463 int LZ4_uncompress (const char* source, char* dest, int outputSize)
2464 {
2465  return LZ4_decompress_fast(source, dest, outputSize);
2466 }
2467 int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize)
2468 {
2469  return LZ4_decompress_safe(source, dest, isize, maxOutputSize);
2470 }
2471 
2472 /* Obsolete Streaming functions */
2473 
2474 int LZ4_sizeofStreamState(void) { return LZ4_STREAMSIZE; }
2475 
2476 int LZ4_resetStreamState(void* state, char* inputBuffer)
2477 {
2478  (void)inputBuffer;
2479  LZ4_resetStream((LZ4_stream_t*)state);
2480  return 0;
2481 }
2482 
2483 void* LZ4_create (char* inputBuffer)
2484 {
2485  (void)inputBuffer;
2486  return LZ4_createStream();
2487 }
2488 
2489 char* LZ4_slideInputBuffer (void* state)
2490 {
2491  /* avoid const char * -> char * conversion warning */
2492  return (char *)(uptrval)((LZ4_stream_t*)state)->internal_donotuse.dictionary;
2493 }
2494 
2495 #endif /* LZ4_COMMONDEFS_ONLY */