Diffstat (limited to 'lib')
-rw-r--r--  lib/Makefile    |   2
-rw-r--r--  lib/README.md   |  37
-rw-r--r--  lib/lz4.c       | 557
-rw-r--r--  lib/lz4.h       | 150
-rw-r--r--  lib/lz4frame.c  | 147
-rw-r--r--  lib/lz4frame.h  |  36
-rw-r--r--  lib/lz4hc.c     | 307
-rw-r--r--  lib/lz4hc.h     |  55
8 files changed, 757 insertions, 534 deletions
diff --git a/lib/Makefile b/lib/Makefile
index 8f21d3d..c12949b 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -28,7 +28,7 @@
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# You can contact the author at :
-# - LZ4 source repository : https://github.com/Cyan4973/lz4
+# - LZ4 source repository : https://github.com/lz4/lz4
# - LZ4 forum group : https://groups.google.com/forum/#!forum/lz4c
# ################################################################
diff --git a/lib/README.md b/lib/README.md
index cba2c34..e2af868 100644
--- a/lib/README.md
+++ b/lib/README.md
@@ -35,21 +35,22 @@ So it's necessary to include all `*.c` and `*.h` files present in `/lib`.
Definitions which are not guaranteed to remain stable in future versions,
are protected behind macros, such as `LZ4_STATIC_LINKING_ONLY`.
-As the name implies, these definitions can only be invoked
+As the name strongly implies, these definitions should be invoked
in the context of static linking ***only***.
Otherwise, a dependent application may fail on an API or ABI break in the future.
-The associated symbols are also not present in dynamic library by default.
+The associated symbols are also not exposed by the dynamic library by default.
Should they be nonetheless needed, it's possible to force their publication
-by using build macro `LZ4_PUBLISH_STATIC_FUNCTIONS`.
+by using build macros `LZ4_PUBLISH_STATIC_FUNCTIONS`
+and `LZ4F_PUBLISH_STATIC_FUNCTIONS`.
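For example, assuming the same `make` convention used further below, publication can be forced with: `CPPFLAGS+=-DLZ4_PUBLISH_STATIC_FUNCTIONS make lz4`.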
#### Build macros
-The following build macro can be selected at compilation time :
+The following build macros can be selected to adjust source code behavior at compilation time :
-- `LZ4_FAST_DEC_LOOP` : this triggers the optimized decompression loop.
- This loops works great on x86/x64 cpus, and is automatically enabled on this platform.
- It's possible to enable or disable it manually, by passing `LZ4_FAST_DEC_LOOP=1` or `0` to the preprocessor.
+- `LZ4_FAST_DEC_LOOP` : this triggers a speed optimized decompression loop, more powerful on modern cpus.
+ This loop works great on `x86`, `x64` and `aarch64` cpus, and is automatically enabled for them.
+ It's also possible to enable or disable it manually, by passing `LZ4_FAST_DEC_LOOP=1` or `0` to the preprocessor.
For example, with `gcc` : `-DLZ4_FAST_DEC_LOOP=1`,
and with `make` : `CPPFLAGS+=-DLZ4_FAST_DEC_LOOP=1 make lz4`.
@@ -65,8 +66,24 @@ The following build macro can be selected at compilation time :
Should this be a problem, it's generally possible to make the compiler ignore these warnings,
for example with `-Wno-deprecated-declarations` on `gcc`,
or `_CRT_SECURE_NO_WARNINGS` for Visual Studio.
- Another method is to define `LZ4_DISABLE_DEPRECATE_WARNINGS`
- before including the LZ4 header files.
+ This build macro offers another project-specific method
+ by defining `LZ4_DISABLE_DEPRECATE_WARNINGS` before including the LZ4 header files.
+
+- `LZ4_USER_MEMORY_FUNCTIONS` : replace calls to `<stdlib.h>`'s `malloc`, `calloc` and `free`
+ by user-defined functions, which must be called `LZ4_malloc()`, `LZ4_calloc()` and `LZ4_free()`.
+ User functions must be available at link time.
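  For illustration, a minimal sketch of matching user definitions (the names and signatures are fixed by `lz4.c`; forwarding to the standard allocator here is just an example):

      #include <stddef.h>   /* size_t */
      #include <stdlib.h>   /* malloc, calloc, free */

      /* compiled into the project when lz4 is built with -DLZ4_USER_MEMORY_FUNCTIONS */
      void* LZ4_malloc(size_t s)           { return malloc(s); }
      void* LZ4_calloc(size_t n, size_t s) { return calloc(n, s); }
      void  LZ4_free(void* p)              { free(p); }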
+
+- `LZ4_FORCE_SW_BITCOUNT` : by default, the compression algorithm tries to determine lengths
+ by using bitcount instructions, generally implemented as fast single instructions in many cpus.
+ In case the target cpu doesn't support it, the compiler intrinsic doesn't work, or it features bad performance,
+ it's possible to use an optimized software path instead.
+ This is achieved by setting this build macro.
+ In most cases, it's not expected to be necessary,
+ but it can be legitimately considered for less common platforms.
+
+- `LZ4_ALIGN_TEST` : alignment test ensures that the memory area
+ passed as argument to become a compression state is suitably aligned.
+ This test can be disabled if it proves flaky, by setting this macro to 0.
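  For example, with the same `make` convention as above: `CPPFLAGS+=-DLZ4_ALIGN_TEST=0 make lz4`.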
#### Amalgamation
@@ -102,7 +119,7 @@ The compiled executable will require LZ4 DLL which is available at `dll\liblz4.d
#### Miscellaneous
-Other files present in the directory are not source code. There are :
+Other files present in the directory are not source code. They are :
- `LICENSE` : contains the BSD license text
- `Makefile` : `make` script to compile and install lz4 library (static and dynamic)
diff --git a/lib/lz4.c b/lib/lz4.c
index 9808d70..9f5e9bf 100644
--- a/lib/lz4.c
+++ b/lib/lz4.c
@@ -45,10 +45,16 @@
#endif
/*
- * ACCELERATION_DEFAULT :
+ * LZ4_ACCELERATION_DEFAULT :
* Select "acceleration" for LZ4_compress_fast() when parameter value <= 0
*/
-#define ACCELERATION_DEFAULT 1
+#define LZ4_ACCELERATION_DEFAULT 1
+/*
+ * LZ4_ACCELERATION_MAX :
+ * Any "acceleration" value higher than this threshold
+ * gets treated as LZ4_ACCELERATION_MAX instead (fix #876)
+ */
+#define LZ4_ACCELERATION_MAX 65537
/*-************************************
@@ -82,6 +88,7 @@
* Define this parameter if your target system or compiler does not support hardware bit count
*/
#if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for WinCE doesn't support Hardware bit count */
+# undef LZ4_FORCE_SW_BITCOUNT /* avoid double def */
# define LZ4_FORCE_SW_BITCOUNT
#endif
@@ -114,10 +121,9 @@
/*-************************************
* Compiler Options
**************************************/
-#ifdef _MSC_VER /* Visual Studio */
-# include <intrin.h>
-# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
-# pragma warning(disable : 4293) /* disable: C4293: too large shift (32-bits) */
+#if defined(_MSC_VER) && (_MSC_VER >= 1400) /* Visual Studio 2005+ */
+# include <intrin.h> /* only present in VS2005+ */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
#endif /* _MSC_VER */
#ifndef LZ4_FORCE_INLINE
@@ -136,7 +142,7 @@
# endif /* _MSC_VER */
#endif /* LZ4_FORCE_INLINE */
-/* LZ4_FORCE_O2_GCC_PPC64LE and LZ4_FORCE_O2_INLINE_GCC_PPC64LE
+/* LZ4_FORCE_O2 and LZ4_FORCE_INLINE
* gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy8,
* together with a simple 8-byte copy loop as a fall-back path.
* However, this optimization hurts the decompression speed by >30%,
@@ -151,11 +157,11 @@
* of LZ4_wildCopy8 does not affect the compression speed.
*/
#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__) && !defined(__clang__)
-# define LZ4_FORCE_O2_GCC_PPC64LE __attribute__((optimize("O2")))
-# define LZ4_FORCE_O2_INLINE_GCC_PPC64LE __attribute__((optimize("O2"))) LZ4_FORCE_INLINE
+# define LZ4_FORCE_O2 __attribute__((optimize("O2")))
+# undef LZ4_FORCE_INLINE
+# define LZ4_FORCE_INLINE static __inline __attribute__((optimize("O2"),always_inline))
#else
-# define LZ4_FORCE_O2_GCC_PPC64LE
-# define LZ4_FORCE_O2_INLINE_GCC_PPC64LE static
+# define LZ4_FORCE_O2
#endif
#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)
@@ -171,14 +177,33 @@
#define unlikely(expr) expect((expr) != 0, 0)
#endif
+/* Should the alignment test prove unreliable, for some reason,
+ * it can be disabled by setting LZ4_ALIGN_TEST to 0 */
+#ifndef LZ4_ALIGN_TEST /* can be externally provided */
+# define LZ4_ALIGN_TEST 1
+#endif
+
/*-************************************
* Memory routines
**************************************/
-#include <stdlib.h> /* malloc, calloc, free */
-#define ALLOC(s) malloc(s)
-#define ALLOC_AND_ZERO(s) calloc(1,s)
-#define FREEMEM(p) free(p)
+#ifdef LZ4_USER_MEMORY_FUNCTIONS
+/* memory management functions can be customized by user project.
+ * Below functions must exist somewhere in the Project
+ * and be available at link time */
+void* LZ4_malloc(size_t s);
+void* LZ4_calloc(size_t n, size_t s);
+void LZ4_free(void* p);
+# define ALLOC(s) LZ4_malloc(s)
+# define ALLOC_AND_ZERO(s) LZ4_calloc(1,s)
+# define FREEMEM(p) LZ4_free(p)
+#else
+# include <stdlib.h> /* malloc, calloc, free */
+# define ALLOC(s) malloc(s)
+# define ALLOC_AND_ZERO(s) calloc(1,s)
+# define FREEMEM(p) free(p)
+#endif
+
#include <string.h> /* memset, memcpy */
#define MEM_INIT(p,v,s) memset((p),(v),(s))
@@ -225,21 +250,27 @@ static const int LZ4_minLength = (MFLIMIT+1);
#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)
# include <stdio.h>
-static int g_debuglog_enable = 1;
-# define DEBUGLOG(l, ...) { \
- if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \
- fprintf(stderr, __FILE__ ": "); \
- fprintf(stderr, __VA_ARGS__); \
- fprintf(stderr, " \n"); \
- } }
+ static int g_debuglog_enable = 1;
+# define DEBUGLOG(l, ...) { \
+ if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \
+ fprintf(stderr, __FILE__ ": "); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, " \n"); \
+ } }
#else
-# define DEBUGLOG(l, ...) {} /* disabled */
+# define DEBUGLOG(l, ...) {} /* disabled */
#endif
+static int LZ4_isAligned(const void* ptr, size_t alignment)
+{
+ return ((size_t)ptr & (alignment -1)) == 0;
+}
+
/*-************************************
* Types
**************************************/
+#include <limits.h>
#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
# include <stdint.h>
typedef uint8_t BYTE;
@@ -249,6 +280,9 @@ static int g_debuglog_enable = 1;
typedef uint64_t U64;
typedef uintptr_t uptrval;
#else
+# if UINT_MAX != 4294967295UL
+# error "LZ4 code (when not C++ or C99) assumes that sizeof(int) == 4"
+# endif
typedef unsigned char BYTE;
typedef unsigned short U16;
typedef unsigned int U32;
@@ -273,6 +307,21 @@ typedef enum {
/*-************************************
* Reading and writing into memory
**************************************/
+
+/**
+ * LZ4 relies on memcpy with a constant size being inlined. In freestanding
+ * environments, the compiler can't assume the implementation of memcpy() is
+ * standard compliant, so it can't apply its specialized memcpy() inlining
+ * logic. When possible, use __builtin_memcpy() to tell the compiler to analyze
+ * memcpy() as if it were standard compliant, so it can inline it in freestanding
+ * environments. This is needed when decompressing the Linux Kernel, for example.
+ */
+#if defined(__GNUC__) && (__GNUC__ >= 4)
+#define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)
+#else
+#define LZ4_memcpy(dst, src, size) memcpy(dst, src, size)
+#endif
+
static unsigned LZ4_isLittleEndian(void)
{
const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
@@ -307,27 +356,27 @@ static void LZ4_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = val
static U16 LZ4_read16(const void* memPtr)
{
- U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
+ U16 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
}
static U32 LZ4_read32(const void* memPtr)
{
- U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
+ U32 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
}
static reg_t LZ4_read_ARCH(const void* memPtr)
{
- reg_t val; memcpy(&val, memPtr, sizeof(val)); return val;
+ reg_t val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
}
static void LZ4_write16(void* memPtr, U16 value)
{
- memcpy(memPtr, &value, sizeof(value));
+ LZ4_memcpy(memPtr, &value, sizeof(value));
}
static void LZ4_write32(void* memPtr, U32 value)
{
- memcpy(memPtr, &value, sizeof(value));
+ LZ4_memcpy(memPtr, &value, sizeof(value));
}
#endif /* LZ4_FORCE_MEMORY_ACCESS */
@@ -355,14 +404,14 @@ static void LZ4_writeLE16(void* memPtr, U16 value)
}
/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
-LZ4_FORCE_O2_INLINE_GCC_PPC64LE
+LZ4_FORCE_INLINE
void LZ4_wildCopy8(void* dstPtr, const void* srcPtr, void* dstEnd)
{
BYTE* d = (BYTE*)dstPtr;
const BYTE* s = (const BYTE*)srcPtr;
BYTE* const e = (BYTE*)dstEnd;
- do { memcpy(d,s,8); d+=8; s+=8; } while (d<e);
+ do { LZ4_memcpy(d,s,8); d+=8; s+=8; } while (d<e);
}
static const unsigned inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
@@ -370,12 +419,12 @@ static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};
#ifndef LZ4_FAST_DEC_LOOP
-# if defined(__i386__) || defined(__x86_64__)
+# if defined __i386__ || defined _M_IX86 || defined __x86_64__ || defined _M_X64
# define LZ4_FAST_DEC_LOOP 1
# elif defined(__aarch64__) && !defined(__clang__)
/* On aarch64, we disable this optimization for clang because on certain
- * mobile chipsets and clang, it reduces performance. For more information
- * refer to https://github.com/lz4/lz4/pull/707. */
+ * mobile chipsets, performance is reduced with clang. For more information
+ * refer to https://github.com/lz4/lz4/pull/707 */
# define LZ4_FAST_DEC_LOOP 1
# else
# define LZ4_FAST_DEC_LOOP 0
@@ -384,20 +433,22 @@ static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};
#if LZ4_FAST_DEC_LOOP
-LZ4_FORCE_O2_INLINE_GCC_PPC64LE void
+LZ4_FORCE_INLINE void
LZ4_memcpy_using_offset_base(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
{
+ assert(srcPtr + offset == dstPtr);
if (offset < 8) {
+ LZ4_write32(dstPtr, 0); /* silence an msan warning when offset==0 */
dstPtr[0] = srcPtr[0];
dstPtr[1] = srcPtr[1];
dstPtr[2] = srcPtr[2];
dstPtr[3] = srcPtr[3];
srcPtr += inc32table[offset];
- memcpy(dstPtr+4, srcPtr, 4);
+ LZ4_memcpy(dstPtr+4, srcPtr, 4);
srcPtr -= dec64table[offset];
dstPtr += 8;
} else {
- memcpy(dstPtr, srcPtr, 8);
+ LZ4_memcpy(dstPtr, srcPtr, 8);
dstPtr += 8;
srcPtr += 8;
}
@@ -408,49 +459,48 @@ LZ4_memcpy_using_offset_base(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, con
/* customized variant of memcpy, which can overwrite up to 32 bytes beyond dstEnd
* this version copies two times 16 bytes (instead of one time 32 bytes)
* because it must be compatible with offsets >= 16. */
-LZ4_FORCE_O2_INLINE_GCC_PPC64LE void
+LZ4_FORCE_INLINE void
LZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd)
{
BYTE* d = (BYTE*)dstPtr;
const BYTE* s = (const BYTE*)srcPtr;
BYTE* const e = (BYTE*)dstEnd;
- do { memcpy(d,s,16); memcpy(d+16,s+16,16); d+=32; s+=32; } while (d<e);
+ do { LZ4_memcpy(d,s,16); LZ4_memcpy(d+16,s+16,16); d+=32; s+=32; } while (d<e);
}
/* LZ4_memcpy_using_offset() presumes :
* - dstEnd >= dstPtr + MINMATCH
* - there is at least 8 bytes available to write after dstEnd */
-LZ4_FORCE_O2_INLINE_GCC_PPC64LE void
+LZ4_FORCE_INLINE void
LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
{
BYTE v[8];
assert(dstEnd >= dstPtr + MINMATCH);
- LZ4_write32(dstPtr, 0); /* silence an msan warning when offset==0 */
switch(offset) {
case 1:
- memset(v, *srcPtr, 8);
+ MEM_INIT(v, *srcPtr, 8);
break;
case 2:
- memcpy(v, srcPtr, 2);
- memcpy(&v[2], srcPtr, 2);
- memcpy(&v[4], &v[0], 4);
+ LZ4_memcpy(v, srcPtr, 2);
+ LZ4_memcpy(&v[2], srcPtr, 2);
+ LZ4_memcpy(&v[4], v, 4);
break;
case 4:
- memcpy(v, srcPtr, 4);
- memcpy(&v[4], srcPtr, 4);
+ LZ4_memcpy(v, srcPtr, 4);
+ LZ4_memcpy(&v[4], srcPtr, 4);
break;
default:
LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset);
return;
}
- memcpy(dstPtr, v, 8);
+ LZ4_memcpy(dstPtr, v, 8);
dstPtr += 8;
while (dstPtr < dstEnd) {
- memcpy(dstPtr, v, 8);
+ LZ4_memcpy(dstPtr, v, 8);
dstPtr += 8;
}
}
@@ -462,75 +512,92 @@ LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const si
**************************************/
static unsigned LZ4_NbCommonBytes (reg_t val)
{
+ assert(val != 0);
if (LZ4_isLittleEndian()) {
- if (sizeof(val)==8) {
-# if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ if (sizeof(val) == 8) {
+# if defined(_MSC_VER) && (_MSC_VER >= 1800) && defined(_M_AMD64) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ /* x64 CPUs without BMI support interpret `TZCNT` as `REP BSF` */
+ return (unsigned)_tzcnt_u64(val) >> 3;
+# elif defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
unsigned long r = 0;
- _BitScanForward64( &r, (U64)val );
- return (int)(r>>3);
-# elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ _BitScanForward64(&r, (U64)val);
+ return (unsigned)r >> 3;
+# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
+ ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+ !defined(LZ4_FORCE_SW_BITCOUNT)
return (unsigned)__builtin_ctzll((U64)val) >> 3;
# else
- static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
- 0, 3, 1, 3, 1, 4, 2, 7,
- 0, 2, 3, 6, 1, 5, 3, 5,
- 1, 3, 4, 4, 2, 5, 6, 7,
- 7, 0, 1, 2, 3, 3, 4, 6,
- 2, 6, 5, 5, 3, 4, 5, 6,
- 7, 1, 2, 4, 6, 4, 4, 5,
- 7, 2, 6, 5, 7, 6, 7, 7 };
- return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
+ const U64 m = 0x0101010101010101ULL;
+ val ^= val - 1;
+ return (unsigned)(((U64)((val & (m - 1)) * m)) >> 56);
# endif
} else /* 32 bits */ {
-# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
+# if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT)
unsigned long r;
- _BitScanForward( &r, (U32)val );
- return (int)(r>>3);
-# elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ _BitScanForward(&r, (U32)val);
+ return (unsigned)r >> 3;
+# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
+ ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+ !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)
return (unsigned)__builtin_ctz((U32)val) >> 3;
# else
- static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
- 3, 2, 2, 1, 3, 2, 0, 1,
- 3, 3, 1, 2, 2, 2, 2, 0,
- 3, 1, 2, 0, 1, 0, 1, 1 };
- return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
+ const U32 m = 0x01010101;
+ return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24;
# endif
}
} else /* Big Endian CPU */ {
- if (sizeof(val)==8) { /* 64-bits */
-# if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
- unsigned long r = 0;
- _BitScanReverse64( &r, val );
- return (unsigned)(r>>3);
-# elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ if (sizeof(val)==8) {
+# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
+ ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+ !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)
return (unsigned)__builtin_clzll((U64)val) >> 3;
# else
+#if 1
+ /* this method is probably faster,
+ * but adds a 128-byte lookup table */
+ static const unsigned char ctz7_tab[128] = {
+ 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ };
+ U64 const mask = 0x0101010101010101ULL;
+ U64 const t = (((val >> 8) - mask) | val) & mask;
+ return ctz7_tab[(t * 0x0080402010080402ULL) >> 57];
+#else
+ /* this method doesn't consume memory space like the previous one,
+ * but it contains several branches,
+ * which may end up slowing execution */
static const U32 by32 = sizeof(val)*4; /* 32 on 64 bits (goal), 16 on 32 bits.
- Just to avoid some static analyzer complaining about shift by 32 on 32-bits target.
- Note that this code path is never triggered in 32-bits mode. */
+ Just to avoid some static analyzer complaining about shift by 32 on 32-bits target.
+ Note that this code path is never triggered in 32-bits mode. */
unsigned r;
if (!(val>>by32)) { r=4; } else { r=0; val>>=by32; }
if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
r += (!val);
return r;
+#endif
# endif
} else /* 32 bits */ {
-# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
- unsigned long r = 0;
- _BitScanReverse( &r, (unsigned long)val );
- return (unsigned)(r>>3);
-# elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
+# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
+ ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+ !defined(LZ4_FORCE_SW_BITCOUNT)
return (unsigned)__builtin_clz((U32)val) >> 3;
# else
- unsigned r;
- if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
- r += (!val);
- return r;
+ val >>= 8;
+ val = ((((val + 0x00FFFF00) | 0x00FFFFFF) + val) |
+ (val + 0x00FF0000)) >> 24;
+ return (unsigned)val ^ 3;
# endif
}
}
}
+
#define STEPSIZE sizeof(reg_t)
LZ4_FORCE_INLINE
unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
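A standalone sketch of the new 32-bit little-endian software path above (worked value chosen for illustration), showing why the branchless expression counts trailing zero bytes:

    /* (val - 1) ^ val sets every bit up to and including the lowest set bit;
     * masking with m - 1 (0x01010100) keeps one marker bit per fully-zero low byte;
     * multiplying by m (0x01010101) sums the markers into the top byte. */
    static unsigned ctzBytes32(unsigned val)   /* val != 0, as asserted above */
    {
        const unsigned m = 0x01010101;
        return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24;
    }
    /* example : ctzBytes32(0x12340000) == 2, since its two lowest bytes are zero */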
@@ -605,7 +672,7 @@ typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }
const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }
int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); }
-int LZ4_sizeofState() { return LZ4_STREAMSIZE; }
+int LZ4_sizeofState(void) { return LZ4_STREAMSIZE; }
/*-************************************
@@ -628,7 +695,7 @@ int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
/*-******************************
* Compression functions
********************************/
-static U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
+LZ4_FORCE_INLINE U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
{
if (tableType == byU16)
return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
@@ -636,7 +703,7 @@ static U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
}
-static U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
+LZ4_FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
{
const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG;
if (LZ4_isLittleEndian()) {
@@ -654,7 +721,7 @@ LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tab
return LZ4_hash4(LZ4_read32(p), tableType);
}
-static void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType)
+LZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType)
{
switch (tableType)
{
@@ -666,7 +733,7 @@ static void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType)
}
}
-static void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType)
+LZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType)
{
switch (tableType)
{
@@ -678,7 +745,7 @@ static void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t cons
}
}
-static void LZ4_putPositionOnHash(const BYTE* p, U32 h,
+LZ4_FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h,
void* tableBase, tableType_t const tableType,
const BYTE* srcBase)
{
@@ -703,7 +770,7 @@ LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_
* Assumption 1 : only valid if tableType == byU32 or byU16.
* Assumption 2 : h is presumed valid (within limits of hash table)
*/
-static U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType)
+LZ4_FORCE_INLINE U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType)
{
LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2);
if (tableType == byU32) {
@@ -739,22 +806,13 @@ LZ4_FORCE_INLINE void
LZ4_prepareTable(LZ4_stream_t_internal* const cctx,
const int inputSize,
const tableType_t tableType) {
- /* If compression failed during the previous step, then the context
- * is marked as dirty, therefore, it has to be fully reset.
- */
- if (cctx->dirty) {
- DEBUGLOG(5, "LZ4_prepareTable: Full reset for %p", cctx);
- MEM_INIT(cctx, 0, sizeof(LZ4_stream_t_internal));
- return;
- }
-
/* If the table hasn't been used, it's guaranteed to be zeroed out, and is
* therefore safe to use no matter what mode we're in. Otherwise, we figure
* out if it's safe to leave as is or whether it needs to be reset.
*/
- if (cctx->tableType != clearedTable) {
+ if ((tableType_t)cctx->tableType != clearedTable) {
assert(inputSize >= 0);
- if (cctx->tableType != tableType
+ if ((tableType_t)cctx->tableType != tableType
|| ((tableType == byU16) && cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU)
|| ((tableType == byU32) && cctx->currentOffset > 1 GB)
|| tableType == byPtr
@@ -763,7 +821,7 @@ LZ4_prepareTable(LZ4_stream_t_internal* const cctx,
DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", cctx);
MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);
cctx->currentOffset = 0;
- cctx->tableType = clearedTable;
+ cctx->tableType = (U32)clearedTable;
} else {
DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)");
}
@@ -785,8 +843,12 @@ LZ4_prepareTable(LZ4_stream_t_internal* const cctx,
}
/** LZ4_compress_generic() :
- inlined, to ensure branches are decided at compilation time */
-LZ4_FORCE_INLINE int LZ4_compress_generic(
+ * inlined, to ensure branches are decided at compilation time.
+ * Presumed already validated at this stage:
+ * - source != NULL
+ * - inputSize > 0
+ */
+LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
LZ4_stream_t_internal* const cctx,
const char* const source,
char* const dest,
@@ -815,7 +877,7 @@ LZ4_FORCE_INLINE int LZ4_compress_generic(
int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx);
U32 const prefixIdxLimit = startIndex - dictSize; /* used when dictDirective == dictSmall */
- const BYTE* const dictEnd = dictionary + dictSize;
+ const BYTE* const dictEnd = dictionary ? dictionary + dictSize : dictionary;
const BYTE* anchor = (const BYTE*) source;
const BYTE* const iend = ip + inputSize;
const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1;
@@ -823,7 +885,7 @@ LZ4_FORCE_INLINE int LZ4_compress_generic(
/* the dictCtx currentOffset is indexed on the start of the dictionary,
* while a dictionary in the current context precedes the currentOffset */
- const BYTE* dictBase = (dictDirective == usingDictCtx) ?
+ const BYTE* dictBase = !dictionary ? NULL : (dictDirective == usingDictCtx) ?
dictionary + dictSize - dictCtx->currentOffset :
dictionary + dictSize - startIndex;
@@ -833,11 +895,11 @@ LZ4_FORCE_INLINE int LZ4_compress_generic(
U32 offset = 0;
U32 forwardH;
- DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, tableType=%u", inputSize, tableType);
+ DEBUGLOG(5, "LZ4_compress_generic_validated: srcSize=%i, tableType=%u", inputSize, tableType);
+ assert(ip != NULL);
/* If init conditions are not met, we don't have to mark stream
* as having dirty context, since no action was taken yet */
if (outputDirective == fillOutput && maxOutputSize < 1) { return 0; } /* Impossible to store anything */
- if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) { return 0; } /* Unsupported inputSize, too large (or negative) */
if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) { return 0; } /* Size too large (not within 64K limit) */
if (tableType==byPtr) assert(dictDirective==noDict); /* only supported use case with byPtr */
assert(acceleration >= 1);
@@ -854,7 +916,7 @@ LZ4_FORCE_INLINE int LZ4_compress_generic(
cctx->dictSize += (U32)inputSize;
}
cctx->currentOffset += (U32)inputSize;
- cctx->tableType = (U16)tableType;
+ cctx->tableType = (U32)tableType;
if (inputSize<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
@@ -1147,13 +1209,14 @@ _last_literals:
if (outputDirective == fillOutput) {
/* adapt lastRun to fill 'dst' */
assert(olimit >= op);
- lastRun = (size_t)(olimit-op) - 1;
- lastRun -= (lastRun+240)/255;
+ lastRun = (size_t)(olimit-op) - 1/*token*/;
+ lastRun -= (lastRun + 256 - RUN_MASK) / 256; /*additional length tokens*/
} else {
assert(outputDirective == limitedOutput);
return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
}
}
+ DEBUGLOG(6, "Final literal run : %i literals", (int)lastRun);
if (lastRun >= RUN_MASK) {
size_t accumulator = lastRun - RUN_MASK;
*op++ = RUN_MASK << ML_BITS;
@@ -1162,7 +1225,7 @@ _last_literals:
} else {
*op++ = (BYTE)(lastRun<<ML_BITS);
}
- memcpy(op, anchor, lastRun);
+ LZ4_memcpy(op, anchor, lastRun);
ip = anchor + lastRun;
op += lastRun;
}
@@ -1170,18 +1233,60 @@ _last_literals:
if (outputDirective == fillOutput) {
*inputConsumed = (int) (((const char*)ip)-source);
}
- DEBUGLOG(5, "LZ4_compress_generic: compressed %i bytes into %i bytes", inputSize, (int)(((char*)op) - dest));
result = (int)(((char*)op) - dest);
assert(result > 0);
+ DEBUGLOG(5, "LZ4_compress_generic: compressed %i bytes into %i bytes", inputSize, result);
return result;
}
+/** LZ4_compress_generic() :
+ * inlined, to ensure branches are decided at compilation time;
+ * takes care of src == (NULL, 0)
+ * and forward the rest to LZ4_compress_generic_validated */
+LZ4_FORCE_INLINE int LZ4_compress_generic(
+ LZ4_stream_t_internal* const cctx,
+ const char* const src,
+ char* const dst,
+ const int srcSize,
+ int *inputConsumed, /* only written when outputDirective == fillOutput */
+ const int dstCapacity,
+ const limitedOutput_directive outputDirective,
+ const tableType_t tableType,
+ const dict_directive dictDirective,
+ const dictIssue_directive dictIssue,
+ const int acceleration)
+{
+ DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, dstCapacity=%i",
+ srcSize, dstCapacity);
+
+ if ((U32)srcSize > (U32)LZ4_MAX_INPUT_SIZE) { return 0; } /* Unsupported srcSize, too large (or negative) */
+ if (srcSize == 0) { /* src == NULL supported if srcSize == 0 */
+ if (outputDirective != notLimited && dstCapacity <= 0) return 0; /* no output, can't write anything */
+ DEBUGLOG(5, "Generating an empty block");
+ assert(outputDirective == notLimited || dstCapacity >= 1);
+ assert(dst != NULL);
+ dst[0] = 0;
+ if (outputDirective == fillOutput) {
+ assert (inputConsumed != NULL);
+ *inputConsumed = 0;
+ }
+ return 1;
+ }
+ assert(src != NULL);
+
+ return LZ4_compress_generic_validated(cctx, src, dst, srcSize,
+ inputConsumed, /* only written into if outputDirective == fillOutput */
+ dstCapacity, outputDirective,
+ tableType, dictDirective, dictIssue, acceleration);
+}
+
int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
LZ4_stream_t_internal* const ctx = & LZ4_initStream(state, sizeof(LZ4_stream_t)) -> internal_donotuse;
assert(ctx != NULL);
- if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
+ if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
+ if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
if (maxOutputSize >= LZ4_compressBound(inputSize)) {
if (inputSize < LZ4_64Klimit) {
return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, byU16, noDict, noDictIssue, acceleration);
@@ -1211,7 +1316,8 @@ int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int
int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)
{
LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
- if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
+ if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
+ if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
if (dstCapacity >= LZ4_compressBound(srcSize)) {
if (srcSize < LZ4_64Klimit) {
@@ -1270,22 +1376,6 @@ int LZ4_compress_default(const char* src, char* dst, int srcSize, int maxOutputS
}
-/* hidden debug function */
-/* strangely enough, gcc generates faster code when this function is uncommented, even if unused */
-int LZ4_compress_fast_force(const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)
-{
- LZ4_stream_t ctx;
- LZ4_initStream(&ctx, sizeof(ctx));
-
- if (srcSize < LZ4_64Klimit) {
- return LZ4_compress_generic(&ctx.internal_donotuse, src, dst, srcSize, NULL, dstCapacity, limitedOutput, byU16, noDict, noDictIssue, acceleration);
- } else {
- tableType_t const addrMode = (sizeof(void*) > 4) ? byU32 : byPtr;
- return LZ4_compress_generic(&ctx.internal_donotuse, src, dst, srcSize, NULL, dstCapacity, limitedOutput, addrMode, noDict, noDictIssue, acceleration);
- }
-}
-
-
/* Note!: This function leaves the stream in an unclean/broken state!
* It is not safe to subsequently use the same state with a _fastReset() or
* _continue() call without resetting it. */
@@ -1340,27 +1430,23 @@ LZ4_stream_t* LZ4_createStream(void)
return lz4s;
}
-#ifndef _MSC_VER /* for some reason, Visual fails the aligment test on 32-bit x86 :
- it reports an aligment of 8-bytes,
- while actually aligning LZ4_stream_t on 4 bytes. */
static size_t LZ4_stream_t_alignment(void)
{
- struct { char c; LZ4_stream_t t; } t_a;
- return sizeof(t_a) - sizeof(t_a.t);
-}
+#if LZ4_ALIGN_TEST
+ typedef struct { char c; LZ4_stream_t t; } t_a;
+ return sizeof(t_a) - sizeof(LZ4_stream_t);
+#else
+ return 1; /* effectively disabled */
#endif
+}
LZ4_stream_t* LZ4_initStream (void* buffer, size_t size)
{
DEBUGLOG(5, "LZ4_initStream");
if (buffer == NULL) { return NULL; }
if (size < sizeof(LZ4_stream_t)) { return NULL; }
-#ifndef _MSC_VER /* for some reason, Visual fails the aligment test on 32-bit x86 :
- it reports an aligment of 8-bytes,
- while actually aligning LZ4_stream_t on 4 bytes. */
- if (((size_t)buffer) & (LZ4_stream_t_alignment() - 1)) { return NULL; } /* alignment check */
-#endif
- MEM_INIT(buffer, 0, sizeof(LZ4_stream_t));
+ if (!LZ4_isAligned(buffer, LZ4_stream_t_alignment())) return NULL;
+ MEM_INIT(buffer, 0, sizeof(LZ4_stream_t_internal));
return (LZ4_stream_t*)buffer;
}
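A usage sketch for the reworked initializer, with a hypothetical caller that allocates the state statically (error handling reduced to a bare failure return):

    #include "lz4.h"

    int compressOnStack(const char* src, char* dst, int srcSize, int dstCapacity)
    {
        LZ4_stream_t state;                               /* static allocation, no malloc */
        if (LZ4_initStream(&state, sizeof(state)) == NULL)
            return 0;                                     /* size or alignment rejected */
        return LZ4_compress_fast_extState(&state, src, dst, srcSize, dstCapacity, 1);
    }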
@@ -1369,7 +1455,7 @@ LZ4_stream_t* LZ4_initStream (void* buffer, size_t size)
void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
{
DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream);
- MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t));
+ MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t_internal));
}
void LZ4_resetStream_fast(LZ4_stream_t* ctx) {
@@ -1418,7 +1504,7 @@ int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
base = dictEnd - dict->currentOffset;
dict->dictionary = p;
dict->dictSize = (U32)(dictEnd - p);
- dict->tableType = tableType;
+ dict->tableType = (U32)tableType;
while (p <= dictEnd-HASH_UNIT) {
LZ4_putPosition(p, dict->hashTable, tableType, base);
@@ -1436,12 +1522,6 @@ void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dict
workingStream, dictionaryStream,
dictCtx != NULL ? dictCtx->dictSize : 0);
- /* Calling LZ4_resetStream_fast() here makes sure that changes will not be
- * erased by subsequent calls to LZ4_resetStream_fast() in case stream was
- * marked as having dirty context, e.g. requiring full reset.
- */
- LZ4_resetStream_fast(workingStream);
-
if (dictCtx != NULL) {
/* If the current offset is zero, we will never look in the
* external dictionary context, since there is no value a table
@@ -1493,9 +1573,9 @@ int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,
DEBUGLOG(5, "LZ4_compress_fast_continue (inputSize=%i)", inputSize);
- if (streamPtr->dirty) { return 0; } /* Uninitialized structure detected */
LZ4_renormDictT(streamPtr, inputSize); /* avoid index overflow */
- if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
+ if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
+ if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
/* invalidate tiny dictionaries */
if ( (streamPtr->dictSize-1 < 4-1) /* intentional underflow */
@@ -1538,7 +1618,7 @@ int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,
* cost to copy the dictionary's tables into the active context,
* so that the compression loop is only looking into one table.
*/
- memcpy(streamPtr, streamPtr->dictCtx, sizeof(LZ4_stream_t));
+ LZ4_memcpy(streamPtr, streamPtr->dictCtx, sizeof(*streamPtr));
result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
} else {
result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration);
@@ -1593,7 +1673,9 @@ int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
if ((U32)dictSize > 64 KB) { dictSize = 64 KB; } /* useless to define a dictionary > 64 KB */
if ((U32)dictSize > dict->dictSize) { dictSize = (int)dict->dictSize; }
- memmove(safeBuffer, previousDictEnd - dictSize, dictSize);
+ if (safeBuffer == NULL) assert(dictSize == 0);
+ if (dictSize > 0)
+ memmove(safeBuffer, previousDictEnd - dictSize, dictSize);
dict->dictionary = (const BYTE*)safeBuffer;
dict->dictSize = (U32)dictSize;
@@ -1623,25 +1705,27 @@ typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;
*/
typedef enum { loop_error = -2, initial_error = -1, ok = 0 } variable_length_error;
LZ4_FORCE_INLINE unsigned
-read_variable_length(const BYTE**ip, const BYTE* lencheck, int loop_check, int initial_check, variable_length_error* error)
-{
- unsigned length = 0;
- unsigned s;
- if (initial_check && unlikely((*ip) >= lencheck)) { /* overflow detection */
- *error = initial_error;
- return length;
- }
- do {
- s = **ip;
- (*ip)++;
- length += s;
- if (loop_check && unlikely((*ip) >= lencheck)) { /* overflow detection */
- *error = loop_error;
- return length;
+read_variable_length(const BYTE**ip, const BYTE* lencheck,
+ int loop_check, int initial_check,
+ variable_length_error* error)
+{
+ U32 length = 0;
+ U32 s;
+ if (initial_check && unlikely((*ip) >= lencheck)) { /* overflow detection */
+ *error = initial_error;
+ return length;
}
- } while (s==255);
+ do {
+ s = **ip;
+ (*ip)++;
+ length += s;
+ if (loop_check && unlikely((*ip) >= lencheck)) { /* overflow detection */
+ *error = loop_error;
+ return length;
+ }
+ } while (s==255);
- return length;
+ return length;
}
/*! LZ4_decompress_generic() :
@@ -1722,7 +1806,7 @@ LZ4_decompress_generic(
/* decode literal length */
if (length == RUN_MASK) {
variable_length_error error = ok;
- length += read_variable_length(&ip, iend-RUN_MASK, endOnInput, endOnInput, &error);
+ length += read_variable_length(&ip, iend-RUN_MASK, (int)endOnInput, (int)endOnInput, &error);
if (error == initial_error) { goto _output_error; }
if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
@@ -1746,12 +1830,12 @@ LZ4_decompress_generic(
/* We don't need to check oend, since we check it once for each loop below */
if (ip > iend-(16 + 1/*max lit + offset + nextToken*/)) { goto safe_literal_copy; }
/* Literals can only be 14, but hope compilers optimize if we copy by a register size */
- memcpy(op, ip, 16);
+ LZ4_memcpy(op, ip, 16);
} else { /* LZ4_decompress_fast() */
/* LZ4_decompress_fast() cannot copy more than 8 bytes at a time :
* it doesn't know input length, and relies on end-of-block properties */
- memcpy(op, ip, 8);
- if (length > 8) { memcpy(op+8, ip+8, 8); }
+ LZ4_memcpy(op, ip, 8);
+ if (length > 8) { LZ4_memcpy(op+8, ip+8, 8); }
}
ip += length; op = cpy;
}
@@ -1765,10 +1849,10 @@ LZ4_decompress_generic(
length = token & ML_MASK;
if (length == ML_MASK) {
- variable_length_error error = ok;
- if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
- length += read_variable_length(&ip, iend - LASTLITERALS + 1, endOnInput, 0, &error);
- if (error != ok) { goto _output_error; }
+ variable_length_error error = ok;
+ if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
+ length += read_variable_length(&ip, iend - LASTLITERALS + 1, (int)endOnInput, 0, &error);
+ if (error != ok) { goto _output_error; }
if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */
length += MINMATCH;
if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
@@ -1787,19 +1871,20 @@ LZ4_decompress_generic(
assert(match <= op);
assert(op + 18 <= oend);
- memcpy(op, match, 8);
- memcpy(op+8, match+8, 8);
- memcpy(op+16, match+16, 2);
+ LZ4_memcpy(op, match, 8);
+ LZ4_memcpy(op+8, match+8, 8);
+ LZ4_memcpy(op+16, match+16, 2);
op += length;
continue;
} } }
- if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
+ if (checkOffset && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
/* match starting within external dictionary */
if ((dict==usingExtDict) && (match < lowPrefix)) {
if (unlikely(op+length > oend-LASTLITERALS)) {
if (partialDecoding) {
- length = MIN(length, (size_t)(oend-op)); /* reach end of buffer */
+ DEBUGLOG(7, "partialDecoding: dictionary match, close to dstEnd");
+ length = MIN(length, (size_t)(oend-op));
} else {
goto _output_error; /* end-of-block condition violated */
} }
@@ -1812,14 +1897,14 @@ LZ4_decompress_generic(
/* match stretches into both external dictionary and current block */
size_t const copySize = (size_t)(lowPrefix - match);
size_t const restSize = length - copySize;
- memcpy(op, dictEnd - copySize, copySize);
+ LZ4_memcpy(op, dictEnd - copySize, copySize);
op += copySize;
if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */
BYTE* const endOfMatch = op + restSize;
const BYTE* copyFrom = lowPrefix;
while (op < endOfMatch) { *op++ = *copyFrom++; }
} else {
- memcpy(op, lowPrefix, restSize);
+ LZ4_memcpy(op, lowPrefix, restSize);
op += restSize;
} }
continue;
@@ -1860,7 +1945,7 @@ LZ4_decompress_generic(
/* strictly "less than" on input, to re-enter the loop with at least one byte */
&& likely((endOnInput ? ip < shortiend : 1) & (op <= shortoend)) ) {
/* Copy the literals */
- memcpy(op, ip, endOnInput ? 16 : 8);
+ LZ4_memcpy(op, ip, endOnInput ? 16 : 8);
op += length; ip += length;
/* The second stage: prepare for match copying, decode full info.
@@ -1875,9 +1960,9 @@ LZ4_decompress_generic(
&& (offset >= 8)
&& (dict==withPrefix64k || match >= lowPrefix) ) {
/* Copy the match. */
- memcpy(op + 0, match + 0, 8);
- memcpy(op + 8, match + 8, 8);
- memcpy(op +16, match +16, 2);
+ LZ4_memcpy(op + 0, match + 0, 8);
+ LZ4_memcpy(op + 8, match + 8, 8);
+ LZ4_memcpy(op +16, match +16, 2);
op += length + MINMATCH;
/* Both stages worked, load the next token. */
continue;
@@ -1891,7 +1976,7 @@ LZ4_decompress_generic(
/* decode literal length */
if (length == RUN_MASK) {
variable_length_error error = ok;
- length += read_variable_length(&ip, iend-RUN_MASK, endOnInput, endOnInput, &error);
+ length += read_variable_length(&ip, iend-RUN_MASK, (int)endOnInput, (int)endOnInput, &error);
if (error == initial_error) { goto _output_error; }
if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
@@ -1907,29 +1992,34 @@ LZ4_decompress_generic(
|| ((!endOnInput) && (cpy>oend-WILDCOPYLENGTH)) )
{
/* We've either hit the input parsing restriction or the output parsing restriction.
- * If we've hit the input parsing condition then this must be the last sequence.
- * If we've hit the output parsing condition then we are either using partialDecoding
- * or we've hit the output parsing condition.
+ * In the normal scenario, decoding a full block, it must be the last sequence,
+ * otherwise it's an error (invalid input or dimensions).
+ * In partialDecoding scenario, it's necessary to ensure there is no buffer overflow.
*/
if (partialDecoding) {
/* Since we are partial decoding we may be in this block because of the output parsing
* restriction, which is not valid since the output buffer is allowed to be undersized.
*/
assert(endOnInput);
- /* If we're in this block because of the input parsing condition, then we must be on the
- * last sequence (or invalid), so we must check that we exactly consume the input.
+ DEBUGLOG(7, "partialDecoding: copying literals, close to input or output end")
+ DEBUGLOG(7, "partialDecoding: literal length = %u", (unsigned)length);
+ DEBUGLOG(7, "partialDecoding: remaining space in dstBuffer : %i", (int)(oend - op));
+ DEBUGLOG(7, "partialDecoding: remaining space in srcBuffer : %i", (int)(iend - ip));
+ /* Finishing in the middle of a literals segment,
+ * due to lack of input.
*/
- if ((ip+length>iend-(2+1+LASTLITERALS)) && (ip+length != iend)) { goto _output_error; }
- assert(ip+length <= iend);
- /* We are finishing in the middle of a literals segment.
- * Break after the copy.
+ if (ip+length > iend) {
+ length = (size_t)(iend-ip);
+ cpy = op + length;
+ }
+ /* Finishing in the middle of a literals segment,
+ * due to lack of output space.
*/
if (cpy > oend) {
cpy = oend;
assert(op<=oend);
length = (size_t)(oend-op);
}
- assert(ip+length <= iend);
} else {
/* We must be on the last sequence because of the parsing limitations so check
* that we exactly regenerate the original size (must be exact when !endOnInput).
@@ -1938,16 +2028,22 @@ LZ4_decompress_generic(
/* We must be on the last sequence (or invalid) because of the parsing limitations
* so check that we exactly consume the input and don't overrun the output buffer.
*/
- if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) { goto _output_error; }
+ if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) {
+ DEBUGLOG(6, "should have been last run of literals")
+ DEBUGLOG(6, "ip(%p) + length(%i) = %p != iend (%p)", ip, (int)length, ip+length, iend);
+ DEBUGLOG(6, "or cpy(%p) > oend(%p)", cpy, oend);
+ goto _output_error;
+ }
}
- memmove(op, ip, length); /* supports overlapping memory regions, which only matters for in-place decompression scenarios */
+ memmove(op, ip, length); /* supports overlapping memory regions; only matters for in-place decompression scenarios */
ip += length;
op += length;
- /* Necessarily EOF when !partialDecoding. When partialDecoding
- * it is EOF if we've either filled the output buffer or hit
- * the input parsing restriction.
+ /* Necessarily EOF when !partialDecoding.
+ * When partialDecoding, it is EOF if we've either
+ * filled the output buffer or
+ * can't proceed with reading an offset for following match.
*/
- if (!partialDecoding || (cpy == oend) || (ip == iend)) {
+ if (!partialDecoding || (cpy == oend) || (ip >= (iend-2))) {
break;
}
} else {
@@ -1965,7 +2061,7 @@ LZ4_decompress_generic(
_copy_match:
if (length == ML_MASK) {
variable_length_error error = ok;
- length += read_variable_length(&ip, iend - LASTLITERALS + 1, endOnInput, 0, &error);
+ length += read_variable_length(&ip, iend - LASTLITERALS + 1, (int)endOnInput, 0, &error);
if (error != ok) goto _output_error;
if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error; /* overflow detection */
}
@@ -1990,14 +2086,14 @@ LZ4_decompress_generic(
/* match stretches into both external dictionary and current block */
size_t const copySize = (size_t)(lowPrefix - match);
size_t const restSize = length - copySize;
- memcpy(op, dictEnd - copySize, copySize);
+ LZ4_memcpy(op, dictEnd - copySize, copySize);
op += copySize;
if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */
BYTE* const endOfMatch = op + restSize;
const BYTE* copyFrom = lowPrefix;
while (op < endOfMatch) *op++ = *copyFrom++;
} else {
- memcpy(op, lowPrefix, restSize);
+ LZ4_memcpy(op, lowPrefix, restSize);
op += restSize;
} }
continue;
@@ -2016,7 +2112,7 @@ LZ4_decompress_generic(
if (matchEnd > op) { /* overlap copy */
while (op < copyEnd) { *op++ = *match++; }
} else {
- memcpy(op, match, mlen);
+ LZ4_memcpy(op, match, mlen);
}
op = copyEnd;
if (op == oend) { break; }
@@ -2030,10 +2126,10 @@ LZ4_decompress_generic(
op[2] = match[2];
op[3] = match[3];
match += inc32table[offset];
- memcpy(op+4, match, 4);
+ LZ4_memcpy(op+4, match, 4);
match -= dec64table[offset];
} else {
- memcpy(op, match, 8);
+ LZ4_memcpy(op, match, 8);
match += 8;
}
op += 8;
@@ -2048,7 +2144,7 @@ LZ4_decompress_generic(
}
while (op < cpy) { *op++ = *match++; }
} else {
- memcpy(op, match, 8);
+ LZ4_memcpy(op, match, 8);
if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); }
}
op = cpy; /* wildcopy correction */
@@ -2056,6 +2152,7 @@ LZ4_decompress_generic(
/* end of decoding */
if (endOnInput) {
+ DEBUGLOG(5, "decoded %i bytes", (int) (((char*)op)-dst));
return (int) (((char*)op)-dst); /* Nb of output bytes decoded */
} else {
return (int) (((const char*)ip)-src); /* Nb of input bytes read */
@@ -2070,7 +2167,7 @@ LZ4_decompress_generic(
/*===== Instantiate the API decoding functions. =====*/
-LZ4_FORCE_O2_GCC_PPC64LE
+LZ4_FORCE_O2
int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)
{
return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize,
@@ -2078,7 +2175,7 @@ int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int
(BYTE*)dest, NULL, 0);
}
-LZ4_FORCE_O2_GCC_PPC64LE
+LZ4_FORCE_O2
int LZ4_decompress_safe_partial(const char* src, char* dst, int compressedSize, int targetOutputSize, int dstCapacity)
{
dstCapacity = MIN(targetOutputSize, dstCapacity);
@@ -2087,7 +2184,7 @@ int LZ4_decompress_safe_partial(const char* src, char* dst, int compressedSize,
noDict, (BYTE*)dst, NULL, 0);
}
-LZ4_FORCE_O2_GCC_PPC64LE
+LZ4_FORCE_O2
int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
{
return LZ4_decompress_generic(source, dest, 0, originalSize,
@@ -2097,7 +2194,7 @@ int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
/*===== Instantiate a few more decoding cases, used more than once. =====*/
-LZ4_FORCE_O2_GCC_PPC64LE /* Exported, an obsolete API function. */
+LZ4_FORCE_O2 /* Exported, an obsolete API function. */
int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
{
return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
@@ -2113,7 +2210,7 @@ int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int origin
return LZ4_decompress_fast(source, dest, originalSize);
}
-LZ4_FORCE_O2_GCC_PPC64LE
+LZ4_FORCE_O2
static int LZ4_decompress_safe_withSmallPrefix(const char* source, char* dest, int compressedSize, int maxOutputSize,
size_t prefixSize)
{
@@ -2122,7 +2219,7 @@ static int LZ4_decompress_safe_withSmallPrefix(const char* source, char* dest, i
(BYTE*)dest-prefixSize, NULL, 0);
}
-LZ4_FORCE_O2_GCC_PPC64LE
+LZ4_FORCE_O2
int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
int compressedSize, int maxOutputSize,
const void* dictStart, size_t dictSize)
@@ -2132,7 +2229,7 @@ int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
(BYTE*)dest, (const BYTE*)dictStart, dictSize);
}
-LZ4_FORCE_O2_GCC_PPC64LE
+LZ4_FORCE_O2
static int LZ4_decompress_fast_extDict(const char* source, char* dest, int originalSize,
const void* dictStart, size_t dictSize)
{
@@ -2221,7 +2318,7 @@ int LZ4_decoderRingBufferSize(int maxBlockSize)
If it's not possible, save the relevant part of decoded data into a safe buffer,
and indicate where it stands using LZ4_setStreamDecode()
*/
-LZ4_FORCE_O2_GCC_PPC64LE
+LZ4_FORCE_O2
int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)
{
LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
@@ -2261,7 +2358,7 @@ int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const ch
return result;
}
-LZ4_FORCE_O2_GCC_PPC64LE
+LZ4_FORCE_O2
int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize)
{
LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
@@ -2374,7 +2471,7 @@ int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize,
/* Obsolete Streaming functions */
-int LZ4_sizeofStreamState() { return LZ4_STREAMSIZE; }
+int LZ4_sizeofStreamState(void) { return LZ4_STREAMSIZE; }
int LZ4_resetStreamState(void* state, char* inputBuffer)
{
diff --git a/lib/lz4.h b/lib/lz4.h
index 32108e2..7ab1e48 100644
--- a/lib/lz4.h
+++ b/lib/lz4.h
@@ -100,7 +100,7 @@ extern "C" {
/*------ Version ------*/
#define LZ4_VERSION_MAJOR 1 /* for breaking interface changes */
#define LZ4_VERSION_MINOR 9 /* for new (non-breaking) interface capabilities */
-#define LZ4_VERSION_RELEASE 2 /* for tweaks, bug-fixes, or development */
+#define LZ4_VERSION_RELEASE 3 /* for tweaks, bug-fixes, or development */
#define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE)
@@ -186,7 +186,8 @@ LZ4LIB_API int LZ4_compressBound(int inputSize);
The larger the acceleration value, the faster the algorithm, but also the lesser the compression.
It's a trade-off. It can be fine-tuned, with each successive value providing roughly +~3% to speed.
An acceleration value of "1" is the same as regular LZ4_compress_default().
- Values <= 0 will be replaced by ACCELERATION_DEFAULT (currently == 1, see lz4.c).
+ Values <= 0 will be replaced by LZ4_ACCELERATION_DEFAULT (currently == 1, see lz4.c).
+ Values > LZ4_ACCELERATION_MAX will be replaced by LZ4_ACCELERATION_MAX (currently == 65537, see lz4.c).
*/
LZ4LIB_API int LZ4_compress_fast (const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
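For instance, in the sketch below (a `src` buffer of 1024 bytes is assumed), both calls are valid because out-of-range acceleration values are clamped rather than rejected:

    char dst[LZ4_COMPRESSBOUND(1024)];
    int n1 = LZ4_compress_fast(src, dst, 1024, (int)sizeof(dst), 0);        /* 0 -> default (1) */
    int n2 = LZ4_compress_fast(src, dst, 1024, (int)sizeof(dst), 1000000);  /* clamped to 65537 */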
@@ -212,7 +213,18 @@ LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* src, char* d
* New value is necessarily <= input value.
* @return : Nb bytes written into 'dst' (necessarily <= targetDestSize)
* or 0 if compression fails.
-*/
+ *
+ * Note : from v1.8.2 to v1.9.1, this function had a bug (fixed in v1.9.2+):
+ * the produced compressed content could, in specific circumstances,
+ * require to be decompressed into a destination buffer larger
+ * by at least 1 byte than the content to decompress.
+ * If an application uses `LZ4_compress_destSize()`,
+ * it's highly recommended to update liblz4 to v1.9.2 or better.
+ * If this can't be done or ensured,
+ * the receiving decompression function should provide
+ * a dstCapacity which is > decompressedSize, by at least 1 byte.
+ * See https://github.com/lz4/lz4/issues/859 for details
+ */
LZ4LIB_API int LZ4_compress_destSize (const char* src, char* dst, int* srcSizePtr, int targetDstSize);
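A sketch of the recommended mitigation on the receiving side, when the producer may run an affected liblz4 version (`cmp`, `cmpSize` and the expected decompressed size are assumed known to the caller):

    enum { EXPECTED_SIZE = 1024 };          /* assumed known decompressed size */
    char out[EXPECTED_SIZE + 1];            /* one spare byte, per the note above */
    int const r = LZ4_decompress_safe(cmp, out, cmpSize, (int)sizeof(out));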
@@ -220,25 +232,35 @@ LZ4LIB_API int LZ4_compress_destSize (const char* src, char* dst, int* srcSizePt
* Decompress an LZ4 compressed block, of size 'srcSize' at position 'src',
* into destination buffer 'dst' of size 'dstCapacity'.
* Up to 'targetOutputSize' bytes will be decoded.
- * The function stops decoding on reaching this objective,
- * which can boost performance when only the beginning of a block is required.
+ * The function stops decoding on reaching this objective.
+ * This can be useful to boost performance
+ * whenever only the beginning of a block is required.
*
- * @return : the number of bytes decoded in `dst` (necessarily <= dstCapacity)
+ * @return : the number of bytes decoded in `dst` (necessarily <= targetOutputSize)
* If source stream is detected malformed, function returns a negative result.
*
- * Note : @return can be < targetOutputSize, if compressed block contains less data.
+ * Note 1 : @return can be < targetOutputSize, if compressed block contains less data.
+ *
+ * Note 2 : targetOutputSize must be <= dstCapacity
*
- * Note 2 : this function features 2 parameters, targetOutputSize and dstCapacity,
- * and expects targetOutputSize <= dstCapacity.
- * It effectively stops decoding on reaching targetOutputSize,
+ * Note 3 : this function effectively stops decoding on reaching targetOutputSize,
* so dstCapacity is kind of redundant.
- * This is because in a previous version of this function,
- * decoding operation would not "break" a sequence in the middle.
- * As a consequence, there was no guarantee that decoding would stop at exactly targetOutputSize,
+ * This is because in older versions of this function,
+ * decoding operation would still write complete sequences.
+ * Therefore, there was no guarantee that it would stop writing at exactly targetOutputSize,
* it could write more bytes, though only up to dstCapacity.
* Some "margin" used to be required for this operation to work properly.
- * This is no longer necessary.
- * The function nonetheless keeps its signature, in an effort to not break API.
+ * Thankfully, this is no longer necessary.
+ * The function nonetheless keeps the same signature, in an effort to preserve API compatibility.
+ *
+ * Note 4 : If srcSize is the exact size of the block,
+ * then targetOutputSize can be any value,
+ * including larger than the block's decompressed size.
+ * The function will, at most, generate block's decompressed size.
+ *
+ * Note 5 : If srcSize is _larger_ than the block's compressed size,
+ * then targetOutputSize **MUST** be <= block's decompressed size.
+ * Otherwise, *silent corruption will occur*.
*/
LZ4LIB_API int LZ4_decompress_safe_partial (const char* src, char* dst, int srcSize, int targetOutputSize, int dstCapacity);
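A short sketch of the Note 4 scenario (exact srcSize known, helper name hypothetical):

    #include "lz4.h"

    /* Decode only the first 'want' bytes of a block whose exact
     * compressed size 'cSize' is known. */
    int peek_prefix(const char* cBuf, int cSize,
                    char* out, int want, int outCapacity)
    {
        /* may return < want if the block contains less data (Note 1) */
        return LZ4_decompress_safe_partial(cBuf, out, cSize, want, outCapacity);
    }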
@@ -547,74 +569,64 @@ LZ4LIB_STATIC_API void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const
#define LZ4_H_98237428734687
/*-************************************************************
- * PRIVATE DEFINITIONS
+ * Private Definitions
**************************************************************
* Do not use these definitions directly.
* They are only exposed to allow static allocation of `LZ4_stream_t` and `LZ4_streamDecode_t`.
- * Accessing members will expose code to API and/or ABI break in future versions of the library.
+ * Accessing members will expose user code to API and/or ABI break in future versions of the library.
**************************************************************/
#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2)
#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG) /* required as macro for static allocation */
#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-#include <stdint.h>
-
-typedef struct LZ4_stream_t_internal LZ4_stream_t_internal;
-struct LZ4_stream_t_internal {
- uint32_t hashTable[LZ4_HASH_SIZE_U32];
- uint32_t currentOffset;
- uint16_t dirty;
- uint16_t tableType;
- const uint8_t* dictionary;
- const LZ4_stream_t_internal* dictCtx;
- uint32_t dictSize;
-};
-
-typedef struct {
- const uint8_t* externalDict;
- size_t extDictSize;
- const uint8_t* prefixEnd;
- size_t prefixSize;
-} LZ4_streamDecode_t_internal;
-
+# include <stdint.h>
+ typedef int8_t LZ4_i8;
+ typedef uint8_t LZ4_byte;
+ typedef uint16_t LZ4_u16;
+ typedef uint32_t LZ4_u32;
#else
+ typedef signed char LZ4_i8;
+ typedef unsigned char LZ4_byte;
+ typedef unsigned short LZ4_u16;
+ typedef unsigned int LZ4_u32;
+#endif
typedef struct LZ4_stream_t_internal LZ4_stream_t_internal;
struct LZ4_stream_t_internal {
- unsigned int hashTable[LZ4_HASH_SIZE_U32];
- unsigned int currentOffset;
- unsigned short dirty;
- unsigned short tableType;
- const unsigned char* dictionary;
+ LZ4_u32 hashTable[LZ4_HASH_SIZE_U32];
+ LZ4_u32 currentOffset;
+ LZ4_u32 tableType;
+ const LZ4_byte* dictionary;
const LZ4_stream_t_internal* dictCtx;
- unsigned int dictSize;
+ LZ4_u32 dictSize;
};
typedef struct {
- const unsigned char* externalDict;
- const unsigned char* prefixEnd;
+ const LZ4_byte* externalDict;
size_t extDictSize;
+ const LZ4_byte* prefixEnd;
size_t prefixSize;
} LZ4_streamDecode_t_internal;
-#endif
/*! LZ4_stream_t :
- * information structure to track an LZ4 stream.
+ * Do not use the internal definitions below directly !
+ * Declare or allocate an LZ4_stream_t instead.
* LZ4_stream_t can also be created using LZ4_createStream(), which is recommended.
* The structure definition can be convenient for static allocation
* (on stack, or as part of larger structure).
* Init this structure with LZ4_initStream() before first use.
* note : only use this definition in association with static linking !
- * this definition is not API/ABI safe, and may change in a future version.
+ * this definition is not API/ABI safe, and may change in future versions.
*/
-#define LZ4_STREAMSIZE_U64 ((1 << (LZ4_MEMORY_USAGE-3)) + 4 + ((sizeof(void*)==16) ? 4 : 0) /*AS-400*/ )
-#define LZ4_STREAMSIZE (LZ4_STREAMSIZE_U64 * sizeof(unsigned long long))
+#define LZ4_STREAMSIZE 16416 /* static size, for inter-version compatibility */
+#define LZ4_STREAMSIZE_VOIDP (LZ4_STREAMSIZE / sizeof(void*))
union LZ4_stream_u {
- unsigned long long table[LZ4_STREAMSIZE_U64];
+ void* table[LZ4_STREAMSIZE_VOIDP];
LZ4_stream_t_internal internal_donotuse;
-} ; /* previously typedef'd to LZ4_stream_t */
+}; /* previously typedef'd to LZ4_stream_t */
+
/*! LZ4_initStream() : v1.9.0+
* An LZ4_stream_t structure must be initialized at least once.
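A minimal static-allocation sketch built on the declarations above (assuming liblz4 v1.9.0+):

    #include "lz4.h"

    int compress_with_stack_state(const char* src, char* dst,
                                  int srcSize, int dstCapacity)
    {
        LZ4_stream_t state;   /* opaque; never poke internal_donotuse */
        if (LZ4_initStream(&state, sizeof(state)) == NULL) return 0;
        return LZ4_compress_fast_continue(&state, src, dst, srcSize, dstCapacity, 1);
    }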
@@ -667,22 +679,21 @@ union LZ4_streamDecode_u {
#ifdef LZ4_DISABLE_DEPRECATE_WARNINGS
# define LZ4_DEPRECATED(message) /* disable deprecation warnings */
#else
-# define LZ4_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
# define LZ4_DEPRECATED(message) [[deprecated(message)]]
-# elif (LZ4_GCC_VERSION >= 405) || defined(__clang__)
-# define LZ4_DEPRECATED(message) __attribute__((deprecated(message)))
-# elif (LZ4_GCC_VERSION >= 301)
-# define LZ4_DEPRECATED(message) __attribute__((deprecated))
# elif defined(_MSC_VER)
# define LZ4_DEPRECATED(message) __declspec(deprecated(message))
+# elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 45))
+# define LZ4_DEPRECATED(message) __attribute__((deprecated(message)))
+# elif defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 31)
+# define LZ4_DEPRECATED(message) __attribute__((deprecated))
# else
-# pragma message("WARNING: You need to implement LZ4_DEPRECATED for this compiler")
-# define LZ4_DEPRECATED(message)
+# pragma message("WARNING: LZ4_DEPRECATED needs custom implementation for this compiler")
+# define LZ4_DEPRECATED(message) /* disabled */
# endif
#endif /* LZ4_DISABLE_DEPRECATE_WARNINGS */
-/* Obsolete compression functions */
+/*! Obsolete compression functions (since v1.7.3) */
LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress (const char* src, char* dest, int srcSize);
LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress_limitedOutput (const char* src, char* dest, int srcSize, int maxOutputSize);
LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize);
@@ -690,11 +701,12 @@ LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_co
LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize);
LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize, int maxOutputSize);
-/* Obsolete decompression functions */
+/*! Obsolete decompression functions (since v1.8.0) */
LZ4_DEPRECATED("use LZ4_decompress_fast() instead") LZ4LIB_API int LZ4_uncompress (const char* source, char* dest, int outputSize);
LZ4_DEPRECATED("use LZ4_decompress_safe() instead") LZ4LIB_API int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize);
-/* Obsolete streaming functions; degraded functionality; do not use!
+/* Obsolete streaming functions (since v1.7.0)
+ * degraded functionality; do not use!
*
* In order to perform streaming compression, these functions depended on data
* that is no longer tracked in the state. They have been preserved as well as
@@ -708,23 +720,22 @@ LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API int LZ4_sizeofStre
LZ4_DEPRECATED("Use LZ4_resetStream() instead") LZ4LIB_API int LZ4_resetStreamState(void* state, char* inputBuffer);
LZ4_DEPRECATED("Use LZ4_saveDict() instead") LZ4LIB_API char* LZ4_slideInputBuffer (void* state);
-/* Obsolete streaming decoding functions */
+/*! Obsolete streaming decoding functions (since v1.7.0) */
LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead") LZ4LIB_API int LZ4_decompress_safe_withPrefix64k (const char* src, char* dst, int compressedSize, int maxDstSize);
LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") LZ4LIB_API int LZ4_decompress_fast_withPrefix64k (const char* src, char* dst, int originalSize);
-/*! LZ4_decompress_fast() : **unsafe!**
+/*! Obsolete LZ4_decompress_fast variants (since v1.9.0) :
* These functions used to be faster than LZ4_decompress_safe(),
- * but it has changed, and they are now slower than LZ4_decompress_safe().
+ * but this is no longer the case. They are now slower.
* This is because LZ4_decompress_fast() doesn't know the input size,
- * and therefore must progress more cautiously in the input buffer to not read beyond the end of block.
+ * and therefore must progress more cautiously into the input buffer to not read beyond the end of the block.
* On top of that `LZ4_decompress_fast()` is not protected vs malformed or malicious inputs, making it a security liability.
* As a consequence, LZ4_decompress_fast() is strongly discouraged, and deprecated.
*
* The last remaining LZ4_decompress_fast() specificity is that
* it can decompress a block without knowing its compressed size.
- * Such functionality could be achieved in a more secure manner,
- * by also providing the maximum size of input buffer,
- * but it would require new prototypes, and adaptation of the implementation to this new use case.
+ * Such functionality can be achieved in a more secure manner
+ * by employing LZ4_decompress_safe_partial().
*
* Parameters:
* originalSize : is the uncompressed size to regenerate.
@@ -739,7 +750,6 @@ LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") LZ4LIB_API int LZ4
* But they may happen if input data is invalid (error or intentional tampering).
* As a consequence, use these functions in trusted environments with trusted data **only**.
*/
-
LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe() instead")
LZ4LIB_API int LZ4_decompress_fast (const char* src, char* dst, int originalSize);
LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_continue() instead")
diff --git a/lib/lz4frame.c b/lib/lz4frame.c
index c9f630d..ec02c92 100644
--- a/lib/lz4frame.c
+++ b/lib/lz4frame.c
@@ -71,8 +71,8 @@
* towards another library or solution of their choice
* by modifying below section.
*/
-#include <stdlib.h> /* malloc, calloc, free */
#ifndef LZ4_SRC_INCLUDED /* avoid redefinition when sources are coalesced */
+# include <stdlib.h> /* malloc, calloc, free */
# define ALLOC(s) malloc(s)
# define ALLOC_AND_ZERO(s) calloc(1,(s))
# define FREEMEM(p) free(p)
@@ -533,7 +533,7 @@ void LZ4F_freeCDict(LZ4F_CDict* cdict)
* If the result LZ4F_errorCode_t is not OK_NoError, there was an error during context creation.
* Object can release its memory using LZ4F_freeCompressionContext();
*/
-LZ4F_errorCode_t LZ4F_createCompressionContext(LZ4F_compressionContext_t* LZ4F_compressionContextPtr, unsigned version)
+LZ4F_errorCode_t LZ4F_createCompressionContext(LZ4F_cctx** LZ4F_compressionContextPtr, unsigned version)
{
LZ4F_cctx_t* const cctxPtr = (LZ4F_cctx_t*)ALLOC_AND_ZERO(sizeof(LZ4F_cctx_t));
if (cctxPtr==NULL) return err0r(LZ4F_ERROR_allocation_failed);
@@ -541,20 +541,18 @@ LZ4F_errorCode_t LZ4F_createCompressionContext(LZ4F_compressionContext_t* LZ4F_c
cctxPtr->version = version;
cctxPtr->cStage = 0; /* Next stage : init stream */
- *LZ4F_compressionContextPtr = (LZ4F_compressionContext_t)cctxPtr;
+ *LZ4F_compressionContextPtr = cctxPtr;
return LZ4F_OK_NoError;
}
-LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_compressionContext_t LZ4F_compressionContext)
+LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctxPtr)
{
- LZ4F_cctx_t* const cctxPtr = (LZ4F_cctx_t*)LZ4F_compressionContext;
-
if (cctxPtr != NULL) { /* support free on NULL */
- FREEMEM(cctxPtr->lz4CtxPtr); /* works because LZ4_streamHC_t and LZ4_stream_t are simple POD types */
+ FREEMEM(cctxPtr->lz4CtxPtr); /* note: LZ4_streamHC_t and LZ4_stream_t are simple POD types */
FREEMEM(cctxPtr->tmpBuff);
- FREEMEM(LZ4F_compressionContext);
+ FREEMEM(cctxPtr);
}
return LZ4F_OK_NoError;
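A minimal lifecycle sketch around these two entry points:

    #include "lz4frame.h"

    int with_cctx(void)
    {
        LZ4F_cctx* cctx = NULL;
        if (LZ4F_isError(LZ4F_createCompressionContext(&cctx, LZ4F_VERSION)))
            return 1;
        /* ... LZ4F_compressBegin() / LZ4F_compressUpdate() / LZ4F_compressEnd() ... */
        LZ4F_freeCompressionContext(cctx);   /* NULL-tolerant, as noted above */
        return 0;
    }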
@@ -725,6 +723,9 @@ size_t LZ4F_compressBegin(LZ4F_cctx* cctxPtr,
*/
size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr)
{
+ if (preferencesPtr && preferencesPtr->autoFlush) {
+ return LZ4F_compressBound_internal(srcSize, preferencesPtr, 0);
+ }
return LZ4F_compressBound_internal(srcSize, preferencesPtr, (size_t)-1);
}
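A sketch of the new autoFlush branch from the caller's side (values illustrative):

    #include "lz4frame.h"
    #include <string.h>

    size_t bound_with_autoflush(size_t srcSize)
    {
        LZ4F_preferences_t prefs;
        memset(&prefs, 0, sizeof(prefs));
        prefs.autoFlush = 1;   /* nothing stays buffered between calls */
        /* tighter than the default bound, which assumes up to
         * (blockSize-1) bytes already held internally */
        return LZ4F_compressBound(srcSize, &prefs);
    }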
@@ -747,6 +748,7 @@ static size_t LZ4F_makeBlock(void* dst,
(int)(srcSize), (int)(srcSize-1),
level, cdict);
if (cSize == 0) { /* compression failed */
+ DEBUGLOG(5, "LZ4F_makeBlock: compression failed, creating a raw block (size %u)", (U32)srcSize);
cSize = (U32)srcSize;
LZ4F_writeLE32(cSizePtr, cSize | LZ4F_BLOCKUNCOMPRESSED_FLAG);
memcpy(cSizePtr+BHSize, src, srcSize);
@@ -989,6 +991,7 @@ size_t LZ4F_compressEnd(LZ4F_cctx* cctxPtr,
BYTE* dstPtr = dstStart;
size_t const flushSize = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr);
+ DEBUGLOG(5,"LZ4F_compressEnd: dstCapacity=%u", (unsigned)dstCapacity);
if (LZ4F_isError(flushSize)) return flushSize;
dstPtr += flushSize;
@@ -1002,6 +1005,7 @@ size_t LZ4F_compressEnd(LZ4F_cctx* cctxPtr,
if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled) {
U32 const xxh = XXH32_digest(&(cctxPtr->xxh));
if (dstCapacity < 8) return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
+ DEBUGLOG(5,"Writing 32-bit content checksum");
LZ4F_writeLE32(dstPtr, xxh);
dstPtr+=4; /* content Checksum */
}
@@ -1112,6 +1116,7 @@ static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize
size_t frameHeaderSize;
const BYTE* srcPtr = (const BYTE*)src;
+ DEBUGLOG(5, "LZ4F_decodeHeader");
/* need to decode header to get frameInfo */
if (srcSize < minFHSize) return err0r(LZ4F_ERROR_frameHeader_incomplete); /* minimal frame header size */
MEM_INIT(&(dctx->frameInfo), 0, sizeof(dctx->frameInfo));
@@ -1132,8 +1137,10 @@ static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize
/* control magic number */
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
- if (LZ4F_readLE32(srcPtr) != LZ4F_MAGICNUMBER)
+ if (LZ4F_readLE32(srcPtr) != LZ4F_MAGICNUMBER) {
+ DEBUGLOG(4, "frame header error : unknown magic number");
return err0r(LZ4F_ERROR_frameType_unknown);
+ }
#endif
dctx->frameInfo.frameType = LZ4F_frame;
@@ -1282,15 +1289,20 @@ LZ4F_errorCode_t LZ4F_getFrameInfo(LZ4F_dctx* dctx,
/* LZ4F_updateDict() :
- * only used for LZ4F_blockLinked mode */
+ * only used for LZ4F_blockLinked mode
+ * Condition : dstPtr != NULL
+ */
static void LZ4F_updateDict(LZ4F_dctx* dctx,
const BYTE* dstPtr, size_t dstSize, const BYTE* dstBufferStart,
unsigned withinTmp)
{
- if (dctx->dictSize==0)
- dctx->dict = (const BYTE*)dstPtr; /* priority to dictionary continuity */
+ assert(dstPtr != NULL);
+ if (dctx->dictSize==0) {
+ dctx->dict = (const BYTE*)dstPtr; /* priority to prefix mode */
+ }
+ assert(dctx->dict != NULL);
- if (dctx->dict + dctx->dictSize == dstPtr) { /* dictionary continuity, directly within dstBuffer */
+ if (dctx->dict + dctx->dictSize == dstPtr) { /* prefix mode, everything within dstBuffer */
dctx->dictSize += dstSize;
return;
}
@@ -1304,9 +1316,10 @@ static void LZ4F_updateDict(LZ4F_dctx* dctx,
assert(dstSize < 64 KB); /* if dstSize >= 64 KB, dictionary would be set into dstBuffer directly */
- /* dstBuffer does not contain whole useful history (64 KB), so it must be saved within tmpOut */
+ /* dstBuffer does not contain whole useful history (64 KB), so it must be saved within tmpOutBuffer */
+ assert(dctx->tmpOutBuffer != NULL);
- if ((withinTmp) && (dctx->dict == dctx->tmpOutBuffer)) { /* continue history within tmpOutBuffer */
+ if (withinTmp && (dctx->dict == dctx->tmpOutBuffer)) { /* continue history within tmpOutBuffer */
/* withinTmp expectation : content of [dstPtr,dstSize] is same as [dict+dictSize,dstSize], so we just extend it */
assert(dctx->dict + dctx->dictSize == dctx->tmpOut + dctx->tmpOutStart);
dctx->dictSize += dstSize;
@@ -1378,17 +1391,21 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
const BYTE* const srcEnd = srcStart + *srcSizePtr;
const BYTE* srcPtr = srcStart;
BYTE* const dstStart = (BYTE*)dstBuffer;
- BYTE* const dstEnd = dstStart + *dstSizePtr;
+ BYTE* const dstEnd = dstStart ? dstStart + *dstSizePtr : NULL;
BYTE* dstPtr = dstStart;
const BYTE* selectedIn = NULL;
unsigned doAnotherStage = 1;
size_t nextSrcSizeHint = 1;
+ DEBUGLOG(5, "LZ4F_decompress : %p,%u => %p,%u",
+ srcBuffer, (unsigned)*srcSizePtr, dstBuffer, (unsigned)*dstSizePtr);
+ if (dstBuffer == NULL) assert(*dstSizePtr == 0);
MEM_INIT(&optionsNull, 0, sizeof(optionsNull));
if (decompressOptionsPtr==NULL) decompressOptionsPtr = &optionsNull;
*srcSizePtr = 0;
*dstSizePtr = 0;
+ assert(dctx != NULL);
/* behaves as a state machine */
@@ -1398,6 +1415,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
{
case dstage_getFrameHeader:
+ DEBUGLOG(6, "dstage_getFrameHeader");
if ((size_t)(srcEnd-srcPtr) >= maxFHSize) { /* enough to decode - shortcut */
size_t const hSize = LZ4F_decodeHeader(dctx, srcPtr, (size_t)(srcEnd-srcPtr)); /* will update dStage appropriately */
if (LZ4F_isError(hSize)) return hSize;
@@ -1411,6 +1429,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
/* fall-through */
case dstage_storeFrameHeader:
+ DEBUGLOG(6, "dstage_storeFrameHeader");
{ size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize, (size_t)(srcEnd - srcPtr));
memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy);
dctx->tmpInSize += sizeToCopy;
@@ -1427,6 +1446,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
break;
case dstage_init:
+ DEBUGLOG(6, "dstage_init");
if (dctx->frameInfo.contentChecksumFlag) (void)XXH32_reset(&(dctx->xxh), 0);
/* internal buffers allocation */
{ size_t const bufferNeeded = dctx->maxBlockSize
@@ -1480,17 +1500,21 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
} /* if (dctx->dStage == dstage_storeBlockHeader) */
/* decode block header */
- { size_t const nextCBlockSize = LZ4F_readLE32(selectedIn) & 0x7FFFFFFFU;
+ { U32 const blockHeader = LZ4F_readLE32(selectedIn);
+ size_t const nextCBlockSize = blockHeader & 0x7FFFFFFFU;
size_t const crcSize = dctx->frameInfo.blockChecksumFlag * BFSize;
- if (nextCBlockSize==0) { /* frameEnd signal, no more block */
+ if (blockHeader==0) { /* frameEnd signal, no more block */
+ DEBUGLOG(5, "end of frame");
dctx->dStage = dstage_getSuffix;
break;
}
- if (nextCBlockSize > dctx->maxBlockSize)
+ if (nextCBlockSize > dctx->maxBlockSize) {
return err0r(LZ4F_ERROR_maxBlockSize_invalid);
- if (LZ4F_readLE32(selectedIn) & LZ4F_BLOCKUNCOMPRESSED_FLAG) {
+ }
+ if (blockHeader & LZ4F_BLOCKUNCOMPRESSED_FLAG) {
/* next block is uncompressed */
dctx->tmpInTarget = nextCBlockSize;
+ DEBUGLOG(5, "next block is uncompressed (size %u)", (U32)nextCBlockSize);
if (dctx->frameInfo.blockChecksumFlag) {
(void)XXH32_reset(&dctx->blockChecksum, 0);
}
@@ -1508,20 +1532,26 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
}
case dstage_copyDirect: /* uncompressed block */
- { size_t const minBuffSize = MIN((size_t)(srcEnd-srcPtr), (size_t)(dstEnd-dstPtr));
- size_t const sizeToCopy = MIN(dctx->tmpInTarget, minBuffSize);
- memcpy(dstPtr, srcPtr, sizeToCopy);
- if (dctx->frameInfo.blockChecksumFlag) {
- (void)XXH32_update(&dctx->blockChecksum, srcPtr, sizeToCopy);
- }
- if (dctx->frameInfo.contentChecksumFlag)
- (void)XXH32_update(&dctx->xxh, srcPtr, sizeToCopy);
- if (dctx->frameInfo.contentSize)
- dctx->frameRemainingSize -= sizeToCopy;
+ DEBUGLOG(6, "dstage_copyDirect");
+ { size_t sizeToCopy;
+ if (dstPtr == NULL) {
+ sizeToCopy = 0;
+ } else {
+ size_t const minBuffSize = MIN((size_t)(srcEnd-srcPtr), (size_t)(dstEnd-dstPtr));
+ sizeToCopy = MIN(dctx->tmpInTarget, minBuffSize);
+ memcpy(dstPtr, srcPtr, sizeToCopy);
+ if (dctx->frameInfo.blockChecksumFlag) {
+ (void)XXH32_update(&dctx->blockChecksum, srcPtr, sizeToCopy);
+ }
+ if (dctx->frameInfo.contentChecksumFlag)
+ (void)XXH32_update(&dctx->xxh, srcPtr, sizeToCopy);
+ if (dctx->frameInfo.contentSize)
+ dctx->frameRemainingSize -= sizeToCopy;
- /* history management (linked blocks only)*/
- if (dctx->frameInfo.blockMode == LZ4F_blockLinked)
- LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 0);
+ /* history management (linked blocks only)*/
+ if (dctx->frameInfo.blockMode == LZ4F_blockLinked) {
+ LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 0);
+ } }
srcPtr += sizeToCopy;
dstPtr += sizeToCopy;
@@ -1534,15 +1564,16 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
break;
}
dctx->tmpInTarget -= sizeToCopy; /* need to copy more */
- nextSrcSizeHint = dctx->tmpInTarget +
- +(dctx->frameInfo.blockChecksumFlag ? BFSize : 0)
- + BHSize /* next header size */;
- doAnotherStage = 0;
- break;
}
+ nextSrcSizeHint = dctx->tmpInTarget +
+ +(dctx->frameInfo.blockChecksumFlag ? BFSize : 0)
+ + BHSize /* next header size */;
+ doAnotherStage = 0;
+ break;
/* check block checksum for recently transferred uncompressed block */
case dstage_getBlockChecksum:
+ DEBUGLOG(6, "dstage_getBlockChecksum");
{ const void* crcSrc;
if ((srcEnd-srcPtr >= 4) && (dctx->tmpInSize==0)) {
crcSrc = srcPtr;
@@ -1562,8 +1593,12 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
{ U32 const readCRC = LZ4F_readLE32(crcSrc);
U32 const calcCRC = XXH32_digest(&dctx->blockChecksum);
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
- if (readCRC != calcCRC)
+ DEBUGLOG(6, "compare block checksum");
+ if (readCRC != calcCRC) {
+ DEBUGLOG(4, "incorrect block checksum: %08X != %08X",
+ readCRC, calcCRC);
return err0r(LZ4F_ERROR_blockChecksum_invalid);
+ }
#else
(void)readCRC;
(void)calcCRC;
@@ -1573,6 +1608,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
break;
case dstage_getCBlock:
+ DEBUGLOG(6, "dstage_getCBlock");
if ((size_t)(srcEnd-srcPtr) < dctx->tmpInTarget) {
dctx->tmpInSize = 0;
dctx->dStage = dstage_storeCBlock;
@@ -1582,7 +1618,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
selectedIn = srcPtr;
srcPtr += dctx->tmpInTarget;
- if (0) /* jump over next block */
+ if (0) /* always jump over next block */
case dstage_storeCBlock:
{ size_t const wantedData = dctx->tmpInTarget - dctx->tmpInSize;
size_t const inputLeft = (size_t)(srcEnd-srcPtr);
@@ -1619,6 +1655,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
const char* dict = (const char*)dctx->dict;
size_t dictSize = dctx->dictSize;
int decodedSize;
+ assert(dstPtr != NULL);
if (dict && dictSize > 1 GB) {
/* the dictSize param is an int, avoid truncation / sign issues */
dict += dictSize - 64 KB;
@@ -1636,8 +1673,9 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
dctx->frameRemainingSize -= (size_t)decodedSize;
/* dictionary management */
- if (dctx->frameInfo.blockMode==LZ4F_blockLinked)
+ if (dctx->frameInfo.blockMode==LZ4F_blockLinked) {
LZ4F_updateDict(dctx, dstPtr, (size_t)decodedSize, dstStart, 0);
+ }
dstPtr += decodedSize;
dctx->dStage = dstage_getBlockHeader;
@@ -1684,7 +1722,9 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
/* fall-through */
case dstage_flushOut: /* flush decoded data from tmpOut to dstBuffer */
- { size_t const sizeToCopy = MIN(dctx->tmpOutSize - dctx->tmpOutStart, (size_t)(dstEnd-dstPtr));
+ DEBUGLOG(6, "dstage_flushOut");
+ if (dstPtr != NULL) {
+ size_t const sizeToCopy = MIN(dctx->tmpOutSize - dctx->tmpOutStart, (size_t)(dstEnd-dstPtr));
memcpy(dstPtr, dctx->tmpOut + dctx->tmpOutStart, sizeToCopy);
/* dictionary management */
@@ -1693,16 +1733,15 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
dctx->tmpOutStart += sizeToCopy;
dstPtr += sizeToCopy;
-
- if (dctx->tmpOutStart == dctx->tmpOutSize) { /* all flushed */
- dctx->dStage = dstage_getBlockHeader; /* get next block */
- break;
- }
- /* could not flush everything : stop there, just request a block header */
- doAnotherStage = 0;
- nextSrcSizeHint = BHSize;
+ }
+ if (dctx->tmpOutStart == dctx->tmpOutSize) { /* all flushed */
+ dctx->dStage = dstage_getBlockHeader; /* get next block */
break;
}
+ /* could not flush everything : stop there, just request a block header */
+ doAnotherStage = 0;
+ nextSrcSizeHint = BHSize;
+ break;
case dstage_getSuffix:
if (dctx->frameRemainingSize)
@@ -1806,6 +1845,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
LZ4F_STATIC_ASSERT((unsigned)dstage_init == 2);
if ( (dctx->frameInfo.blockMode==LZ4F_blockLinked) /* next block will use up to 64KB from previous ones */
&& (dctx->dict != dctx->tmpOutBuffer) /* dictionary is not already within tmp */
+ && (dctx->dict != NULL) /* dictionary exists */
&& (!decompressOptionsPtr->stableDst) /* cannot rely on dst data to remain there for next call */
&& ((unsigned)(dctx->dStage)-2 < (unsigned)(dstage_getSuffix)-2) ) /* valid stages : [init ... getSuffix[ */
{
@@ -1815,9 +1855,9 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
const BYTE* oldDictEnd = dctx->dict + dctx->dictSize - dctx->tmpOutStart;
if (dctx->tmpOutSize > 64 KB) copySize = 0;
if (copySize > preserveSize) copySize = preserveSize;
+ assert(dctx->tmpOutBuffer != NULL);
- if (copySize > 0)
- memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize);
+ memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize);
dctx->dict = dctx->tmpOutBuffer;
dctx->dictSize = preserveSize + dctx->tmpOutStart;
@@ -1825,8 +1865,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
const BYTE* const oldDictEnd = dctx->dict + dctx->dictSize;
size_t const newDictSize = MIN(dctx->dictSize, 64 KB);
- if (newDictSize > 0)
- memcpy(dctx->tmpOutBuffer, oldDictEnd - newDictSize, newDictSize);
+ memcpy(dctx->tmpOutBuffer, oldDictEnd - newDictSize, newDictSize);
dctx->dict = dctx->tmpOutBuffer;
dctx->dictSize = newDictSize;
diff --git a/lib/lz4frame.h b/lib/lz4frame.h
index 391e484..4573317 100644
--- a/lib/lz4frame.h
+++ b/lib/lz4frame.h
@@ -66,17 +66,22 @@ extern "C" {
*****************************************************************/
/* LZ4_DLL_EXPORT :
* Enable exporting of functions when building a Windows DLL
- * LZ4FLIB_API :
+ * LZ4FLIB_VISIBILITY :
* Control library symbols visibility.
*/
+#ifndef LZ4FLIB_VISIBILITY
+# if defined(__GNUC__) && (__GNUC__ >= 4)
+# define LZ4FLIB_VISIBILITY __attribute__ ((visibility ("default")))
+# else
+# define LZ4FLIB_VISIBILITY
+# endif
+#endif
#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1)
-# define LZ4FLIB_API __declspec(dllexport)
+# define LZ4FLIB_API __declspec(dllexport) LZ4FLIB_VISIBILITY
#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1)
-# define LZ4FLIB_API __declspec(dllimport)
-#elif defined(__GNUC__) && (__GNUC__ >= 4)
-# define LZ4FLIB_API __attribute__ ((__visibility__ ("default")))
+# define LZ4FLIB_API __declspec(dllimport) LZ4FLIB_VISIBILITY
#else
-# define LZ4FLIB_API
+# define LZ4FLIB_API LZ4FLIB_VISIBILITY
#endif
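For example, a DLL build might define the export macro on the command line (an illustrative invocation, not an official build rule):

    cc -DLZ4_DLL_EXPORT=1 -shared -fPIC -o liblz4.dll lib/lz4.c lib/lz4hc.c lib/lz4frame.c lib/xxhash.c

while client code compiled against the DLL would define `-DLZ4_DLL_IMPORT=1` instead.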
#ifdef LZ4F_DISABLE_DEPRECATE_WARNINGS
@@ -103,7 +108,7 @@ LZ4FLIB_API const char* LZ4F_getErrorName(LZ4F_errorCode_t code); /**< return
/*-************************************
* Frame compression types
- **************************************/
+ ************************************* */
/* #define LZ4F_ENABLE_OBSOLETE_ENUMS // uncomment to enable obsolete enums */
#ifdef LZ4F_ENABLE_OBSOLETE_ENUMS
# define LZ4F_OBSOLETE_ENUM(x) , LZ4F_DEPRECATE(x) = LZ4F_##x
@@ -113,7 +118,8 @@ LZ4FLIB_API const char* LZ4F_getErrorName(LZ4F_errorCode_t code); /**< return
/* The larger the block size, the (slightly) better the compression ratio,
* though there are diminishing returns.
- * Larger blocks also increase memory usage on both compression and decompression sides. */
+ * Larger blocks also increase memory usage on both compression and decompression sides.
+ */
typedef enum {
LZ4F_default=0,
LZ4F_max64KB=4,
@@ -284,7 +290,7 @@ LZ4FLIB_API size_t LZ4F_compressBegin(LZ4F_cctx* cctx,
* @return is always the same for a srcSize and prefsPtr.
* prefsPtr is optional : when NULL is provided, preferences will be set to cover worst case scenario.
* tech details :
- * @return includes the possibility that internal buffer might already be filled by up to (blockSize-1) bytes.
+ * when automatic flushing is disabled, @return includes the possibility that the internal buffer might already be filled by up to (blockSize-1) bytes.
* It also includes frame footer (ending + checksum), since it might be generated by LZ4F_compressEnd().
* @return doesn't include frame header, as it was already generated by LZ4F_compressBegin().
*/
@@ -376,7 +382,7 @@ LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx);
* note : Frame header size is variable, but is guaranteed to be
* >= LZ4F_HEADER_SIZE_MIN bytes, and <= LZ4F_HEADER_SIZE_MAX bytes.
*/
-size_t LZ4F_headerSize(const void* src, size_t srcSize);
+LZ4FLIB_API size_t LZ4F_headerSize(const void* src, size_t srcSize);
/*! LZ4F_getFrameInfo() :
* This function extracts frame parameters (max blockSize, dictID, etc.).
@@ -426,8 +432,10 @@ LZ4FLIB_API size_t LZ4F_getFrameInfo(LZ4F_dctx* dctx,
const void* srcBuffer, size_t* srcSizePtr);
/*! LZ4F_decompress() :
- * Call this function repetitively to regenerate compressed data from `srcBuffer`.
- * The function will read up to *srcSizePtr bytes from srcBuffer,
+ * Call this function repeatedly to regenerate data compressed in `srcBuffer`.
+ *
+ * The function requires a valid dctx state.
+ * It will read up to *srcSizePtr bytes from srcBuffer,
* and decompress data into dstBuffer, of capacity *dstSizePtr.
*
* The nb of bytes consumed from srcBuffer will be written into *srcSizePtr (necessarily <= original value).
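A hedged sketch of the repeated-call pattern (single-shot buffers for brevity):

    #include "lz4frame.h"

    /* Feed all of 'src' through LZ4F_decompress(); returns bytes produced,
     * or an LZ4F error code. */
    size_t drain_frame(LZ4F_dctx* dctx, const char* src, size_t srcSize,
                       char* dst, size_t dstCapacity)
    {
        size_t srcPos = 0, dstPos = 0;
        while (srcPos < srcSize) {
            size_t srcRead = srcSize - srcPos;        /* in: available, out: consumed */
            size_t dstWritten = dstCapacity - dstPos; /* in: room, out: produced */
            size_t const hint = LZ4F_decompress(dctx, dst + dstPos, &dstWritten,
                                                src + srcPos, &srcRead, NULL);
            if (LZ4F_isError(hint)) return hint;
            srcPos += srcRead;
            dstPos += dstWritten;
            if (hint == 0) break;   /* frame fully decoded and flushed */
        }
        return dstPos;
    }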
@@ -493,9 +501,9 @@ extern "C" {
* Use at your own risk.
*/
#ifdef LZ4F_PUBLISH_STATIC_FUNCTIONS
-#define LZ4FLIB_STATIC_API LZ4FLIB_API
+# define LZ4FLIB_STATIC_API LZ4FLIB_API
#else
-#define LZ4FLIB_STATIC_API
+# define LZ4FLIB_STATIC_API
#endif
diff --git a/lib/lz4hc.c b/lib/lz4hc.c
index 5922ed7..77c9f43 100644
--- a/lib/lz4hc.c
+++ b/lib/lz4hc.c
@@ -53,7 +53,7 @@
#include "lz4hc.h"
-/*=== Common LZ4 definitions ===*/
+/*=== Common definitions ===*/
#if defined(__GNUC__)
# pragma GCC diagnostic ignored "-Wunused-function"
#endif
@@ -61,15 +61,16 @@
# pragma clang diagnostic ignored "-Wunused-function"
#endif
-/*=== Enums ===*/
-typedef enum { noDictCtx, usingDictCtxHc } dictCtx_directive;
-
-
#define LZ4_COMMONDEFS_ONLY
#ifndef LZ4_SRC_INCLUDED
#include "lz4.c" /* LZ4_count, constants, mem */
#endif
+
+/*=== Enums ===*/
+typedef enum { noDictCtx, usingDictCtxHc } dictCtx_directive;
+
+
/*=== Constants ===*/
#define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH)
#define LZ4_OPT_NUM (1<<12)
@@ -92,7 +93,7 @@ static U32 LZ4HC_hashPtr(const void* ptr) { return HASH_FUNCTION(LZ4_read32(ptr)
**************************************/
static void LZ4HC_clearTables (LZ4HC_CCtx_internal* hc4)
{
- MEM_INIT((void*)hc4->hashTable, 0, sizeof(hc4->hashTable));
+ MEM_INIT(hc4->hashTable, 0, sizeof(hc4->hashTable));
MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
}
@@ -161,8 +162,7 @@ int LZ4HC_countBack(const BYTE* const ip, const BYTE* const match,
static U32 LZ4HC_rotatePattern(size_t const rotate, U32 const pattern)
{
size_t const bitsToRotate = (rotate & (sizeof(pattern) - 1)) << 3;
- if (bitsToRotate == 0)
- return pattern;
+ if (bitsToRotate == 0) return pattern;
return LZ4HC_rotl32(pattern, (int)bitsToRotate);
}
@@ -172,7 +172,8 @@ static unsigned
LZ4HC_countPattern(const BYTE* ip, const BYTE* const iEnd, U32 const pattern32)
{
const BYTE* const iStart = ip;
- reg_t const pattern = (sizeof(pattern)==8) ? (reg_t)pattern32 + (((reg_t)pattern32) << 32) : pattern32;
+ reg_t const pattern = (sizeof(pattern)==8) ?
+ (reg_t)pattern32 + (((reg_t)pattern32) << (sizeof(pattern)*4)) : pattern32;
while (likely(ip < iEnd-(sizeof(pattern)-1))) {
reg_t const diff = LZ4_read_ARCH(ip) ^ pattern;
@@ -270,7 +271,7 @@ LZ4HC_InsertAndGetWiderMatch (
DEBUGLOG(7, "First match at index %u / %u (lowestMatchIndex)",
matchIndex, lowestMatchIndex);
- while ((matchIndex>=lowestMatchIndex) && (nbAttempts)) {
+ while ((matchIndex>=lowestMatchIndex) && (nbAttempts>0)) {
int matchLength=0;
nbAttempts--;
assert(matchIndex < ipIndex);
@@ -389,8 +390,8 @@ LZ4HC_InsertAndGetWiderMatch (
if (lookBackLength==0) { /* no back possible */
size_t const maxML = MIN(currentSegmentLength, srcPatternLength);
if ((size_t)longest < maxML) {
- assert(base + matchIndex < ip);
- if (ip - (base+matchIndex) > LZ4_DISTANCE_MAX) break;
+ assert(base + matchIndex != ip);
+ if ((size_t)(ip - base) - matchIndex > LZ4_DISTANCE_MAX) break;
assert(maxML < 2 GB);
longest = (int)maxML;
*matchpos = base + matchIndex; /* virtual pos, relative to ip, to retrieve offset */
@@ -410,7 +411,7 @@ LZ4HC_InsertAndGetWiderMatch (
} /* while ((matchIndex>=lowestMatchIndex) && (nbAttempts)) */
if ( dict == usingDictCtxHc
- && nbAttempts
+ && nbAttempts > 0
&& ipIndex - lowestMatchIndex < LZ4_DISTANCE_MAX) {
size_t const dictEndOffset = (size_t)(dictCtx->end - dictCtx->base);
U32 dictMatchIndex = dictCtx->hashTable[LZ4HC_hashPtr(ip)];
@@ -460,74 +461,90 @@ int LZ4HC_InsertAndFindBestMatch(LZ4HC_CCtx_internal* const hc4, /* Index tabl
* @return : 0 if ok,
* 1 if buffer issue detected */
LZ4_FORCE_INLINE int LZ4HC_encodeSequence (
- const BYTE** ip,
- BYTE** op,
- const BYTE** anchor,
+ const BYTE** _ip,
+ BYTE** _op,
+ const BYTE** _anchor,
int matchLength,
const BYTE* const match,
limitedOutput_directive limit,
BYTE* oend)
{
+#define ip (*_ip)
+#define op (*_op)
+#define anchor (*_anchor)
+
size_t length;
- BYTE* const token = (*op)++;
+ BYTE* const token = op++;
#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 6)
static const BYTE* start = NULL;
static U32 totalCost = 0;
- U32 const pos = (start==NULL) ? 0 : (U32)(*anchor - start);
- U32 const ll = (U32)(*ip - *anchor);
+ U32 const pos = (start==NULL) ? 0 : (U32)(anchor - start);
+ U32 const ll = (U32)(ip - anchor);
U32 const llAdd = (ll>=15) ? ((ll-15) / 255) + 1 : 0;
U32 const mlAdd = (matchLength>=19) ? ((matchLength-19) / 255) + 1 : 0;
U32 const cost = 1 + llAdd + ll + 2 + mlAdd;
- if (start==NULL) start = *anchor; /* only works for single segment */
+ if (start==NULL) start = anchor; /* only works for single segment */
/* g_debuglog_enable = (pos >= 2228) & (pos <= 2262); */
- DEBUGLOG(6, "pos:%7u -- literals:%3u, match:%4i, offset:%5u, cost:%3u + %u",
+ DEBUGLOG(6, "pos:%7u -- literals:%4u, match:%4i, offset:%5u, cost:%4u + %5u",
pos,
- (U32)(*ip - *anchor), matchLength, (U32)(*ip-match),
+ (U32)(ip - anchor), matchLength, (U32)(ip-match),
cost, totalCost);
totalCost += cost;
#endif
/* Encode Literal length */
- length = (size_t)(*ip - *anchor);
- if ((limit) && ((*op + (length / 255) + length + (2 + 1 + LASTLITERALS)) > oend)) return 1; /* Check output limit */
+ length = (size_t)(ip - anchor);
+ LZ4_STATIC_ASSERT(notLimited == 0);
+ /* Check output limit */
+ if (limit && ((op + (length / 255) + length + (2 + 1 + LASTLITERALS)) > oend)) {
+ DEBUGLOG(6, "Not enough room to write %i literals (%i bytes remaining)",
+ (int)length, (int)(oend - op));
+ return 1;
+ }
if (length >= RUN_MASK) {
size_t len = length - RUN_MASK;
*token = (RUN_MASK << ML_BITS);
- for(; len >= 255 ; len -= 255) *(*op)++ = 255;
- *(*op)++ = (BYTE)len;
+ for(; len >= 255 ; len -= 255) *op++ = 255;
+ *op++ = (BYTE)len;
} else {
*token = (BYTE)(length << ML_BITS);
}
/* Copy Literals */
- LZ4_wildCopy8(*op, *anchor, (*op) + length);
- *op += length;
+ LZ4_wildCopy8(op, anchor, op + length);
+ op += length;
/* Encode Offset */
- assert( (*ip - match) <= LZ4_DISTANCE_MAX ); /* note : consider providing offset as a value, rather than as a pointer difference */
- LZ4_writeLE16(*op, (U16)(*ip-match)); *op += 2;
+ assert( (ip - match) <= LZ4_DISTANCE_MAX ); /* note : consider providing offset as a value, rather than as a pointer difference */
+ LZ4_writeLE16(op, (U16)(ip - match)); op += 2;
/* Encode MatchLength */
assert(matchLength >= MINMATCH);
length = (size_t)matchLength - MINMATCH;
- if ((limit) && (*op + (length / 255) + (1 + LASTLITERALS) > oend)) return 1; /* Check output limit */
+ if (limit && (op + (length / 255) + (1 + LASTLITERALS) > oend)) {
+ DEBUGLOG(6, "Not enough room to write match length");
+ return 1; /* Check output limit */
+ }
if (length >= ML_MASK) {
*token += ML_MASK;
length -= ML_MASK;
- for(; length >= 510 ; length -= 510) { *(*op)++ = 255; *(*op)++ = 255; }
- if (length >= 255) { length -= 255; *(*op)++ = 255; }
- *(*op)++ = (BYTE)length;
+ for(; length >= 510 ; length -= 510) { *op++ = 255; *op++ = 255; }
+ if (length >= 255) { length -= 255; *op++ = 255; }
+ *op++ = (BYTE)length;
} else {
*token += (BYTE)(length);
}
/* Prepare next loop */
- *ip += matchLength;
- *anchor = *ip;
+ ip += matchLength;
+ anchor = ip;
return 0;
}
+#undef ip
+#undef op
+#undef anchor
LZ4_FORCE_INLINE int LZ4HC_compress_hashChain (
LZ4HC_CCtx_internal* const ctx,
@@ -535,7 +552,7 @@ LZ4_FORCE_INLINE int LZ4HC_compress_hashChain (
char* const dest,
int* srcSizePtr,
int const maxOutputSize,
- unsigned maxNbAttempts,
+ int maxNbAttempts,
const limitedOutput_directive limit,
const dictCtx_directive dict
)
@@ -565,7 +582,7 @@ LZ4_FORCE_INLINE int LZ4HC_compress_hashChain (
/* init */
*srcSizePtr = 0;
 if (limit == fillOutput) oend -= LASTLITERALS; /* Hack to support LZ4 format restriction */
- if (inputSize < LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
+ if (inputSize < LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
/* Main Loop */
while (ip <= mflimit) {
@@ -637,7 +654,11 @@ _Search3:
if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
ip = start2;
optr = op;
- if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml2, ref2, limit, oend)) goto _dest_overflow;
+ if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml2, ref2, limit, oend)) {
+ ml = ml2;
+ ref = ref2;
+ goto _dest_overflow;
+ }
continue;
}
@@ -709,17 +730,18 @@ _Search3:
_last_literals:
/* Encode Last Literals */
{ size_t lastRunSize = (size_t)(iend - anchor); /* literals */
- size_t litLength = (lastRunSize + 255 - RUN_MASK) / 255;
- size_t const totalSize = 1 + litLength + lastRunSize;
+ size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
+ size_t const totalSize = 1 + llAdd + lastRunSize;
if (limit == fillOutput) oend += LASTLITERALS; /* restore correct value */
if (limit && (op + totalSize > oend)) {
- if (limit == limitedOutput) return 0; /* Check output limit */
+ if (limit == limitedOutput) return 0;
/* adapt lastRunSize to fill 'dest' */
- lastRunSize = (size_t)(oend - op) - 1;
- litLength = (lastRunSize + 255 - RUN_MASK) / 255;
- lastRunSize -= litLength;
+ lastRunSize = (size_t)(oend - op) - 1 /*token*/;
+ llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
+ lastRunSize -= llAdd;
}
- ip = anchor + lastRunSize;
+ DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize);
+ ip = anchor + lastRunSize; /* can be != iend if limit==fillOutput */
if (lastRunSize >= RUN_MASK) {
size_t accumulator = lastRunSize - RUN_MASK;
@@ -739,9 +761,25 @@ _last_literals:
_dest_overflow:
if (limit == fillOutput) {
+ /* Assumption : ip, anchor, ml and ref must be set correctly */
+ size_t const ll = (size_t)(ip - anchor);
+ size_t const ll_addbytes = (ll + 240) / 255;
+ size_t const ll_totalCost = 1 + ll_addbytes + ll;
+ BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */
+ DEBUGLOG(6, "Last sequence overflowing");
op = optr; /* restore correct out pointer */
+ if (op + ll_totalCost <= maxLitPos) {
+ /* ll validated; now adjust match length */
+ size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
+ size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
+ assert(maxMlSize < INT_MAX); assert(ml >= 0);
+ if ((size_t)ml > maxMlSize) ml = (int)maxMlSize;
+ if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ml >= MFLIMIT) {
+ LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, notLimited, oend);
+ } }
goto _last_literals;
}
+ /* compression failed */
return 0;
}
@@ -752,7 +790,7 @@ static int LZ4HC_compress_optimal( LZ4HC_CCtx_internal* ctx,
int const nbSearches, size_t sufficient_len,
const limitedOutput_directive limit, int const fullUpdate,
const dictCtx_directive dict,
- HCfavor_e favorDecSpeed);
+ const HCfavor_e favorDecSpeed);
LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal (
@@ -769,7 +807,7 @@ LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal (
typedef enum { lz4hc, lz4opt } lz4hc_strat_e;
typedef struct {
lz4hc_strat_e strat;
- U32 nbSearches;
+ int nbSearches;
U32 targetLength;
} cParams_t;
static const cParams_t clTable[LZ4HC_CLEVEL_MAX+1] = {
@@ -788,7 +826,8 @@ LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal (
{ lz4opt,16384,LZ4_OPT_NUM }, /* 12==LZ4HC_CLEVEL_MAX */
};
- DEBUGLOG(4, "LZ4HC_compress_generic(ctx=%p, src=%p, srcSize=%d)", ctx, src, *srcSizePtr);
+ DEBUGLOG(4, "LZ4HC_compress_generic(ctx=%p, src=%p, srcSize=%d, limit=%d)",
+ ctx, src, *srcSizePtr, limit);
if (limit == fillOutput && dstCapacity < 1) return 0; /* Impossible to store anything */
if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size (too large or negative) */
@@ -808,7 +847,7 @@ LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal (
assert(cParam.strat == lz4opt);
result = LZ4HC_compress_optimal(ctx,
src, dst, srcSizePtr, dstCapacity,
- (int)cParam.nbSearches, cParam.targetLength, limit,
+ cParam.nbSearches, cParam.targetLength, limit,
cLevel == LZ4HC_CLEVEL_MAX, /* ultra mode */
dict, favor);
}
@@ -881,27 +920,22 @@ LZ4HC_compress_generic (
int LZ4_sizeofStateHC(void) { return (int)sizeof(LZ4_streamHC_t); }
-#ifndef _MSC_VER /* for some reason, Visual fails the aligment test on 32-bit x86 :
- * it reports an aligment of 8-bytes,
- * while actually aligning LZ4_streamHC_t on 4 bytes. */
static size_t LZ4_streamHC_t_alignment(void)
{
- struct { char c; LZ4_streamHC_t t; } t_a;
- return sizeof(t_a) - sizeof(t_a.t);
-}
+#if LZ4_ALIGN_TEST
+ typedef struct { char c; LZ4_streamHC_t t; } t_a;
+ return sizeof(t_a) - sizeof(LZ4_streamHC_t);
+#else
+ return 1; /* effectively disabled */
#endif
+}
/* state is presumed correctly initialized,
 * in which case its size and alignment have already been validated */
int LZ4_compress_HC_extStateHC_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
{
LZ4HC_CCtx_internal* const ctx = &((LZ4_streamHC_t*)state)->internal_donotuse;
-#ifndef _MSC_VER /* for some reason, Visual fails the aligment test on 32-bit x86 :
- * it reports an aligment of 8-bytes,
- * while actually aligning LZ4_streamHC_t on 4 bytes. */
- assert(((size_t)state & (LZ4_streamHC_t_alignment() - 1)) == 0); /* check alignment */
-#endif
- if (((size_t)(state)&(sizeof(void*)-1)) != 0) return 0; /* Error : state is not aligned for pointers (32 or 64 bits) */
+ if (!LZ4_isAligned(state, LZ4_streamHC_t_alignment())) return 0;
LZ4_resetStreamHC_fast((LZ4_streamHC_t*)state, compressionLevel);
LZ4HC_init_internal (ctx, (const BYTE*)src);
if (dstCapacity < LZ4_compressBound(srcSize))
@@ -950,10 +984,11 @@ int LZ4_compress_HC_destSize(void* state, const char* source, char* dest, int* s
/* allocation */
LZ4_streamHC_t* LZ4_createStreamHC(void)
{
- LZ4_streamHC_t* const LZ4_streamHCPtr = (LZ4_streamHC_t*)ALLOC(sizeof(LZ4_streamHC_t));
- if (LZ4_streamHCPtr==NULL) return NULL;
- LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr)); /* full initialization, malloc'ed buffer can be full of garbage */
- return LZ4_streamHCPtr;
+ LZ4_streamHC_t* const state =
+ (LZ4_streamHC_t*)ALLOC_AND_ZERO(sizeof(LZ4_streamHC_t));
+ if (state == NULL) return NULL;
+ LZ4_setCompressionLevel(state, LZ4HC_CLEVEL_DEFAULT);
+ return state;
}
int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr)
@@ -968,22 +1003,16 @@ int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr)
LZ4_streamHC_t* LZ4_initStreamHC (void* buffer, size_t size)
{
LZ4_streamHC_t* const LZ4_streamHCPtr = (LZ4_streamHC_t*)buffer;
- if (buffer == NULL) return NULL;
- if (size < sizeof(LZ4_streamHC_t)) return NULL;
-#ifndef _MSC_VER /* for some reason, Visual fails the aligment test on 32-bit x86 :
- * it reports an aligment of 8-bytes,
- * while actually aligning LZ4_streamHC_t on 4 bytes. */
- if (((size_t)buffer) & (LZ4_streamHC_t_alignment() - 1)) return NULL; /* alignment check */
-#endif
/* if compilation fails here, LZ4_STREAMHCSIZE must be increased */
LZ4_STATIC_ASSERT(sizeof(LZ4HC_CCtx_internal) <= LZ4_STREAMHCSIZE);
- DEBUGLOG(4, "LZ4_initStreamHC(%p, %u)", LZ4_streamHCPtr, (unsigned)size);
- /* end-base will trigger a clearTable on starting compression */
- LZ4_streamHCPtr->internal_donotuse.end = (const BYTE *)(ptrdiff_t)-1;
- LZ4_streamHCPtr->internal_donotuse.base = NULL;
- LZ4_streamHCPtr->internal_donotuse.dictCtx = NULL;
- LZ4_streamHCPtr->internal_donotuse.favorDecSpeed = 0;
- LZ4_streamHCPtr->internal_donotuse.dirty = 0;
+ DEBUGLOG(4, "LZ4_initStreamHC(%p, %u)", buffer, (unsigned)size);
+ /* check conditions */
+ if (buffer == NULL) return NULL;
+ if (size < sizeof(LZ4_streamHC_t)) return NULL;
+ if (!LZ4_isAligned(buffer, LZ4_streamHC_t_alignment())) return NULL;
+ /* init */
+ { LZ4HC_CCtx_internal* const hcstate = &(LZ4_streamHCPtr->internal_donotuse);
+ MEM_INIT(hcstate, 0, sizeof(*hcstate)); }
LZ4_setCompressionLevel(LZ4_streamHCPtr, LZ4HC_CLEVEL_DEFAULT);
return LZ4_streamHCPtr;
}
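A minimal sketch exercising this validated path (heap allocation, since LZ4_streamHC_t is roughly 256 KB):

    #include "lz4hc.h"
    #include <stdlib.h>

    LZ4_streamHC_t* make_hc_state(void)
    {
        void* const buf = malloc(sizeof(LZ4_streamHC_t));
        /* LZ4_initStreamHC() returns NULL if buf is NULL, too small, or misaligned */
        LZ4_streamHC_t* const s = LZ4_initStreamHC(buf, sizeof(LZ4_streamHC_t));
        if (s == NULL) free(buf);
        return s;
    }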
@@ -1028,7 +1057,7 @@ int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr,
const char* dictionary, int dictSize)
{
LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
- DEBUGLOG(4, "LZ4_loadDictHC(%p, %p, %d)", LZ4_streamHCPtr, dictionary, dictSize);
+ DEBUGLOG(4, "LZ4_loadDictHC(ctx:%p, dict:%p, dictSize:%d)", LZ4_streamHCPtr, dictionary, dictSize);
assert(LZ4_streamHCPtr != NULL);
if (dictSize > 64 KB) {
dictionary += (size_t)dictSize - 64 KB;
@@ -1069,14 +1098,15 @@ static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBl
ctxPtr->dictCtx = NULL;
}
-static int LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
- const char* src, char* dst,
- int* srcSizePtr, int dstCapacity,
- limitedOutput_directive limit)
+static int
+LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
+ const char* src, char* dst,
+ int* srcSizePtr, int dstCapacity,
+ limitedOutput_directive limit)
{
LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
- DEBUGLOG(4, "LZ4_compressHC_continue_generic(ctx=%p, src=%p, srcSize=%d)",
- LZ4_streamHCPtr, src, *srcSizePtr);
+ DEBUGLOG(5, "LZ4_compressHC_continue_generic(ctx=%p, src=%p, srcSize=%d, limit=%d)",
+ LZ4_streamHCPtr, src, *srcSizePtr, limit);
assert(ctxPtr != NULL);
/* auto-init if forgotten */
if (ctxPtr->base == NULL) LZ4HC_init_internal (ctxPtr, (const BYTE*) src);
@@ -1100,8 +1130,7 @@ static int LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
if (sourceEnd > dictEnd) sourceEnd = dictEnd;
ctxPtr->lowLimit = (U32)(sourceEnd - ctxPtr->dictBase);
if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4) ctxPtr->lowLimit = ctxPtr->dictLimit;
- }
- }
+ } }
return LZ4HC_compress_generic (ctxPtr, src, dst, srcSizePtr, dstCapacity, ctxPtr->compressionLevel, limit);
}
@@ -1121,23 +1150,30 @@ int LZ4_compress_HC_continue_destSize (LZ4_streamHC_t* LZ4_streamHCPtr, const ch
-/* dictionary saving */
-
+/* LZ4_saveDictHC :
+ * save history content
+ * into a user-provided buffer
+ * which is then used to continue compression
+ */
int LZ4_saveDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, char* safeBuffer, int dictSize)
{
LZ4HC_CCtx_internal* const streamPtr = &LZ4_streamHCPtr->internal_donotuse;
int const prefixSize = (int)(streamPtr->end - (streamPtr->base + streamPtr->dictLimit));
- DEBUGLOG(4, "LZ4_saveDictHC(%p, %p, %d)", LZ4_streamHCPtr, safeBuffer, dictSize);
+ DEBUGLOG(5, "LZ4_saveDictHC(%p, %p, %d)", LZ4_streamHCPtr, safeBuffer, dictSize);
+ assert(prefixSize >= 0);
if (dictSize > 64 KB) dictSize = 64 KB;
if (dictSize < 4) dictSize = 0;
if (dictSize > prefixSize) dictSize = prefixSize;
- memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
+ if (safeBuffer == NULL) assert(dictSize == 0);
+ if (dictSize > 0)
+ memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
{ U32 const endIndex = (U32)(streamPtr->end - streamPtr->base);
streamPtr->end = (const BYTE*)safeBuffer + dictSize;
streamPtr->base = streamPtr->end - endIndex;
streamPtr->dictLimit = endIndex - (U32)dictSize;
streamPtr->lowLimit = endIndex - (U32)dictSize;
- if (streamPtr->nextToUpdate < streamPtr->dictLimit) streamPtr->nextToUpdate = streamPtr->dictLimit;
+ if (streamPtr->nextToUpdate < streamPtr->dictLimit)
+ streamPtr->nextToUpdate = streamPtr->dictLimit;
}
return dictSize;
}
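A sketch of the save/continue round-trip described above (buffer sizes illustrative):

    #include "lz4hc.h"

    /* Compress 'src' as the next block of a stream, then save up to 64 KB
     * of history so 'src' may be recycled before the next call. */
    int next_block(LZ4_streamHC_t* s, char* dictBuf /* >= 64 KB */,
                   const char* src, int srcSize, char* dst, int dstCapacity)
    {
        int const cSize = LZ4_compress_HC_continue(s, src, dst, srcSize, dstCapacity);
        if (cSize <= 0) return cSize;
        (void)LZ4_saveDictHC(s, dictBuf, 64 * 1024);  /* history now lives in dictBuf */
        return cSize;
    }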
@@ -1287,8 +1323,13 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
const dictCtx_directive dict,
const HCfavor_e favorDecSpeed)
{
+ int retval = 0;
#define TRAILING_LITERALS 3
+#ifdef LZ4HC_HEAPMODE
+ LZ4HC_optimal_t* const opt = (LZ4HC_optimal_t*)ALLOC(sizeof(LZ4HC_optimal_t) * (LZ4_OPT_NUM + TRAILING_LITERALS));
+#else
LZ4HC_optimal_t opt[LZ4_OPT_NUM + TRAILING_LITERALS]; /* ~64 KB, which is a bit large for stack... */
+#endif
const BYTE* ip = (const BYTE*) source;
const BYTE* anchor = ip;
@@ -1298,15 +1339,19 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
BYTE* op = (BYTE*) dst;
BYTE* opSaved = (BYTE*) dst;
BYTE* oend = op + dstCapacity;
+ int ovml = MINMATCH; /* overflow - last sequence */
+ const BYTE* ovref = NULL;
/* init */
+#ifdef LZ4HC_HEAPMODE
+ if (opt == NULL) goto _return_label;
+#endif
DEBUGLOG(5, "LZ4HC_compress_optimal(dst=%p, dstCapa=%u)", dst, (unsigned)dstCapacity);
*srcSizePtr = 0;
 if (limit == fillOutput) oend -= LASTLITERALS; /* Hack to support LZ4 format restriction */
if (sufficient_len >= LZ4_OPT_NUM) sufficient_len = LZ4_OPT_NUM-1;
/* Main Loop */
- assert(ip - anchor < LZ4_MAX_INPUT_SIZE);
while (ip <= mflimit) {
int const llen = (int)(ip - anchor);
int best_mlen, best_off;
@@ -1320,8 +1365,11 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
int const firstML = firstMatch.len;
const BYTE* const matchPos = ip - firstMatch.off;
opSaved = op;
- if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), firstML, matchPos, limit, oend) ) /* updates ip, op and anchor */
+ if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), firstML, matchPos, limit, oend) ) { /* updates ip, op and anchor */
+ ovml = firstML;
+ ovref = matchPos;
goto _dest_overflow;
+ }
continue;
}
@@ -1463,7 +1511,7 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
best_off = opt[last_match_pos].off;
cur = last_match_pos - best_mlen;
- encode: /* cur, last_match_pos, best_mlen, best_off must be set */
+encode: /* cur, last_match_pos, best_mlen, best_off must be set */
assert(cur < LZ4_OPT_NUM);
assert(last_match_pos >= 1); /* == 1 when only one candidate */
DEBUGLOG(6, "reverse traversal, looking for shortest path (last_match_pos=%i)", last_match_pos);
@@ -1493,25 +1541,31 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
assert(ml >= MINMATCH);
assert((offset >= 1) && (offset <= LZ4_DISTANCE_MAX));
opSaved = op;
- if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ip - offset, limit, oend) ) /* updates ip, op and anchor */
+ if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ip - offset, limit, oend) ) { /* updates ip, op and anchor */
+ ovml = ml;
+ ovref = ip - offset;
goto _dest_overflow;
- } }
+ } } }
} /* while (ip <= mflimit) */
- _last_literals:
+_last_literals:
/* Encode Last Literals */
{ size_t lastRunSize = (size_t)(iend - anchor); /* literals */
- size_t litLength = (lastRunSize + 255 - RUN_MASK) / 255;
- size_t const totalSize = 1 + litLength + lastRunSize;
+ size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
+ size_t const totalSize = 1 + llAdd + lastRunSize;
if (limit == fillOutput) oend += LASTLITERALS; /* restore correct value */
if (limit && (op + totalSize > oend)) {
- if (limit == limitedOutput) return 0; /* Check output limit */
+ if (limit == limitedOutput) { /* Check output limit */
+ retval = 0;
+ goto _return_label;
+ }
/* adapt lastRunSize to fill 'dst' */
- lastRunSize = (size_t)(oend - op) - 1;
- litLength = (lastRunSize + 255 - RUN_MASK) / 255;
- lastRunSize -= litLength;
+ lastRunSize = (size_t)(oend - op) - 1 /*token*/;
+ llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
+ lastRunSize -= llAdd;
}
- ip = anchor + lastRunSize;
+ DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize);
+ ip = anchor + lastRunSize; /* can be != iend if limit==fillOutput */
if (lastRunSize >= RUN_MASK) {
size_t accumulator = lastRunSize - RUN_MASK;
@@ -1527,12 +1581,35 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
/* End */
*srcSizePtr = (int) (((const char*)ip) - source);
- return (int) ((char*)op-dst);
+ retval = (int) ((char*)op-dst);
+ goto _return_label;
- _dest_overflow:
- if (limit == fillOutput) {
- op = opSaved; /* restore correct out pointer */
- goto _last_literals;
- }
- return 0;
- }
+_dest_overflow:
+if (limit == fillOutput) {
+ /* Assumption : ip, anchor, ovml and ovref must be set correctly */
+ size_t const ll = (size_t)(ip - anchor);
+ size_t const ll_addbytes = (ll + 240) / 255;
+ size_t const ll_totalCost = 1 + ll_addbytes + ll;
+ BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */
+ DEBUGLOG(6, "Last sequence overflowing (only %i bytes remaining)", (int)(oend-1-opSaved));
+ op = opSaved; /* restore correct out pointer */
+ if (op + ll_totalCost <= maxLitPos) {
+ /* ll validated; now adjust match length */
+ size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
+ size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
+ assert(maxMlSize < INT_MAX); assert(ovml >= 0);
+ if ((size_t)ovml > maxMlSize) ovml = (int)maxMlSize;
+ if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ovml >= MFLIMIT) {
+ DEBUGLOG(6, "Space to end : %i + ml (%i)", (int)((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1), ovml);
+ DEBUGLOG(6, "Before : ip = %p, anchor = %p", ip, anchor);
+ LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ovml, ovref, notLimited, oend);
+ DEBUGLOG(6, "After : ip = %p, anchor = %p", ip, anchor);
+ } }
+ goto _last_literals;
+}
+_return_label:
+#ifdef LZ4HC_HEAPMODE
+ FREEMEM(opt);
+#endif
+ return retval;
+}
diff --git a/lib/lz4hc.h b/lib/lz4hc.h
index 44e35bb..3d441fb 100644
--- a/lib/lz4hc.h
+++ b/lib/lz4hc.h
@@ -198,57 +198,32 @@ LZ4LIB_API int LZ4_saveDictHC (LZ4_streamHC_t* streamHCPtr, char* safeBuffer, in
#define LZ4HC_HASH_MASK (LZ4HC_HASHTABLESIZE - 1)
-#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-#include <stdint.h>
-
-typedef struct LZ4HC_CCtx_internal LZ4HC_CCtx_internal;
-struct LZ4HC_CCtx_internal
-{
- uint32_t hashTable[LZ4HC_HASHTABLESIZE];
- uint16_t chainTable[LZ4HC_MAXD];
- const uint8_t* end; /* next block here to continue on current prefix */
- const uint8_t* base; /* All index relative to this position */
- const uint8_t* dictBase; /* alternate base for extDict */
- uint32_t dictLimit; /* below that point, need extDict */
- uint32_t lowLimit; /* below that point, no more dict */
- uint32_t nextToUpdate; /* index from which to continue dictionary update */
- short compressionLevel;
- int8_t favorDecSpeed; /* favor decompression speed if this flag set,
- otherwise, favor compression ratio */
- int8_t dirty; /* stream has to be fully reset if this flag is set */
- const LZ4HC_CCtx_internal* dictCtx;
-};
-
-#else
-
typedef struct LZ4HC_CCtx_internal LZ4HC_CCtx_internal;
struct LZ4HC_CCtx_internal
{
- unsigned int hashTable[LZ4HC_HASHTABLESIZE];
- unsigned short chainTable[LZ4HC_MAXD];
- const unsigned char* end; /* next block here to continue on current prefix */
- const unsigned char* base; /* All index relative to this position */
- const unsigned char* dictBase; /* alternate base for extDict */
- unsigned int dictLimit; /* below that point, need extDict */
- unsigned int lowLimit; /* below that point, no more dict */
- unsigned int nextToUpdate; /* index from which to continue dictionary update */
- short compressionLevel;
- char favorDecSpeed; /* favor decompression speed if this flag set,
- otherwise, favor compression ratio */
- char dirty; /* stream has to be fully reset if this flag is set */
+ LZ4_u32 hashTable[LZ4HC_HASHTABLESIZE];
+ LZ4_u16 chainTable[LZ4HC_MAXD];
+ const LZ4_byte* end; /* next block here to continue on current prefix */
+ const LZ4_byte* base; /* All index relative to this position */
+ const LZ4_byte* dictBase; /* alternate base for extDict */
+ LZ4_u32 dictLimit; /* below that point, need extDict */
+ LZ4_u32 lowLimit; /* below that point, no more dict */
+ LZ4_u32 nextToUpdate; /* index from which to continue dictionary update */
+ short compressionLevel;
+ LZ4_i8 favorDecSpeed; /* favor decompression speed if this flag set,
+ otherwise, favor compression ratio */
+ LZ4_i8 dirty; /* stream has to be fully reset if this flag is set */
const LZ4HC_CCtx_internal* dictCtx;
};
-#endif
-
/* Do not use these definitions directly !
* Declare or allocate an LZ4_streamHC_t instead.
*/
-#define LZ4_STREAMHCSIZE (4*LZ4HC_HASHTABLESIZE + 2*LZ4HC_MAXD + 56 + ((sizeof(void*)==16) ? 56 : 0) /* AS400*/ ) /* 262200 or 262256*/
-#define LZ4_STREAMHCSIZE_SIZET (LZ4_STREAMHCSIZE / sizeof(size_t))
+#define LZ4_STREAMHCSIZE 262200 /* static size, for inter-version compatibility */
+#define LZ4_STREAMHCSIZE_VOIDP (LZ4_STREAMHCSIZE / sizeof(void*))
union LZ4_streamHC_u {
- size_t table[LZ4_STREAMHCSIZE_SIZET];
+ void* table[LZ4_STREAMHCSIZE_VOIDP];
LZ4HC_CCtx_internal internal_donotuse;
}; /* previously typedef'd to LZ4_streamHC_t */