Cleanup: Remove the now unused lzma and lzo libraries from extern

LZO and LZMA have been unused in Blender since #141461

Pull Request: https://projects.blender.org/blender/blender/pulls/146842
Author: Sebastian Parborg
Date: 2025-09-26 15:55:47 +02:00
Committed by: Sebastian Parborg
Parent: 9aade6bd61
Commit: fcc1502146
42 changed files with 0 additions and 22877 deletions


@@ -521,11 +521,6 @@ else()
endif()
# Compression
option(WITH_LZO "Enable fast LZO compression (used for pointcache)" ON)
option(WITH_LZMA "Enable best LZMA compression, (used for pointcache)" ON)
if(UNIX AND NOT APPLE)
option(WITH_SYSTEM_LZO "Use the system LZO library" OFF)
endif()
option(WITH_DRACO "Enable Draco mesh compression Python module (used for glTF)" ON)
# Camera/motion tracking
@@ -2886,10 +2881,6 @@ if(FIRST_RUN)
info_cfg_option(WITH_SDL)
info_cfg_option(WITH_WASAPI)
info_cfg_text("Compression:")
info_cfg_option(WITH_LZMA)
info_cfg_option(WITH_LZO)
if(WITH_PYTHON)
info_cfg_text("Python:")
info_cfg_option(WITH_PYTHON_INSTALL)


@@ -34,8 +34,6 @@ set(WITH_INPUT_IME ON CACHE BOOL "" FORCE)
set(WITH_INTERNATIONAL ON CACHE BOOL "" FORCE)
set(WITH_LIBMV ON CACHE BOOL "" FORCE)
set(WITH_LIBMV_SCHUR_SPECIALIZATIONS ON CACHE BOOL "" FORCE)
set(WITH_LZMA ON CACHE BOOL "" FORCE)
set(WITH_LZO ON CACHE BOOL "" FORCE)
set(WITH_MOD_FLUID ON CACHE BOOL "" FORCE)
set(WITH_MOD_OCEANSIM ON CACHE BOOL "" FORCE)
set(WITH_MOD_REMESH ON CACHE BOOL "" FORCE)


@@ -41,8 +41,6 @@ set(WITH_IO_FBX OFF CACHE BOOL "" FORCE)
set(WITH_JACK OFF CACHE BOOL "" FORCE)
set(WITH_LIBMV OFF CACHE BOOL "" FORCE)
set(WITH_LLVM OFF CACHE BOOL "" FORCE)
set(WITH_LZMA OFF CACHE BOOL "" FORCE)
set(WITH_LZO OFF CACHE BOOL "" FORCE)
set(WITH_MANIFOLD OFF CACHE BOOL "" FORCE)
set(WITH_MOD_FLUID OFF CACHE BOOL "" FORCE)
set(WITH_MOD_OCEANSIM OFF CACHE BOOL "" FORCE)


@@ -38,8 +38,6 @@ set(WITH_INPUT_IME ON CACHE BOOL "" FORCE)
set(WITH_INTERNATIONAL ON CACHE BOOL "" FORCE)
set(WITH_LIBMV ON CACHE BOOL "" FORCE)
set(WITH_LIBMV_SCHUR_SPECIALIZATIONS ON CACHE BOOL "" FORCE)
set(WITH_LZMA ON CACHE BOOL "" FORCE)
set(WITH_LZO ON CACHE BOOL "" FORCE)
set(WITH_MOD_FLUID ON CACHE BOOL "" FORCE)
set(WITH_MOD_OCEANSIM ON CACHE BOOL "" FORCE)
set(WITH_MOD_REMESH ON CACHE BOOL "" FORCE)


@@ -667,13 +667,6 @@ if(WITH_SYSTEM_FREETYPE)
set(BROTLI_LIBRARIES "")
endif()
if(WITH_LZO AND WITH_SYSTEM_LZO)
find_package_wrapper(LZO)
if(NOT LZO_FOUND)
message(FATAL_ERROR "Failed finding system LZO version!")
endif()
endif()
if(WITH_SYSTEM_EIGEN3)
find_package_wrapper(Eigen3)
if(NOT EIGEN3_FOUND)


@@ -61,14 +61,6 @@ if(WITH_BINRELOC)
add_subdirectory(binreloc)
endif()
if(WITH_LZO AND NOT WITH_SYSTEM_LZO)
add_subdirectory(lzo)
endif()
if(WITH_LZMA)
add_subdirectory(lzma)
endif()
if(WITH_CYCLES OR WITH_OPENSUBDIV)
if((WITH_CYCLES_DEVICE_CUDA OR WITH_CYCLES_DEVICE_OPTIX) AND WITH_CUDA_DYNLOAD)
add_subdirectory(cuew)

extern/lzma/7zTypes.h vendored (597 lines removed)

@@ -1,597 +0,0 @@
/* 7zTypes.h -- Basic types
2023-04-02 : Igor Pavlov : Public domain */
#ifndef ZIP7_7Z_TYPES_H
#define ZIP7_7Z_TYPES_H
#ifdef _WIN32
/* #include <windows.h> */
#else
#include <errno.h>
#endif
#include <stddef.h>
#ifndef EXTERN_C_BEGIN
#ifdef __cplusplus
#define EXTERN_C_BEGIN extern "C" {
#define EXTERN_C_END }
#else
#define EXTERN_C_BEGIN
#define EXTERN_C_END
#endif
#endif
EXTERN_C_BEGIN
#define SZ_OK 0
#define SZ_ERROR_DATA 1
#define SZ_ERROR_MEM 2
#define SZ_ERROR_CRC 3
#define SZ_ERROR_UNSUPPORTED 4
#define SZ_ERROR_PARAM 5
#define SZ_ERROR_INPUT_EOF 6
#define SZ_ERROR_OUTPUT_EOF 7
#define SZ_ERROR_READ 8
#define SZ_ERROR_WRITE 9
#define SZ_ERROR_PROGRESS 10
#define SZ_ERROR_FAIL 11
#define SZ_ERROR_THREAD 12
#define SZ_ERROR_ARCHIVE 16
#define SZ_ERROR_NO_ARCHIVE 17
typedef int SRes;
#ifdef _MSC_VER
#if _MSC_VER > 1200
#define MY_ALIGN(n) __declspec(align(n))
#else
#define MY_ALIGN(n)
#endif
#else
/*
// C11/C++11:
#include <stdalign.h>
#define MY_ALIGN(n) alignas(n)
*/
#define MY_ALIGN(n) __attribute__ ((aligned(n)))
#endif
#ifdef _WIN32
/* typedef DWORD WRes; */
typedef unsigned WRes;
#define MY_SRes_HRESULT_FROM_WRes(x) HRESULT_FROM_WIN32(x)
// #define MY_HRES_ERROR_INTERNAL_ERROR MY_SRes_HRESULT_FROM_WRes(ERROR_INTERNAL_ERROR)
#else // _WIN32
// #define ENV_HAVE_LSTAT
typedef int WRes;
// (FACILITY_ERRNO = 0x800) is 7zip's FACILITY constant to represent (errno) errors in HRESULT
#define MY_FACILITY_ERRNO 0x800
#define MY_FACILITY_WIN32 7
#define MY_FACILITY_WRes MY_FACILITY_ERRNO
#define MY_HRESULT_FROM_errno_CONST_ERROR(x) ((HRESULT)( \
( (HRESULT)(x) & 0x0000FFFF) \
| (MY_FACILITY_WRes << 16) \
| (HRESULT)0x80000000 ))
#define MY_SRes_HRESULT_FROM_WRes(x) \
((HRESULT)(x) <= 0 ? ((HRESULT)(x)) : MY_HRESULT_FROM_errno_CONST_ERROR(x))
// we call macro HRESULT_FROM_WIN32 for system errors (WRes) that are (errno)
#define HRESULT_FROM_WIN32(x) MY_SRes_HRESULT_FROM_WRes(x)
/*
#define ERROR_FILE_NOT_FOUND 2L
#define ERROR_ACCESS_DENIED 5L
#define ERROR_NO_MORE_FILES 18L
#define ERROR_LOCK_VIOLATION 33L
#define ERROR_FILE_EXISTS 80L
#define ERROR_DISK_FULL 112L
#define ERROR_NEGATIVE_SEEK 131L
#define ERROR_ALREADY_EXISTS 183L
#define ERROR_DIRECTORY 267L
#define ERROR_TOO_MANY_POSTS 298L
#define ERROR_INTERNAL_ERROR 1359L
#define ERROR_INVALID_REPARSE_DATA 4392L
#define ERROR_REPARSE_TAG_INVALID 4393L
#define ERROR_REPARSE_TAG_MISMATCH 4394L
*/
// we use errno equivalents for some WIN32 errors:
#define ERROR_INVALID_PARAMETER EINVAL
#define ERROR_INVALID_FUNCTION EINVAL
#define ERROR_ALREADY_EXISTS EEXIST
#define ERROR_FILE_EXISTS EEXIST
#define ERROR_PATH_NOT_FOUND ENOENT
#define ERROR_FILE_NOT_FOUND ENOENT
#define ERROR_DISK_FULL ENOSPC
// #define ERROR_INVALID_HANDLE EBADF
// we use FACILITY_WIN32 for errors that has no errno equivalent
// Too many posts were made to a semaphore.
#define ERROR_TOO_MANY_POSTS ((HRESULT)0x8007012AL)
#define ERROR_INVALID_REPARSE_DATA ((HRESULT)0x80071128L)
#define ERROR_REPARSE_TAG_INVALID ((HRESULT)0x80071129L)
// if (MY_FACILITY_WRes != FACILITY_WIN32),
// we use FACILITY_WIN32 for COM errors:
#define E_OUTOFMEMORY ((HRESULT)0x8007000EL)
#define E_INVALIDARG ((HRESULT)0x80070057L)
#define MY_E_ERROR_NEGATIVE_SEEK ((HRESULT)0x80070083L)
/*
// we can use FACILITY_ERRNO for some COM errors, that have errno equivalents:
#define E_OUTOFMEMORY MY_HRESULT_FROM_errno_CONST_ERROR(ENOMEM)
#define E_INVALIDARG MY_HRESULT_FROM_errno_CONST_ERROR(EINVAL)
#define MY_E_ERROR_NEGATIVE_SEEK MY_HRESULT_FROM_errno_CONST_ERROR(EINVAL)
*/
#define TEXT(quote) quote
#define FILE_ATTRIBUTE_READONLY 0x0001
#define FILE_ATTRIBUTE_HIDDEN 0x0002
#define FILE_ATTRIBUTE_SYSTEM 0x0004
#define FILE_ATTRIBUTE_DIRECTORY 0x0010
#define FILE_ATTRIBUTE_ARCHIVE 0x0020
#define FILE_ATTRIBUTE_DEVICE 0x0040
#define FILE_ATTRIBUTE_NORMAL 0x0080
#define FILE_ATTRIBUTE_TEMPORARY 0x0100
#define FILE_ATTRIBUTE_SPARSE_FILE 0x0200
#define FILE_ATTRIBUTE_REPARSE_POINT 0x0400
#define FILE_ATTRIBUTE_COMPRESSED 0x0800
#define FILE_ATTRIBUTE_OFFLINE 0x1000
#define FILE_ATTRIBUTE_NOT_CONTENT_INDEXED 0x2000
#define FILE_ATTRIBUTE_ENCRYPTED 0x4000
#define FILE_ATTRIBUTE_UNIX_EXTENSION 0x8000 /* trick for Unix */
#endif
#ifndef RINOK
#define RINOK(x) { const int _result_ = (x); if (_result_ != 0) return _result_; }
#endif
#ifndef RINOK_WRes
#define RINOK_WRes(x) { const WRes _result_ = (x); if (_result_ != 0) return _result_; }
#endif
typedef unsigned char Byte;
typedef short Int16;
typedef unsigned short UInt16;
#ifdef Z7_DECL_Int32_AS_long
typedef long Int32;
typedef unsigned long UInt32;
#else
typedef int Int32;
typedef unsigned int UInt32;
#endif
#ifndef _WIN32
typedef int INT;
typedef Int32 INT32;
typedef unsigned int UINT;
typedef UInt32 UINT32;
typedef INT32 LONG; // LONG, ULONG and DWORD must be 32-bit for _WIN32 compatibility
typedef UINT32 ULONG;
#undef DWORD
typedef UINT32 DWORD;
#define VOID void
#define HRESULT LONG
typedef void *LPVOID;
// typedef void VOID;
// typedef ULONG_PTR DWORD_PTR, *PDWORD_PTR;
// gcc / clang on Unix : sizeof(long==sizeof(void*) in 32 or 64 bits)
typedef long INT_PTR;
typedef unsigned long UINT_PTR;
typedef long LONG_PTR;
typedef unsigned long DWORD_PTR;
typedef size_t SIZE_T;
#endif // _WIN32
#define MY_HRES_ERROR_INTERNAL_ERROR ((HRESULT)0x8007054FL)
#ifdef Z7_DECL_Int64_AS_long
typedef long Int64;
typedef unsigned long UInt64;
#else
#if (defined(_MSC_VER) || defined(__BORLANDC__)) && !defined(__clang__)
typedef __int64 Int64;
typedef unsigned __int64 UInt64;
#else
#if defined(__clang__) || defined(__GNUC__)
#include <stdint.h>
typedef int64_t Int64;
typedef uint64_t UInt64;
#else
typedef long long int Int64;
typedef unsigned long long int UInt64;
// #define UINT64_CONST(n) n ## ULL
#endif
#endif
#endif
#define UINT64_CONST(n) n
#ifdef Z7_DECL_SizeT_AS_unsigned_int
typedef unsigned int SizeT;
#else
typedef size_t SizeT;
#endif
/*
#if (defined(_MSC_VER) && _MSC_VER <= 1200)
typedef size_t MY_uintptr_t;
#else
#include <stdint.h>
typedef uintptr_t MY_uintptr_t;
#endif
*/
typedef int BoolInt;
/* typedef BoolInt Bool; */
#define True 1
#define False 0
#ifdef _WIN32
#define Z7_STDCALL __stdcall
#else
#define Z7_STDCALL
#endif
#ifdef _MSC_VER
#if _MSC_VER >= 1300
#define Z7_NO_INLINE __declspec(noinline)
#else
#define Z7_NO_INLINE
#endif
#define Z7_FORCE_INLINE __forceinline
#define Z7_CDECL __cdecl
#define Z7_FASTCALL __fastcall
#else // _MSC_VER
#if (defined(__GNUC__) && (__GNUC__ >= 4)) \
|| (defined(__clang__) && (__clang_major__ >= 4)) \
|| defined(__INTEL_COMPILER) \
|| defined(__xlC__)
#define Z7_NO_INLINE __attribute__((noinline))
#define Z7_FORCE_INLINE __attribute__((always_inline)) inline
#else
#define Z7_NO_INLINE
#define Z7_FORCE_INLINE
#endif
#define Z7_CDECL
#if defined(_M_IX86) \
|| defined(__i386__)
// #define Z7_FASTCALL __attribute__((fastcall))
// #define Z7_FASTCALL __attribute__((cdecl))
#define Z7_FASTCALL
#elif defined(MY_CPU_AMD64)
// #define Z7_FASTCALL __attribute__((ms_abi))
#define Z7_FASTCALL
#else
#define Z7_FASTCALL
#endif
#endif // _MSC_VER
/* The following interfaces use first parameter as pointer to structure */
// #define Z7_C_IFACE_CONST_QUAL
#define Z7_C_IFACE_CONST_QUAL const
#define Z7_C_IFACE_DECL(a) \
struct a ## _; \
typedef Z7_C_IFACE_CONST_QUAL struct a ## _ * a ## Ptr; \
typedef struct a ## _ a; \
struct a ## _
Z7_C_IFACE_DECL (IByteIn)
{
Byte (*Read)(IByteInPtr p); /* reads one byte, returns 0 in case of EOF or error */
};
#define IByteIn_Read(p) (p)->Read(p)
Z7_C_IFACE_DECL (IByteOut)
{
void (*Write)(IByteOutPtr p, Byte b);
};
#define IByteOut_Write(p, b) (p)->Write(p, b)
Z7_C_IFACE_DECL (ISeqInStream)
{
SRes (*Read)(ISeqInStreamPtr p, void *buf, size_t *size);
/* if (input(*size) != 0 && output(*size) == 0) means end_of_stream.
(output(*size) < input(*size)) is allowed */
};
#define ISeqInStream_Read(p, buf, size) (p)->Read(p, buf, size)
/* try to read as much as avail in stream and limited by (*processedSize) */
SRes SeqInStream_ReadMax(ISeqInStreamPtr stream, void *buf, size_t *processedSize);
/* it can return SZ_ERROR_INPUT_EOF */
// SRes SeqInStream_Read(ISeqInStreamPtr stream, void *buf, size_t size);
// SRes SeqInStream_Read2(ISeqInStreamPtr stream, void *buf, size_t size, SRes errorType);
SRes SeqInStream_ReadByte(ISeqInStreamPtr stream, Byte *buf);
Z7_C_IFACE_DECL (ISeqOutStream)
{
size_t (*Write)(ISeqOutStreamPtr p, const void *buf, size_t size);
/* Returns: result - the number of actually written bytes.
(result < size) means error */
};
#define ISeqOutStream_Write(p, buf, size) (p)->Write(p, buf, size)
typedef enum
{
SZ_SEEK_SET = 0,
SZ_SEEK_CUR = 1,
SZ_SEEK_END = 2
} ESzSeek;
Z7_C_IFACE_DECL (ISeekInStream)
{
SRes (*Read)(ISeekInStreamPtr p, void *buf, size_t *size); /* same as ISeqInStream::Read */
SRes (*Seek)(ISeekInStreamPtr p, Int64 *pos, ESzSeek origin);
};
#define ISeekInStream_Read(p, buf, size) (p)->Read(p, buf, size)
#define ISeekInStream_Seek(p, pos, origin) (p)->Seek(p, pos, origin)
Z7_C_IFACE_DECL (ILookInStream)
{
SRes (*Look)(ILookInStreamPtr p, const void **buf, size_t *size);
/* if (input(*size) != 0 && output(*size) == 0) means end_of_stream.
(output(*size) > input(*size)) is not allowed
(output(*size) < input(*size)) is allowed */
SRes (*Skip)(ILookInStreamPtr p, size_t offset);
/* offset must be <= output(*size) of Look */
SRes (*Read)(ILookInStreamPtr p, void *buf, size_t *size);
/* reads directly (without buffer). It's same as ISeqInStream::Read */
SRes (*Seek)(ILookInStreamPtr p, Int64 *pos, ESzSeek origin);
};
#define ILookInStream_Look(p, buf, size) (p)->Look(p, buf, size)
#define ILookInStream_Skip(p, offset) (p)->Skip(p, offset)
#define ILookInStream_Read(p, buf, size) (p)->Read(p, buf, size)
#define ILookInStream_Seek(p, pos, origin) (p)->Seek(p, pos, origin)
SRes LookInStream_LookRead(ILookInStreamPtr stream, void *buf, size_t *size);
SRes LookInStream_SeekTo(ILookInStreamPtr stream, UInt64 offset);
/* reads via ILookInStream::Read */
SRes LookInStream_Read2(ILookInStreamPtr stream, void *buf, size_t size, SRes errorType);
SRes LookInStream_Read(ILookInStreamPtr stream, void *buf, size_t size);
typedef struct
{
ILookInStream vt;
ISeekInStreamPtr realStream;
size_t pos;
size_t size; /* it's data size */
/* the following variables must be set outside */
Byte *buf;
size_t bufSize;
} CLookToRead2;
void LookToRead2_CreateVTable(CLookToRead2 *p, int lookahead);
#define LookToRead2_INIT(p) { (p)->pos = (p)->size = 0; }
typedef struct
{
ISeqInStream vt;
ILookInStreamPtr realStream;
} CSecToLook;
void SecToLook_CreateVTable(CSecToLook *p);
typedef struct
{
ISeqInStream vt;
ILookInStreamPtr realStream;
} CSecToRead;
void SecToRead_CreateVTable(CSecToRead *p);
Z7_C_IFACE_DECL (ICompressProgress)
{
SRes (*Progress)(ICompressProgressPtr p, UInt64 inSize, UInt64 outSize);
/* Returns: result. (result != SZ_OK) means break.
Value (UInt64)(Int64)-1 for size means unknown value. */
};
#define ICompressProgress_Progress(p, inSize, outSize) (p)->Progress(p, inSize, outSize)
typedef struct ISzAlloc ISzAlloc;
typedef const ISzAlloc * ISzAllocPtr;
struct ISzAlloc
{
void *(*Alloc)(ISzAllocPtr p, size_t size);
void (*Free)(ISzAllocPtr p, void *address); /* address can be 0 */
};
#define ISzAlloc_Alloc(p, size) (p)->Alloc(p, size)
#define ISzAlloc_Free(p, a) (p)->Free(p, a)
/* deprecated */
#define IAlloc_Alloc(p, size) ISzAlloc_Alloc(p, size)
#define IAlloc_Free(p, a) ISzAlloc_Free(p, a)
#ifndef MY_offsetof
#ifdef offsetof
#define MY_offsetof(type, m) offsetof(type, m)
/*
#define MY_offsetof(type, m) FIELD_OFFSET(type, m)
*/
#else
#define MY_offsetof(type, m) ((size_t)&(((type *)0)->m))
#endif
#endif
#ifndef Z7_container_of
/*
#define Z7_container_of(ptr, type, m) container_of(ptr, type, m)
#define Z7_container_of(ptr, type, m) CONTAINING_RECORD(ptr, type, m)
#define Z7_container_of(ptr, type, m) ((type *)((char *)(ptr) - offsetof(type, m)))
#define Z7_container_of(ptr, type, m) (&((type *)0)->m == (ptr), ((type *)(((char *)(ptr)) - MY_offsetof(type, m))))
*/
/*
GCC shows warning: "perhaps the 'offsetof' macro was used incorrectly"
GCC 3.4.4 : classes with constructor
GCC 4.8.1 : classes with non-public variable members"
*/
#define Z7_container_of(ptr, type, m) \
((type *)(void *)((char *)(void *) \
(1 ? (ptr) : &((type *)NULL)->m) - MY_offsetof(type, m)))
#define Z7_container_of_CONST(ptr, type, m) \
((const type *)(const void *)((const char *)(const void *) \
(1 ? (ptr) : &((type *)NULL)->m) - MY_offsetof(type, m)))
/*
#define Z7_container_of_NON_CONST_FROM_CONST(ptr, type, m) \
((type *)(void *)(const void *)((const char *)(const void *) \
(1 ? (ptr) : &((type *)NULL)->m) - MY_offsetof(type, m)))
*/
#endif
#define Z7_CONTAINER_FROM_VTBL_SIMPLE(ptr, type, m) ((type *)(void *)(ptr))
// #define Z7_CONTAINER_FROM_VTBL(ptr, type, m) Z7_CONTAINER_FROM_VTBL_SIMPLE(ptr, type, m)
#define Z7_CONTAINER_FROM_VTBL(ptr, type, m) Z7_container_of(ptr, type, m)
// #define Z7_CONTAINER_FROM_VTBL(ptr, type, m) Z7_container_of_NON_CONST_FROM_CONST(ptr, type, m)
#define Z7_CONTAINER_FROM_VTBL_CONST(ptr, type, m) Z7_container_of_CONST(ptr, type, m)
#define Z7_CONTAINER_FROM_VTBL_CLS(ptr, type, m) Z7_CONTAINER_FROM_VTBL_SIMPLE(ptr, type, m)
/*
#define Z7_CONTAINER_FROM_VTBL_CLS(ptr, type, m) Z7_CONTAINER_FROM_VTBL(ptr, type, m)
*/
#if defined (__clang__) || defined(__GNUC__)
#define Z7_DIAGNOSCTIC_IGNORE_BEGIN_CAST_QUAL \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
#define Z7_DIAGNOSCTIC_IGNORE_END_CAST_QUAL \
_Pragma("GCC diagnostic pop")
#else
#define Z7_DIAGNOSCTIC_IGNORE_BEGIN_CAST_QUAL
#define Z7_DIAGNOSCTIC_IGNORE_END_CAST_QUAL
#endif
#define Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR(ptr, type, m, p) \
Z7_DIAGNOSCTIC_IGNORE_BEGIN_CAST_QUAL \
type *p = Z7_CONTAINER_FROM_VTBL(ptr, type, m); \
Z7_DIAGNOSCTIC_IGNORE_END_CAST_QUAL
#define Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(type) \
Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR(pp, type, vt, p)
// #define ZIP7_DECLARE_HANDLE(name) typedef void *name;
#define Z7_DECLARE_HANDLE(name) struct name##_dummy{int unused;}; typedef struct name##_dummy *name;
#define Z7_memset_0_ARRAY(a) memset((a), 0, sizeof(a))
#ifndef Z7_ARRAY_SIZE
#define Z7_ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#endif
#ifdef _WIN32
#define CHAR_PATH_SEPARATOR '\\'
#define WCHAR_PATH_SEPARATOR L'\\'
#define STRING_PATH_SEPARATOR "\\"
#define WSTRING_PATH_SEPARATOR L"\\"
#else
#define CHAR_PATH_SEPARATOR '/'
#define WCHAR_PATH_SEPARATOR L'/'
#define STRING_PATH_SEPARATOR "/"
#define WSTRING_PATH_SEPARATOR L"/"
#endif
#define k_PropVar_TimePrec_0 0
#define k_PropVar_TimePrec_Unix 1
#define k_PropVar_TimePrec_DOS 2
#define k_PropVar_TimePrec_HighPrec 3
#define k_PropVar_TimePrec_Base 16
#define k_PropVar_TimePrec_100ns (k_PropVar_TimePrec_Base + 7)
#define k_PropVar_TimePrec_1ns (k_PropVar_TimePrec_Base + 9)
EXTERN_C_END
#endif
/*
#ifndef Z7_ST
#ifdef _7ZIP_ST
#define Z7_ST
#endif
#endif
*/

extern/lzma/7zWindows.h vendored (101 lines removed)

@@ -1,101 +0,0 @@
/* 7zWindows.h -- StdAfx
2023-04-02 : Igor Pavlov : Public domain */
#ifndef ZIP7_INC_7Z_WINDOWS_H
#define ZIP7_INC_7Z_WINDOWS_H
#ifdef _WIN32
#if defined(__clang__)
# pragma clang diagnostic push
#endif
#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning(disable : 4668) // '_WIN32_WINNT' is not defined as a preprocessor macro, replacing with '0' for '#if/#elif'
#if _MSC_VER == 1900
// for old kit10 versions
// #pragma warning(disable : 4255) // winuser.h(13979): warning C4255: 'GetThreadDpiAwarenessContext':
#endif
// win10 Windows Kit:
#endif // _MSC_VER
#if defined(_MSC_VER) && _MSC_VER <= 1200 && !defined(_WIN64)
// for msvc6 without sdk2003
#define RPC_NO_WINDOWS_H
#endif
#if defined(__MINGW32__) || defined(__MINGW64__)
// #if defined(__GNUC__) && !defined(__clang__)
#include <windows.h>
#else
#include <Windows.h>
#endif
// #include <basetsd.h>
// #include <wtypes.h>
// but if precompiled with clang-cl then we need
// #include <windows.h>
#if defined(_MSC_VER)
#pragma warning(pop)
#endif
#if defined(__clang__)
# pragma clang diagnostic pop
#endif
#if defined(_MSC_VER) && _MSC_VER <= 1200 && !defined(_WIN64)
#ifndef _W64
typedef long LONG_PTR, *PLONG_PTR;
typedef unsigned long ULONG_PTR, *PULONG_PTR;
typedef ULONG_PTR DWORD_PTR, *PDWORD_PTR;
#define Z7_OLD_WIN_SDK
#endif // _W64
#endif // _MSC_VER == 1200
#ifdef Z7_OLD_WIN_SDK
#ifndef INVALID_FILE_ATTRIBUTES
#define INVALID_FILE_ATTRIBUTES ((DWORD)-1)
#endif
#ifndef INVALID_SET_FILE_POINTER
#define INVALID_SET_FILE_POINTER ((DWORD)-1)
#endif
#ifndef FILE_SPECIAL_ACCESS
#define FILE_SPECIAL_ACCESS (FILE_ANY_ACCESS)
#endif
// ShlObj.h:
// #define BIF_NEWDIALOGSTYLE 0x0040
#pragma warning(disable : 4201)
// #pragma warning(disable : 4115)
#undef VARIANT_TRUE
#define VARIANT_TRUE ((VARIANT_BOOL)-1)
#endif
#endif // Z7_OLD_WIN_SDK
#ifdef UNDER_CE
#undef VARIANT_TRUE
#define VARIANT_TRUE ((VARIANT_BOOL)-1)
#endif
#if defined(_MSC_VER)
#if _MSC_VER >= 1400 && _MSC_VER <= 1600
// BaseTsd.h(148) : 'HandleToULong' : unreferenced inline function has been removed
// string.h
// #pragma warning(disable : 4514)
#endif
#endif
/* #include "7zTypes.h" */
#endif

extern/lzma/Alloc.c vendored (535 lines removed)

@@ -1,535 +0,0 @@
/* Alloc.c -- Memory allocation functions
2023-04-02 : Igor Pavlov : Public domain */
#include "Precomp.h"
#ifdef _WIN32
#include "7zWindows.h"
#endif
#include <stdlib.h>
#include "Alloc.h"
#ifdef _WIN32
#ifdef Z7_LARGE_PAGES
#if defined(__clang__) || defined(__GNUC__)
typedef void (*Z7_voidFunction)(void);
#define MY_CAST_FUNC (Z7_voidFunction)
#elif defined(_MSC_VER) && _MSC_VER > 1920
#define MY_CAST_FUNC (void *)
// #pragma warning(disable : 4191) // 'type cast': unsafe conversion from 'FARPROC' to 'void (__cdecl *)()'
#else
#define MY_CAST_FUNC
#endif
#endif // Z7_LARGE_PAGES
#endif // _WIN32
// #define SZ_ALLOC_DEBUG
/* #define SZ_ALLOC_DEBUG */
/* use SZ_ALLOC_DEBUG to debug alloc/free operations */
#ifdef SZ_ALLOC_DEBUG
#include <string.h>
#include <stdio.h>
static int g_allocCount = 0;
#ifdef _WIN32
static int g_allocCountMid = 0;
static int g_allocCountBig = 0;
#endif
#define CONVERT_INT_TO_STR(charType, tempSize) \
char temp[tempSize]; unsigned i = 0; \
while (val >= 10) { temp[i++] = (char)('0' + (unsigned)(val % 10)); val /= 10; } \
*s++ = (charType)('0' + (unsigned)val); \
while (i != 0) { i--; *s++ = temp[i]; } \
*s = 0;
static void ConvertUInt64ToString(UInt64 val, char *s)
{
CONVERT_INT_TO_STR(char, 24)
}
#define GET_HEX_CHAR(t) ((char)(((t < 10) ? ('0' + t) : ('A' + (t - 10)))))
static void ConvertUInt64ToHex(UInt64 val, char *s)
{
UInt64 v = val;
unsigned i;
for (i = 1;; i++)
{
v >>= 4;
if (v == 0)
break;
}
s[i] = 0;
do
{
unsigned t = (unsigned)(val & 0xF);
val >>= 4;
s[--i] = GET_HEX_CHAR(t);
}
while (i);
}
#define DEBUG_OUT_STREAM stderr
static void Print(const char *s)
{
fputs(s, DEBUG_OUT_STREAM);
}
static void PrintAligned(const char *s, size_t align)
{
size_t len = strlen(s);
for(;;)
{
fputc(' ', DEBUG_OUT_STREAM);
if (len >= align)
break;
++len;
}
Print(s);
}
static void PrintLn(void)
{
Print("\n");
}
static void PrintHex(UInt64 v, size_t align)
{
char s[32];
ConvertUInt64ToHex(v, s);
PrintAligned(s, align);
}
static void PrintDec(int v, size_t align)
{
char s[32];
ConvertUInt64ToString((unsigned)v, s);
PrintAligned(s, align);
}
static void PrintAddr(void *p)
{
PrintHex((UInt64)(size_t)(ptrdiff_t)p, 12);
}
#define PRINT_REALLOC(name, cnt, size, ptr) { \
Print(name " "); \
if (!ptr) PrintDec(cnt++, 10); \
PrintHex(size, 10); \
PrintAddr(ptr); \
PrintLn(); }
#define PRINT_ALLOC(name, cnt, size, ptr) { \
Print(name " "); \
PrintDec(cnt++, 10); \
PrintHex(size, 10); \
PrintAddr(ptr); \
PrintLn(); }
#define PRINT_FREE(name, cnt, ptr) if (ptr) { \
Print(name " "); \
PrintDec(--cnt, 10); \
PrintAddr(ptr); \
PrintLn(); }
#else
#ifdef _WIN32
#define PRINT_ALLOC(name, cnt, size, ptr)
#endif
#define PRINT_FREE(name, cnt, ptr)
#define Print(s)
#define PrintLn()
#define PrintHex(v, align)
#define PrintAddr(p)
#endif
/*
by specification:
malloc(non_NULL, 0) : returns NULL or a unique pointer value that can later be successfully passed to free()
realloc(NULL, size) : the call is equivalent to malloc(size)
realloc(non_NULL, 0) : the call is equivalent to free(ptr)
in main compilers:
malloc(0) : returns non_NULL
realloc(NULL, 0) : returns non_NULL
realloc(non_NULL, 0) : returns NULL
*/
void *MyAlloc(size_t size)
{
if (size == 0)
return NULL;
// PRINT_ALLOC("Alloc ", g_allocCount, size, NULL)
#ifdef SZ_ALLOC_DEBUG
{
void *p = malloc(size);
if (p)
{
PRINT_ALLOC("Alloc ", g_allocCount, size, p)
}
return p;
}
#else
return malloc(size);
#endif
}
void MyFree(void *address)
{
PRINT_FREE("Free ", g_allocCount, address)
free(address);
}
void *MyRealloc(void *address, size_t size)
{
if (size == 0)
{
MyFree(address);
return NULL;
}
// PRINT_REALLOC("Realloc ", g_allocCount, size, address)
#ifdef SZ_ALLOC_DEBUG
{
void *p = realloc(address, size);
if (p)
{
PRINT_REALLOC("Realloc ", g_allocCount, size, address)
}
return p;
}
#else
return realloc(address, size);
#endif
}
#ifdef _WIN32
void *MidAlloc(size_t size)
{
if (size == 0)
return NULL;
#ifdef SZ_ALLOC_DEBUG
{
void *p = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE);
if (p)
{
PRINT_ALLOC("Alloc-Mid", g_allocCountMid, size, p)
}
return p;
}
#else
return VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE);
#endif
}
void MidFree(void *address)
{
PRINT_FREE("Free-Mid", g_allocCountMid, address)
if (!address)
return;
VirtualFree(address, 0, MEM_RELEASE);
}
#ifdef Z7_LARGE_PAGES
#ifdef MEM_LARGE_PAGES
#define MY__MEM_LARGE_PAGES MEM_LARGE_PAGES
#else
#define MY__MEM_LARGE_PAGES 0x20000000
#endif
extern
SIZE_T g_LargePageSize;
SIZE_T g_LargePageSize = 0;
typedef SIZE_T (WINAPI *Func_GetLargePageMinimum)(VOID);
void SetLargePageSize(void)
{
#ifdef Z7_LARGE_PAGES
SIZE_T size;
const
Func_GetLargePageMinimum fn =
(Func_GetLargePageMinimum) MY_CAST_FUNC GetProcAddress(GetModuleHandle(TEXT("kernel32.dll")),
"GetLargePageMinimum");
if (!fn)
return;
size = fn();
if (size == 0 || (size & (size - 1)) != 0)
return;
g_LargePageSize = size;
#endif
}
#endif // Z7_LARGE_PAGES
void *BigAlloc(size_t size)
{
if (size == 0)
return NULL;
PRINT_ALLOC("Alloc-Big", g_allocCountBig, size, NULL)
#ifdef Z7_LARGE_PAGES
{
SIZE_T ps = g_LargePageSize;
if (ps != 0 && ps <= (1 << 30) && size > (ps / 2))
{
size_t size2;
ps--;
size2 = (size + ps) & ~ps;
if (size2 >= size)
{
void *p = VirtualAlloc(NULL, size2, MEM_COMMIT | MY__MEM_LARGE_PAGES, PAGE_READWRITE);
if (p)
{
PRINT_ALLOC("Alloc-BM ", g_allocCountMid, size2, p)
return p;
}
}
}
}
#endif
return MidAlloc(size);
}
void BigFree(void *address)
{
PRINT_FREE("Free-Big", g_allocCountBig, address)
MidFree(address);
}
#endif // _WIN32
static void *SzAlloc(ISzAllocPtr p, size_t size) { UNUSED_VAR(p) return MyAlloc(size); }
static void SzFree(ISzAllocPtr p, void *address) { UNUSED_VAR(p) MyFree(address); }
const ISzAlloc g_Alloc = { SzAlloc, SzFree };
#ifdef _WIN32
static void *SzMidAlloc(ISzAllocPtr p, size_t size) { UNUSED_VAR(p) return MidAlloc(size); }
static void SzMidFree(ISzAllocPtr p, void *address) { UNUSED_VAR(p) MidFree(address); }
static void *SzBigAlloc(ISzAllocPtr p, size_t size) { UNUSED_VAR(p) return BigAlloc(size); }
static void SzBigFree(ISzAllocPtr p, void *address) { UNUSED_VAR(p) BigFree(address); }
const ISzAlloc g_MidAlloc = { SzMidAlloc, SzMidFree };
const ISzAlloc g_BigAlloc = { SzBigAlloc, SzBigFree };
#endif
/*
uintptr_t : <stdint.h> C99 (optional)
: unsupported in VS6
*/
#ifdef _WIN32
typedef UINT_PTR UIntPtr;
#else
/*
typedef uintptr_t UIntPtr;
*/
typedef ptrdiff_t UIntPtr;
#endif
#define ADJUST_ALLOC_SIZE 0
/*
#define ADJUST_ALLOC_SIZE (sizeof(void *) - 1)
*/
/*
Use (ADJUST_ALLOC_SIZE = (sizeof(void *) - 1)), if
MyAlloc() can return address that is NOT multiple of sizeof(void *).
*/
/*
#define MY_ALIGN_PTR_DOWN(p, align) ((void *)((char *)(p) - ((size_t)(UIntPtr)(p) & ((align) - 1))))
*/
#define MY_ALIGN_PTR_DOWN(p, align) ((void *)((((UIntPtr)(p)) & ~((UIntPtr)(align) - 1))))
#if !defined(_WIN32) && defined(_POSIX_C_SOURCE) && (_POSIX_C_SOURCE >= 200112L)
#define USE_posix_memalign
#endif
#ifndef USE_posix_memalign
#define MY_ALIGN_PTR_UP_PLUS(p, align) MY_ALIGN_PTR_DOWN(((char *)(p) + (align) + ADJUST_ALLOC_SIZE), align)
#endif
/*
This posix_memalign() is for test purposes only.
We also need special Free() function instead of free(),
if this posix_memalign() is used.
*/
/*
static int posix_memalign(void **ptr, size_t align, size_t size)
{
size_t newSize = size + align;
void *p;
void *pAligned;
*ptr = NULL;
if (newSize < size)
return 12; // ENOMEM
p = MyAlloc(newSize);
if (!p)
return 12; // ENOMEM
pAligned = MY_ALIGN_PTR_UP_PLUS(p, align);
((void **)pAligned)[-1] = p;
*ptr = pAligned;
return 0;
}
*/
/*
ALLOC_ALIGN_SIZE >= sizeof(void *)
ALLOC_ALIGN_SIZE >= cache_line_size
*/
#define ALLOC_ALIGN_SIZE ((size_t)1 << 7)
static void *SzAlignedAlloc(ISzAllocPtr pp, size_t size)
{
#ifndef USE_posix_memalign
void *p;
void *pAligned;
size_t newSize;
UNUSED_VAR(pp)
/* also we can allocate additional dummy ALLOC_ALIGN_SIZE bytes after aligned
block to prevent cache line sharing with another allocated blocks */
newSize = size + ALLOC_ALIGN_SIZE * 1 + ADJUST_ALLOC_SIZE;
if (newSize < size)
return NULL;
p = MyAlloc(newSize);
if (!p)
return NULL;
pAligned = MY_ALIGN_PTR_UP_PLUS(p, ALLOC_ALIGN_SIZE);
Print(" size="); PrintHex(size, 8);
Print(" a_size="); PrintHex(newSize, 8);
Print(" ptr="); PrintAddr(p);
Print(" a_ptr="); PrintAddr(pAligned);
PrintLn();
((void **)pAligned)[-1] = p;
return pAligned;
#else
void *p;
UNUSED_VAR(pp)
if (posix_memalign(&p, ALLOC_ALIGN_SIZE, size))
return NULL;
Print(" posix_memalign="); PrintAddr(p);
PrintLn();
return p;
#endif
}
static void SzAlignedFree(ISzAllocPtr pp, void *address)
{
UNUSED_VAR(pp)
#ifndef USE_posix_memalign
if (address)
MyFree(((void **)address)[-1]);
#else
free(address);
#endif
}
const ISzAlloc g_AlignedAlloc = { SzAlignedAlloc, SzAlignedFree };
#define MY_ALIGN_PTR_DOWN_1(p) MY_ALIGN_PTR_DOWN(p, sizeof(void *))
/* we align ptr to support cases where CAlignOffsetAlloc::offset is not multiply of sizeof(void *) */
#define REAL_BLOCK_PTR_VAR(p) ((void **)MY_ALIGN_PTR_DOWN_1(p))[-1]
/*
#define REAL_BLOCK_PTR_VAR(p) ((void **)(p))[-1]
*/
static void *AlignOffsetAlloc_Alloc(ISzAllocPtr pp, size_t size)
{
const CAlignOffsetAlloc *p = Z7_CONTAINER_FROM_VTBL_CONST(pp, CAlignOffsetAlloc, vt);
void *adr;
void *pAligned;
size_t newSize;
size_t extra;
size_t alignSize = (size_t)1 << p->numAlignBits;
if (alignSize < sizeof(void *))
alignSize = sizeof(void *);
if (p->offset >= alignSize)
return NULL;
/* also we can allocate additional dummy ALLOC_ALIGN_SIZE bytes after aligned
block to prevent cache line sharing with another allocated blocks */
extra = p->offset & (sizeof(void *) - 1);
newSize = size + alignSize + extra + ADJUST_ALLOC_SIZE;
if (newSize < size)
return NULL;
adr = ISzAlloc_Alloc(p->baseAlloc, newSize);
if (!adr)
return NULL;
pAligned = (char *)MY_ALIGN_PTR_DOWN((char *)adr +
alignSize - p->offset + extra + ADJUST_ALLOC_SIZE, alignSize) + p->offset;
PrintLn();
Print("- Aligned: ");
Print(" size="); PrintHex(size, 8);
Print(" a_size="); PrintHex(newSize, 8);
Print(" ptr="); PrintAddr(adr);
Print(" a_ptr="); PrintAddr(pAligned);
PrintLn();
REAL_BLOCK_PTR_VAR(pAligned) = adr;
return pAligned;
}
static void AlignOffsetAlloc_Free(ISzAllocPtr pp, void *address)
{
if (address)
{
const CAlignOffsetAlloc *p = Z7_CONTAINER_FROM_VTBL_CONST(pp, CAlignOffsetAlloc, vt);
PrintLn();
Print("- Aligned Free: ");
PrintLn();
ISzAlloc_Free(p->baseAlloc, REAL_BLOCK_PTR_VAR(address));
}
}
void AlignOffsetAlloc_CreateVTable(CAlignOffsetAlloc *p)
{
p->vt.Alloc = AlignOffsetAlloc_Alloc;
p->vt.Free = AlignOffsetAlloc_Free;
}

extern/lzma/Alloc.h vendored (71 lines removed)

@@ -1,71 +0,0 @@
/* Alloc.h -- Memory allocation functions
2023-03-04 : Igor Pavlov : Public domain */
#ifndef ZIP7_INC_ALLOC_H
#define ZIP7_INC_ALLOC_H
#include "7zTypes.h"
EXTERN_C_BEGIN
/*
MyFree(NULL) : is allowed, as free(NULL)
MyAlloc(0) : returns NULL : but malloc(0) is allowed to return NULL or non_NULL
MyRealloc(NULL, 0) : returns NULL : but realloc(NULL, 0) is allowed to return NULL or non_NULL
MyRealloc() is similar to realloc() for the following cases:
MyRealloc(non_NULL, 0) : returns NULL and always calls MyFree(ptr)
MyRealloc(NULL, non_ZERO) : returns NULL, if allocation failed
MyRealloc(non_NULL, non_ZERO) : returns NULL, if reallocation failed
*/
void *MyAlloc(size_t size);
void MyFree(void *address);
void *MyRealloc(void *address, size_t size);
#ifdef _WIN32
#ifdef Z7_LARGE_PAGES
void SetLargePageSize(void);
#endif
void *MidAlloc(size_t size);
void MidFree(void *address);
void *BigAlloc(size_t size);
void BigFree(void *address);
#else
#define MidAlloc(size) MyAlloc(size)
#define MidFree(address) MyFree(address)
#define BigAlloc(size) MyAlloc(size)
#define BigFree(address) MyFree(address)
#endif
extern const ISzAlloc g_Alloc;
#ifdef _WIN32
extern const ISzAlloc g_BigAlloc;
extern const ISzAlloc g_MidAlloc;
#else
#define g_BigAlloc g_AlignedAlloc
#define g_MidAlloc g_AlignedAlloc
#endif
extern const ISzAlloc g_AlignedAlloc;
typedef struct
{
ISzAlloc vt;
ISzAllocPtr baseAlloc;
unsigned numAlignBits; /* ((1 << numAlignBits) >= sizeof(void *)) */
size_t offset; /* (offset == (k * sizeof(void *)) && offset < (1 << numAlignBits) */
} CAlignOffsetAlloc;
void AlignOffsetAlloc_CreateVTable(CAlignOffsetAlloc *p);
EXTERN_C_END
#endif

extern/lzma/CMakeLists.txt (49 lines removed)

@@ -1,49 +0,0 @@
# SPDX-FileCopyrightText: 2024 Blender Foundation
#
# SPDX-License-Identifier: GPL-2.0-or-later
# avoid noisy warnings
if(CMAKE_COMPILER_IS_GNUCC OR CMAKE_C_COMPILER_ID MATCHES "Clang")
add_c_flag(
"-Wno-self-assign"
)
endif()
set(INC
.
)
set(INC_SYS
)
set(SRC
Alloc.c
CpuArch.c
LzFind.c
LzFindMt.c
LzFindOpt.c
LzmaDec.c
LzmaEnc.c
LzmaLib.c
Threads.c
7zTypes.h
7zWindows.h
Alloc.h
Compiler.h
CpuArch.h
LzFind.h
LzFindMt.h
LzHash.h
LzmaDec.h
LzmaEnc.h
LzmaLib.h
Precomp.h
Threads.h
)
set(LIB
)
blender_add_lib(extern_lzma "${SRC}" "${INC}" "${INC_SYS}" "${LIB}")

extern/lzma/Compiler.h vendored (159 lines removed)

@@ -1,159 +0,0 @@
/* Compiler.h : Compiler specific defines and pragmas
2023-04-02 : Igor Pavlov : Public domain */
#ifndef ZIP7_INC_COMPILER_H
#define ZIP7_INC_COMPILER_H
#if defined(__clang__)
# define Z7_CLANG_VERSION (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__)
#endif
#if defined(__clang__) && defined(__apple_build_version__)
# define Z7_APPLE_CLANG_VERSION Z7_CLANG_VERSION
#elif defined(__clang__)
# define Z7_LLVM_CLANG_VERSION Z7_CLANG_VERSION
#elif defined(__GNUC__)
# define Z7_GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
#endif
#ifdef _MSC_VER
#if !defined(__clang__) && !defined(__GNUC__)
#define Z7_MSC_VER_ORIGINAL _MSC_VER
#endif
#endif
#if defined(__MINGW32__) || defined(__MINGW64__)
#define Z7_MINGW
#endif
// #pragma GCC diagnostic ignored "-Wunknown-pragmas"
#ifdef __clang__
// padding size of '' with 4 bytes to alignment boundary
#pragma GCC diagnostic ignored "-Wpadded"
#endif
#ifdef _MSC_VER
#ifdef UNDER_CE
#define RPC_NO_WINDOWS_H
/* #pragma warning(disable : 4115) // '_RPC_ASYNC_STATE' : named type definition in parentheses */
#pragma warning(disable : 4201) // nonstandard extension used : nameless struct/union
#pragma warning(disable : 4214) // nonstandard extension used : bit field types other than int
#endif
#if defined(_MSC_VER) && _MSC_VER >= 1800
#pragma warning(disable : 4464) // relative include path contains '..'
#endif
// == 1200 : -O1 : for __forceinline
// >= 1900 : -O1 : for printf
#pragma warning(disable : 4710) // function not inlined
#if _MSC_VER < 1900
// winnt.h: 'Int64ShllMod32'
#pragma warning(disable : 4514) // unreferenced inline function has been removed
#endif
#if _MSC_VER < 1300
// #pragma warning(disable : 4702) // unreachable code
// Bra.c : -O1:
#pragma warning(disable : 4714) // function marked as __forceinline not inlined
#endif
/*
#if _MSC_VER > 1400 && _MSC_VER <= 1900
// strcat: This function or variable may be unsafe
// sysinfoapi.h: kit10: GetVersion was declared deprecated
#pragma warning(disable : 4996)
#endif
*/
#if _MSC_VER > 1200
// -Wall warnings
#pragma warning(disable : 4711) // function selected for automatic inline expansion
#pragma warning(disable : 4820) // '2' bytes padding added after data member
#if _MSC_VER >= 1400 && _MSC_VER < 1920
// 1400: string.h: _DBG_MEMCPY_INLINE_
// 1600 - 191x : smmintrin.h __cplusplus'
// is not defined as a preprocessor macro, replacing with '0' for '#if/#elif'
#pragma warning(disable : 4668)
// 1400 - 1600 : WinDef.h : 'FARPROC' :
// 1900 - 191x : immintrin.h: _readfsbase_u32
// no function prototype given : converting '()' to '(void)'
#pragma warning(disable : 4255)
#endif
#if _MSC_VER >= 1914
// Compiler will insert Spectre mitigation for memory load if /Qspectre switch specified
#pragma warning(disable : 5045)
#endif
#endif // _MSC_VER > 1200
#endif // _MSC_VER
#if defined(__clang__) && (__clang_major__ >= 4)
#define Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE \
_Pragma("clang loop unroll(disable)") \
_Pragma("clang loop vectorize(disable)")
#define Z7_ATTRIB_NO_VECTORIZE
#elif defined(__GNUC__) && (__GNUC__ >= 5)
#define Z7_ATTRIB_NO_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
// __attribute__((optimize("no-unroll-loops")));
#define Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
#elif defined(_MSC_VER) && (_MSC_VER >= 1920)
#define Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE \
_Pragma("loop( no_vector )")
#define Z7_ATTRIB_NO_VECTORIZE
#else
#define Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
#define Z7_ATTRIB_NO_VECTORIZE
#endif
#if defined(MY_CPU_X86_OR_AMD64) && ( \
defined(__clang__) && (__clang_major__ >= 4) \
|| defined(__GNUC__) && (__GNUC__ >= 5))
#define Z7_ATTRIB_NO_SSE __attribute__((__target__("no-sse")))
#else
#define Z7_ATTRIB_NO_SSE
#endif
#define Z7_ATTRIB_NO_VECTOR \
Z7_ATTRIB_NO_VECTORIZE \
Z7_ATTRIB_NO_SSE
#if defined(__clang__) && (__clang_major__ >= 8) \
|| defined(__GNUC__) && (__GNUC__ >= 1000) \
/* || defined(_MSC_VER) && (_MSC_VER >= 1920) */
// GCC is not good for __builtin_expect()
#define Z7_LIKELY(x) (__builtin_expect((x), 1))
#define Z7_UNLIKELY(x) (__builtin_expect((x), 0))
// #define Z7_unlikely [[unlikely]]
// #define Z7_likely [[likely]]
#else
#define Z7_LIKELY(x) (x)
#define Z7_UNLIKELY(x) (x)
// #define Z7_likely
#endif
#if (defined(Z7_CLANG_VERSION) && (Z7_CLANG_VERSION >= 36000))
#define Z7_DIAGNOSCTIC_IGNORE_BEGIN_RESERVED_MACRO_IDENTIFIER \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wreserved-macro-identifier\"")
#define Z7_DIAGNOSCTIC_IGNORE_END_RESERVED_MACRO_IDENTIFIER \
_Pragma("GCC diagnostic pop")
#else
#define Z7_DIAGNOSCTIC_IGNORE_BEGIN_RESERVED_MACRO_IDENTIFIER
#define Z7_DIAGNOSCTIC_IGNORE_END_RESERVED_MACRO_IDENTIFIER
#endif
#define UNUSED_VAR(x) (void)x;
/* #define UNUSED_VAR(x) x=x; */
#endif

extern/lzma/CpuArch.c vendored (823 lines removed)

@@ -1,823 +0,0 @@
/* CpuArch.c -- CPU specific code
2023-05-18 : Igor Pavlov : Public domain */
#include "Precomp.h"
// #include <stdio.h>
#include "CpuArch.h"
#ifdef MY_CPU_X86_OR_AMD64
#undef NEED_CHECK_FOR_CPUID
#if !defined(MY_CPU_AMD64)
#define NEED_CHECK_FOR_CPUID
#endif
/*
cpuid instruction supports (subFunction) parameter in ECX,
that is used only with some specific (function) parameter values.
But we always use only (subFunction==0).
*/
/*
__cpuid(): MSVC and GCC/CLANG use same function/macro name
but parameters are different.
We use MSVC __cpuid() parameters style for our z7_x86_cpuid() function.
*/
#if defined(__GNUC__) /* && (__GNUC__ >= 10) */ \
|| defined(__clang__) /* && (__clang_major__ >= 10) */
/* there was some CLANG/GCC compilers that have issues with
rbx(ebx) handling in asm blocks in -fPIC mode (__PIC__ is defined).
compiler's <cpuid.h> contains the macro __cpuid() that is similar to our code.
The history of __cpuid() changes in CLANG/GCC:
GCC:
2007: it preserved ebx for (__PIC__ && __i386__)
2013: it preserved rbx and ebx for __PIC__
2014: it doesn't preserves rbx and ebx anymore
we suppose that (__GNUC__ >= 5) fixed that __PIC__ ebx/rbx problem.
CLANG:
2014+: it preserves rbx, but only for 64-bit code. No __PIC__ check.
Why CLANG cares about 64-bit mode only, and doesn't care about ebx (in 32-bit)?
Do we need __PIC__ test for CLANG or we must care about rbx even if
__PIC__ is not defined?
*/
#define ASM_LN "\n"
#if defined(MY_CPU_AMD64) && defined(__PIC__) \
&& ((defined (__GNUC__) && (__GNUC__ < 5)) || defined(__clang__))
#define x86_cpuid_MACRO(p, func) { \
__asm__ __volatile__ ( \
ASM_LN "mov %%rbx, %q1" \
ASM_LN "cpuid" \
ASM_LN "xchg %%rbx, %q1" \
: "=a" ((p)[0]), "=&r" ((p)[1]), "=c" ((p)[2]), "=d" ((p)[3]) : "0" (func), "2"(0)); }
/* "=&r" selects free register. It can select even rbx, if that register is free.
"=&D" for (RDI) also works, but the code can be larger with "=&D"
"2"(0) means (subFunction = 0),
2 is (zero-based) index in the output constraint list "=c" (ECX). */
#elif defined(MY_CPU_X86) && defined(__PIC__) \
&& ((defined (__GNUC__) && (__GNUC__ < 5)) || defined(__clang__))
#define x86_cpuid_MACRO(p, func) { \
__asm__ __volatile__ ( \
ASM_LN "mov %%ebx, %k1" \
ASM_LN "cpuid" \
ASM_LN "xchg %%ebx, %k1" \
: "=a" ((p)[0]), "=&r" ((p)[1]), "=c" ((p)[2]), "=d" ((p)[3]) : "0" (func), "2"(0)); }
#else
#define x86_cpuid_MACRO(p, func) { \
__asm__ __volatile__ ( \
ASM_LN "cpuid" \
: "=a" ((p)[0]), "=b" ((p)[1]), "=c" ((p)[2]), "=d" ((p)[3]) : "0" (func), "2"(0)); }
#endif
void Z7_FASTCALL z7_x86_cpuid(UInt32 p[4], UInt32 func)
{
x86_cpuid_MACRO(p, func)
}
Z7_NO_INLINE
UInt32 Z7_FASTCALL z7_x86_cpuid_GetMaxFunc(void)
{
#if defined(NEED_CHECK_FOR_CPUID)
#define EFALGS_CPUID_BIT 21
UInt32 a;
__asm__ __volatile__ (
ASM_LN "pushf"
ASM_LN "pushf"
ASM_LN "pop %0"
// ASM_LN "movl %0, %1"
// ASM_LN "xorl $0x200000, %0"
ASM_LN "btc %1, %0"
ASM_LN "push %0"
ASM_LN "popf"
ASM_LN "pushf"
ASM_LN "pop %0"
ASM_LN "xorl (%%esp), %0"
ASM_LN "popf"
ASM_LN
: "=&r" (a) // "=a"
: "i" (EFALGS_CPUID_BIT)
);
if ((a & (1 << EFALGS_CPUID_BIT)) == 0)
return 0;
#endif
{
UInt32 p[4];
x86_cpuid_MACRO(p, 0)
return p[0];
}
}
#undef ASM_LN
#elif !defined(_MSC_VER)
/*
// for gcc/clang and other: we can try to use __cpuid macro:
#include <cpuid.h>
void Z7_FASTCALL z7_x86_cpuid(UInt32 p[4], UInt32 func)
{
__cpuid(func, p[0], p[1], p[2], p[3]);
}
UInt32 Z7_FASTCALL z7_x86_cpuid_GetMaxFunc(void)
{
return (UInt32)__get_cpuid_max(0, NULL);
}
*/
// for unsupported cpuid:
void Z7_FASTCALL z7_x86_cpuid(UInt32 p[4], UInt32 func)
{
UNUSED_VAR(func)
p[0] = p[1] = p[2] = p[3] = 0;
}
UInt32 Z7_FASTCALL z7_x86_cpuid_GetMaxFunc(void)
{
return 0;
}
#else // _MSC_VER
#if !defined(MY_CPU_AMD64)
UInt32 __declspec(naked) Z7_FASTCALL z7_x86_cpuid_GetMaxFunc(void)
{
#if defined(NEED_CHECK_FOR_CPUID)
#define EFALGS_CPUID_BIT 21
__asm pushfd
__asm pushfd
/*
__asm pop eax
// __asm mov edx, eax
__asm btc eax, EFALGS_CPUID_BIT
__asm push eax
*/
__asm btc dword ptr [esp], EFALGS_CPUID_BIT
__asm popfd
__asm pushfd
__asm pop eax
// __asm xor eax, edx
__asm xor eax, [esp]
// __asm push edx
__asm popfd
__asm and eax, (1 shl EFALGS_CPUID_BIT)
__asm jz end_func
#endif
__asm push ebx
__asm xor eax, eax // func
__asm xor ecx, ecx // subFunction (optional) for (func == 0)
__asm cpuid
__asm pop ebx
#if defined(NEED_CHECK_FOR_CPUID)
end_func:
#endif
__asm ret 0
}
void __declspec(naked) Z7_FASTCALL z7_x86_cpuid(UInt32 p[4], UInt32 func)
{
UNUSED_VAR(p)
UNUSED_VAR(func)
__asm push ebx
__asm push edi
__asm mov edi, ecx // p
__asm mov eax, edx // func
__asm xor ecx, ecx // subfunction (optional) for (func == 0)
__asm cpuid
__asm mov [edi ], eax
__asm mov [edi + 4], ebx
__asm mov [edi + 8], ecx
__asm mov [edi + 12], edx
__asm pop edi
__asm pop ebx
__asm ret 0
}
#else // MY_CPU_AMD64
#if _MSC_VER >= 1600
#include <intrin.h>
#define MY_cpuidex __cpuidex
#else
/*
__cpuid (func == (0 or 7)) requires subfunction number in ECX.
MSDN: The __cpuid intrinsic clears the ECX register before calling the cpuid instruction.
__cpuid() in new MSVC clears ECX.
__cpuid() in old MSVC (14.00) x64 doesn't clear ECX
We still can use __cpuid for low (func) values that don't require ECX,
but __cpuid() in old MSVC will be incorrect for some func values: (func == 7).
So here we use the hack for old MSVC to send (subFunction) in ECX register to cpuid instruction,
where ECX value is first parameter for FASTCALL / NO_INLINE func,
So the caller of MY_cpuidex_HACK() sets ECX as subFunction, and
old MSVC for __cpuid() doesn't change ECX and cpuid instruction gets (subFunction) value.
DON'T remove Z7_NO_INLINE and Z7_FASTCALL for MY_cpuidex_HACK(): !!!
*/
static
Z7_NO_INLINE void Z7_FASTCALL MY_cpuidex_HACK(UInt32 subFunction, UInt32 func, int *CPUInfo)
{
UNUSED_VAR(subFunction)
__cpuid(CPUInfo, func);
}
#define MY_cpuidex(info, func, func2) MY_cpuidex_HACK(func2, func, info)
#pragma message("======== MY_cpuidex_HACK WAS USED ========")
#endif // _MSC_VER >= 1600
#if !defined(MY_CPU_AMD64)
/* inlining for __cpuid() in MSVC x86 (32-bit) produces big ineffective code,
so we disable inlining here */
Z7_NO_INLINE
#endif
void Z7_FASTCALL z7_x86_cpuid(UInt32 p[4], UInt32 func)
{
MY_cpuidex((int *)p, (int)func, 0);
}
Z7_NO_INLINE
UInt32 Z7_FASTCALL z7_x86_cpuid_GetMaxFunc(void)
{
int a[4];
MY_cpuidex(a, 0, 0);
return a[0];
}
#endif // MY_CPU_AMD64
#endif // _MSC_VER
#if defined(NEED_CHECK_FOR_CPUID)
#define CHECK_CPUID_IS_SUPPORTED { if (z7_x86_cpuid_GetMaxFunc() == 0) return 0; }
#else
#define CHECK_CPUID_IS_SUPPORTED
#endif
#undef NEED_CHECK_FOR_CPUID
static
BoolInt x86cpuid_Func_1(UInt32 *p)
{
CHECK_CPUID_IS_SUPPORTED
z7_x86_cpuid(p, 1);
return True;
}
/*
static const UInt32 kVendors[][1] =
{
{ 0x756E6547 }, // , 0x49656E69, 0x6C65746E },
{ 0x68747541 }, // , 0x69746E65, 0x444D4163 },
{ 0x746E6543 } // , 0x48727561, 0x736C7561 }
};
*/
/*
typedef struct
{
UInt32 maxFunc;
UInt32 vendor[3];
UInt32 ver;
UInt32 b;
UInt32 c;
UInt32 d;
} Cx86cpuid;
enum
{
CPU_FIRM_INTEL,
CPU_FIRM_AMD,
CPU_FIRM_VIA
};
int x86cpuid_GetFirm(const Cx86cpuid *p);
#define x86cpuid_ver_GetFamily(ver) (((ver >> 16) & 0xff0) | ((ver >> 8) & 0xf))
#define x86cpuid_ver_GetModel(ver) (((ver >> 12) & 0xf0) | ((ver >> 4) & 0xf))
#define x86cpuid_ver_GetStepping(ver) (ver & 0xf)
int x86cpuid_GetFirm(const Cx86cpuid *p)
{
unsigned i;
for (i = 0; i < sizeof(kVendors) / sizeof(kVendors[0]); i++)
{
const UInt32 *v = kVendors[i];
if (v[0] == p->vendor[0]
// && v[1] == p->vendor[1]
// && v[2] == p->vendor[2]
)
return (int)i;
}
return -1;
}
BoolInt CPU_Is_InOrder()
{
Cx86cpuid p;
UInt32 family, model;
if (!x86cpuid_CheckAndRead(&p))
return True;
family = x86cpuid_ver_GetFamily(p.ver);
model = x86cpuid_ver_GetModel(p.ver);
switch (x86cpuid_GetFirm(&p))
{
case CPU_FIRM_INTEL: return (family < 6 || (family == 6 && (
// In-Order Atom CPU
model == 0x1C // 45 nm, N4xx, D4xx, N5xx, D5xx, 230, 330
|| model == 0x26 // 45 nm, Z6xx
|| model == 0x27 // 32 nm, Z2460
|| model == 0x35 // 32 nm, Z2760
|| model == 0x36 // 32 nm, N2xxx, D2xxx
)));
case CPU_FIRM_AMD: return (family < 5 || (family == 5 && (model < 6 || model == 0xA)));
case CPU_FIRM_VIA: return (family < 6 || (family == 6 && model < 0xF));
}
return False; // v23 : unknown processors are not In-Order
}
*/
#ifdef _WIN32
#include "7zWindows.h"
#endif
#if !defined(MY_CPU_AMD64) && defined(_WIN32)
/* for legacy SSE ia32: there is no user-space cpu instruction to check
that OS supports SSE register storing/restoring on context switches.
So we need some OS-specific function to check that it's safe to use SSE registers.
*/
Z7_FORCE_INLINE
static BoolInt CPU_Sys_Is_SSE_Supported(void)
{
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4996) // `GetVersion': was declared deprecated
#endif
/* low byte is major version of Windows
We suppose that any Windows version since
Windows2000 (major == 5) supports SSE registers */
return (Byte)GetVersion() >= 5;
#if defined(_MSC_VER)
#pragma warning(pop)
#endif
}
#define CHECK_SYS_SSE_SUPPORT if (!CPU_Sys_Is_SSE_Supported()) return False;
#else
#define CHECK_SYS_SSE_SUPPORT
#endif
#if !defined(MY_CPU_AMD64)
BoolInt CPU_IsSupported_CMOV(void)
{
UInt32 a[4];
if (!x86cpuid_Func_1(&a[0]))
return 0;
return (a[3] >> 15) & 1;
}
BoolInt CPU_IsSupported_SSE(void)
{
UInt32 a[4];
CHECK_SYS_SSE_SUPPORT
if (!x86cpuid_Func_1(&a[0]))
return 0;
return (a[3] >> 25) & 1;
}
BoolInt CPU_IsSupported_SSE2(void)
{
UInt32 a[4];
CHECK_SYS_SSE_SUPPORT
if (!x86cpuid_Func_1(&a[0]))
return 0;
return (a[3] >> 26) & 1;
}
#endif
static UInt32 x86cpuid_Func_1_ECX(void)
{
UInt32 a[4];
CHECK_SYS_SSE_SUPPORT
if (!x86cpuid_Func_1(&a[0]))
return 0;
return a[2];
}
BoolInt CPU_IsSupported_AES(void)
{
return (x86cpuid_Func_1_ECX() >> 25) & 1;
}
BoolInt CPU_IsSupported_SSSE3(void)
{
return (x86cpuid_Func_1_ECX() >> 9) & 1;
}
BoolInt CPU_IsSupported_SSE41(void)
{
return (x86cpuid_Func_1_ECX() >> 19) & 1;
}
BoolInt CPU_IsSupported_SHA(void)
{
CHECK_SYS_SSE_SUPPORT
if (z7_x86_cpuid_GetMaxFunc() < 7)
return False;
{
UInt32 d[4];
z7_x86_cpuid(d, 7);
return (d[1] >> 29) & 1;
}
}
/*
MSVC: _xgetbv() intrinsic is available since VS2010SP1.
MSVC also defines (_XCR_XFEATURE_ENABLED_MASK) macro in
<immintrin.h> that we can use or check.
For any 32-bit x86 we can use asm code in MSVC,
but MSVC asm code is huge after compilation.
So _xgetbv() is better
ICC: _xgetbv() intrinsic is available (in what version of ICC?)
ICC defines (__GNUC___) and it supports gnu assembler
also ICC supports MASM style code with -use-msasm switch.
but ICC doesn't support __attribute__((__target__))
GCC/CLANG 9:
_xgetbv() is macro that works via __builtin_ia32_xgetbv()
and we need __attribute__((__target__("xsave")).
But with __target__("xsave") the function will be not
inlined to function that has no __target__("xsave") attribute.
If we want _xgetbv() call inlining, then we should use asm version
instead of calling _xgetbv().
Note:intrinsic is broke before GCC 8.2:
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85684
*/
#if defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1100) \
|| defined(_MSC_VER) && (_MSC_VER >= 1600) && (_MSC_FULL_VER >= 160040219) \
|| defined(__GNUC__) && (__GNUC__ >= 9) \
|| defined(__clang__) && (__clang_major__ >= 9)
// we define ATTRIB_XGETBV, if we want to use predefined _xgetbv() from compiler
#if defined(__INTEL_COMPILER)
#define ATTRIB_XGETBV
#elif defined(__GNUC__) || defined(__clang__)
// we don't define ATTRIB_XGETBV here, because asm version is better for inlining.
// #define ATTRIB_XGETBV __attribute__((__target__("xsave")))
#else
#define ATTRIB_XGETBV
#endif
#endif
#if defined(ATTRIB_XGETBV)
#include <immintrin.h>
#endif
// XFEATURE_ENABLED_MASK/XCR0
#define MY_XCR_XFEATURE_ENABLED_MASK 0
#if defined(ATTRIB_XGETBV)
ATTRIB_XGETBV
#endif
static UInt64 x86_xgetbv_0(UInt32 num)
{
#if defined(ATTRIB_XGETBV)
{
return
#if (defined(_MSC_VER))
_xgetbv(num);
#else
__builtin_ia32_xgetbv(
#if !defined(__clang__)
(int)
#endif
num);
#endif
}
#elif defined(__GNUC__) || defined(__clang__) || defined(__SUNPRO_CC)
UInt32 a, d;
#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4))
__asm__
(
"xgetbv"
: "=a"(a), "=d"(d) : "c"(num) : "cc"
);
#else // is old gcc
__asm__
(
".byte 0x0f, 0x01, 0xd0" "\n\t"
: "=a"(a), "=d"(d) : "c"(num) : "cc"
);
#endif
return ((UInt64)d << 32) | a;
// return a;
#elif defined(_MSC_VER) && !defined(MY_CPU_AMD64)
UInt32 a, d;
__asm {
push eax
push edx
push ecx
mov ecx, num;
// xor ecx, ecx // = MY_XCR_XFEATURE_ENABLED_MASK
_emit 0x0f
_emit 0x01
_emit 0xd0
mov a, eax
mov d, edx
pop ecx
pop edx
pop eax
}
return ((UInt64)d << 32) | a;
// return a;
#else // it's unknown compiler
// #error "Need xgetbv function"
UNUSED_VAR(num)
// for MSVC-X64 we could call external function from external file.
/* Actually we had checked OSXSAVE/AVX in cpuid before.
So it's expected that OS supports at least AVX and below. */
// if (num != MY_XCR_XFEATURE_ENABLED_MASK) return 0; // if not XCR0
return
// (1 << 0) | // x87
(1 << 1) // SSE
| (1 << 2); // AVX
#endif
}
#ifdef _WIN32
/*
Windows versions do not know about new ISA extensions that
can be introduced. But we still can use new extensions,
even if Windows doesn't report about supporting them,
But we can use new extensions, only if Windows knows about new ISA extension
that changes the number or size of registers: SSE, AVX/XSAVE, AVX512
So it's enough to check
MY_PF_AVX_INSTRUCTIONS_AVAILABLE
instead of
MY_PF_AVX2_INSTRUCTIONS_AVAILABLE
*/
#define MY_PF_XSAVE_ENABLED 17
// #define MY_PF_SSSE3_INSTRUCTIONS_AVAILABLE 36
// #define MY_PF_SSE4_1_INSTRUCTIONS_AVAILABLE 37
// #define MY_PF_SSE4_2_INSTRUCTIONS_AVAILABLE 38
// #define MY_PF_AVX_INSTRUCTIONS_AVAILABLE 39
// #define MY_PF_AVX2_INSTRUCTIONS_AVAILABLE 40
// #define MY_PF_AVX512F_INSTRUCTIONS_AVAILABLE 41
#endif
BoolInt CPU_IsSupported_AVX(void)
{
#ifdef _WIN32
if (!IsProcessorFeaturePresent(MY_PF_XSAVE_ENABLED))
return False;
/* PF_AVX_INSTRUCTIONS_AVAILABLE probably is supported starting from
some latest Win10 revisions. But we need AVX in older Windows also.
So we don't use the following check: */
/*
if (!IsProcessorFeaturePresent(MY_PF_AVX_INSTRUCTIONS_AVAILABLE))
return False;
*/
#endif
/*
OS must use new special XSAVE/XRSTOR instructions to save
AVX registers when it required for context switching.
At OS statring:
OS sets CR4.OSXSAVE flag to signal the processor that OS supports the XSAVE extensions.
Also OS sets bitmask in XCR0 register that defines what
registers will be processed by XSAVE instruction:
XCR0.SSE[bit 0] - x87 registers and state
XCR0.SSE[bit 1] - SSE registers and state
XCR0.AVX[bit 2] - AVX registers and state
CR4.OSXSAVE is reflected to CPUID.1:ECX.OSXSAVE[bit 27].
So we can read that bit in user-space.
XCR0 is available for reading in user-space by new XGETBV instruction.
*/
{
const UInt32 c = x86cpuid_Func_1_ECX();
if (0 == (1
& (c >> 28) // AVX instructions are supported by hardware
& (c >> 27))) // OSXSAVE bit: XSAVE and related instructions are enabled by OS.
return False;
}
/* also we can check
CPUID.1:ECX.XSAVE [bit 26] : that shows that
XSAVE, XRESTOR, XSETBV, XGETBV instructions are supported by hardware.
But that check is redundant, because if OSXSAVE bit is set, then XSAVE is also set */
/* If OS have enabled XSAVE extension instructions (OSXSAVE == 1),
in most cases we expect that OS also will support storing/restoring
for AVX and SSE states at least.
But to be ensure for that we call user-space instruction
XGETBV(0) to get XCR0 value that contains bitmask that defines
what exact states(registers) OS have enabled for storing/restoring.
*/
{
const UInt32 bm = (UInt32)x86_xgetbv_0(MY_XCR_XFEATURE_ENABLED_MASK);
// printf("\n=== XGetBV=%d\n", bm);
return 1
& (bm >> 1) // SSE state is supported (set by OS) for storing/restoring
& (bm >> 2); // AVX state is supported (set by OS) for storing/restoring
}
// since Win7SP1: we can use GetEnabledXStateFeatures();
}
BoolInt CPU_IsSupported_AVX2(void)
{
if (!CPU_IsSupported_AVX())
return False;
if (z7_x86_cpuid_GetMaxFunc() < 7)
return False;
{
UInt32 d[4];
z7_x86_cpuid(d, 7);
// printf("\ncpuid(7): ebx=%8x ecx=%8x\n", d[1], d[2]);
return 1
& (d[1] >> 5); // avx2
}
}
BoolInt CPU_IsSupported_VAES_AVX2(void)
{
if (!CPU_IsSupported_AVX())
return False;
if (z7_x86_cpuid_GetMaxFunc() < 7)
return False;
{
UInt32 d[4];
z7_x86_cpuid(d, 7);
// printf("\ncpuid(7): ebx=%8x ecx=%8x\n", d[1], d[2]);
return 1
& (d[1] >> 5) // avx2
// & (d[1] >> 31) // avx512vl
& (d[2] >> 9); // vaes // VEX-256/EVEX
}
}
BoolInt CPU_IsSupported_PageGB(void)
{
CHECK_CPUID_IS_SUPPORTED
{
UInt32 d[4];
z7_x86_cpuid(d, 0x80000000);
if (d[0] < 0x80000001)
return False;
z7_x86_cpuid(d, 0x80000001);
return (d[3] >> 26) & 1;
}
}
#elif defined(MY_CPU_ARM_OR_ARM64)
#ifdef _WIN32
#include "7zWindows.h"
BoolInt CPU_IsSupported_CRC32(void) { return IsProcessorFeaturePresent(PF_ARM_V8_CRC32_INSTRUCTIONS_AVAILABLE) ? 1 : 0; }
BoolInt CPU_IsSupported_CRYPTO(void) { return IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE) ? 1 : 0; }
BoolInt CPU_IsSupported_NEON(void) { return IsProcessorFeaturePresent(PF_ARM_NEON_INSTRUCTIONS_AVAILABLE) ? 1 : 0; }
#else
#if defined(__APPLE__)
/*
#include <stdio.h>
#include <string.h>
static void Print_sysctlbyname(const char *name)
{
size_t bufSize = 256;
char buf[256];
int res = sysctlbyname(name, &buf, &bufSize, NULL, 0);
{
int i;
printf("\nres = %d : %s : '%s' : bufSize = %d, numeric", res, name, buf, (unsigned)bufSize);
for (i = 0; i < 20; i++)
printf(" %2x", (unsigned)(Byte)buf[i]);
}
}
*/
/*
Print_sysctlbyname("hw.pagesize");
Print_sysctlbyname("machdep.cpu.brand_string");
*/
static BoolInt z7_sysctlbyname_Get_BoolInt(const char *name)
{
UInt32 val = 0;
if (z7_sysctlbyname_Get_UInt32(name, &val) == 0 && val == 1)
return 1;
return 0;
}
BoolInt CPU_IsSupported_CRC32(void)
{
return z7_sysctlbyname_Get_BoolInt("hw.optional.armv8_crc32");
}
BoolInt CPU_IsSupported_NEON(void)
{
return z7_sysctlbyname_Get_BoolInt("hw.optional.neon");
}
#ifdef MY_CPU_ARM64
#define APPLE_CRYPTO_SUPPORT_VAL 1
#else
#define APPLE_CRYPTO_SUPPORT_VAL 0
#endif
BoolInt CPU_IsSupported_SHA1(void) { return APPLE_CRYPTO_SUPPORT_VAL; }
BoolInt CPU_IsSupported_SHA2(void) { return APPLE_CRYPTO_SUPPORT_VAL; }
BoolInt CPU_IsSupported_AES (void) { return APPLE_CRYPTO_SUPPORT_VAL; }
#else // __APPLE__
#include <sys/auxv.h>
#define USE_HWCAP
#ifdef USE_HWCAP
#include <asm/hwcap.h>
#define MY_HWCAP_CHECK_FUNC_2(name1, name2) \
BoolInt CPU_IsSupported_ ## name1() { return (getauxval(AT_HWCAP) & (HWCAP_ ## name2)) ? 1 : 0; }
#ifdef MY_CPU_ARM64
#define MY_HWCAP_CHECK_FUNC(name) \
MY_HWCAP_CHECK_FUNC_2(name, name)
MY_HWCAP_CHECK_FUNC_2(NEON, ASIMD)
// MY_HWCAP_CHECK_FUNC (ASIMD)
#elif defined(MY_CPU_ARM)
#define MY_HWCAP_CHECK_FUNC(name) \
BoolInt CPU_IsSupported_ ## name() { return (getauxval(AT_HWCAP2) & (HWCAP2_ ## name)) ? 1 : 0; }
MY_HWCAP_CHECK_FUNC_2(NEON, NEON)
#endif
#else // USE_HWCAP
#define MY_HWCAP_CHECK_FUNC(name) \
BoolInt CPU_IsSupported_ ## name() { return 0; }
MY_HWCAP_CHECK_FUNC(NEON)
#endif // USE_HWCAP
MY_HWCAP_CHECK_FUNC (CRC32)
MY_HWCAP_CHECK_FUNC (SHA1)
MY_HWCAP_CHECK_FUNC (SHA2)
MY_HWCAP_CHECK_FUNC (AES)
#endif // __APPLE__
#endif // _WIN32
#endif // MY_CPU_ARM_OR_ARM64
#ifdef __APPLE__
#include <sys/sysctl.h>
int z7_sysctlbyname_Get(const char *name, void *buf, size_t *bufSize)
{
return sysctlbyname(name, buf, bufSize, NULL, 0);
}
int z7_sysctlbyname_Get_UInt32(const char *name, UInt32 *val)
{
size_t bufSize = sizeof(*val);
const int res = z7_sysctlbyname_Get(name, val, &bufSize);
if (res == 0 && bufSize != sizeof(*val))
return EFAULT;
return res;
}
#endif
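For readers who no longer have CpuArch.c in the tree, the AVX gate above boils down to two checks: CPUID reports the feature, and XGETBV(0) confirms the OS saves/restores the SSE and AVX register state. Below is a minimal standalone sketch of the same check using GCC/Clang builtins rather than the removed CpuArch API; the helper names are illustrative and it assumes an x86-64 build with a recent GCC or Clang (for <cpuid.h> and __get_cpuid_count).
/* Standalone sketch (not part of the removed files): the same AVX2 gate that
   the removed CpuArch.c implements, expressed with GCC/Clang builtins. */
#include <cpuid.h>
#include <stdio.h>

static unsigned xgetbv0(void)
{
  unsigned eax, edx;
  /* XGETBV with ECX=0 reads XCR0: bit 1 = SSE state, bit 2 = AVX state. */
  __asm__("xgetbv" : "=a"(eax), "=d"(edx) : "c"(0));
  return eax;
}

static int os_and_cpu_support_avx2(void)   /* illustrative helper name */
{
  unsigned a, b, c, d;
  if (!__get_cpuid(1, &a, &b, &c, &d))
    return 0;
  /* OSXSAVE (bit 27) and AVX (bit 28) must be set before XGETBV is usable. */
  if ((c & (1u << 27)) == 0 || (c & (1u << 28)) == 0)
    return 0;
  if ((xgetbv0() & 6u) != 6u)       /* OS stores/restores SSE + AVX state */
    return 0;
  if (!__get_cpuid_count(7, 0, &a, &b, &c, &d))
    return 0;
  return (b >> 5) & 1;              /* CPUID.(7,0):EBX bit 5 = AVX2 */
}

int main(void)
{
  printf("AVX2 usable: %d\n", os_and_cpu_support_avx2());
  return 0;
}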

extern/lzma/CpuArch.h vendored

@@ -1,523 +0,0 @@
/* CpuArch.h -- CPU specific code
2023-04-02 : Igor Pavlov : Public domain */
#ifndef ZIP7_INC_CPU_ARCH_H
#define ZIP7_INC_CPU_ARCH_H
#include "7zTypes.h"
EXTERN_C_BEGIN
/*
MY_CPU_LE means that CPU is LITTLE ENDIAN.
MY_CPU_BE means that CPU is BIG ENDIAN.
If MY_CPU_LE and MY_CPU_BE are not defined, we don't know about ENDIANNESS of platform.
MY_CPU_LE_UNALIGN means that CPU is LITTLE ENDIAN and CPU supports unaligned memory accesses.
MY_CPU_64BIT means that processor can work with 64-bit registers.
MY_CPU_64BIT can be used to select fast code branch
MY_CPU_64BIT doesn't mean that (sizeof(void *) == 8)
*/
#if defined(_M_X64) \
|| defined(_M_AMD64) \
|| defined(__x86_64__) \
|| defined(__AMD64__) \
|| defined(__amd64__)
#define MY_CPU_AMD64
#ifdef __ILP32__
#define MY_CPU_NAME "x32"
#define MY_CPU_SIZEOF_POINTER 4
#else
#define MY_CPU_NAME "x64"
#define MY_CPU_SIZEOF_POINTER 8
#endif
#define MY_CPU_64BIT
#endif
#if defined(_M_IX86) \
|| defined(__i386__)
#define MY_CPU_X86
#define MY_CPU_NAME "x86"
/* #define MY_CPU_32BIT */
#define MY_CPU_SIZEOF_POINTER 4
#endif
#if defined(_M_ARM64) \
|| defined(__AARCH64EL__) \
|| defined(__AARCH64EB__) \
|| defined(__aarch64__)
#define MY_CPU_ARM64
#ifdef __ILP32__
#define MY_CPU_NAME "arm64-32"
#define MY_CPU_SIZEOF_POINTER 4
#else
#define MY_CPU_NAME "arm64"
#define MY_CPU_SIZEOF_POINTER 8
#endif
#define MY_CPU_64BIT
#endif
#if defined(_M_ARM) \
|| defined(_M_ARM_NT) \
|| defined(_M_ARMT) \
|| defined(__arm__) \
|| defined(__thumb__) \
|| defined(__ARMEL__) \
|| defined(__ARMEB__) \
|| defined(__THUMBEL__) \
|| defined(__THUMBEB__)
#define MY_CPU_ARM
#if defined(__thumb__) || defined(__THUMBEL__) || defined(_M_ARMT)
#define MY_CPU_ARMT
#define MY_CPU_NAME "armt"
#else
#define MY_CPU_ARM32
#define MY_CPU_NAME "arm"
#endif
/* #define MY_CPU_32BIT */
#define MY_CPU_SIZEOF_POINTER 4
#endif
#if defined(_M_IA64) \
|| defined(__ia64__)
#define MY_CPU_IA64
#define MY_CPU_NAME "ia64"
#define MY_CPU_64BIT
#endif
#if defined(__mips64) \
|| defined(__mips64__) \
|| (defined(__mips) && (__mips == 64 || __mips == 4 || __mips == 3))
#define MY_CPU_NAME "mips64"
#define MY_CPU_64BIT
#elif defined(__mips__)
#define MY_CPU_NAME "mips"
/* #define MY_CPU_32BIT */
#endif
#if defined(__ppc64__) \
|| defined(__powerpc64__) \
|| defined(__ppc__) \
|| defined(__powerpc__) \
|| defined(__PPC__) \
|| defined(_POWER)
#define MY_CPU_PPC_OR_PPC64
#if defined(__ppc64__) \
|| defined(__powerpc64__) \
|| defined(_LP64) \
|| defined(__64BIT__)
#ifdef __ILP32__
#define MY_CPU_NAME "ppc64-32"
#define MY_CPU_SIZEOF_POINTER 4
#else
#define MY_CPU_NAME "ppc64"
#define MY_CPU_SIZEOF_POINTER 8
#endif
#define MY_CPU_64BIT
#else
#define MY_CPU_NAME "ppc"
#define MY_CPU_SIZEOF_POINTER 4
/* #define MY_CPU_32BIT */
#endif
#endif
#if defined(__riscv) \
|| defined(__riscv__)
#if __riscv_xlen == 32
#define MY_CPU_NAME "riscv32"
#elif __riscv_xlen == 64
#define MY_CPU_NAME "riscv64"
#else
#define MY_CPU_NAME "riscv"
#endif
#endif
#if defined(MY_CPU_X86) || defined(MY_CPU_AMD64)
#define MY_CPU_X86_OR_AMD64
#endif
#if defined(MY_CPU_ARM) || defined(MY_CPU_ARM64)
#define MY_CPU_ARM_OR_ARM64
#endif
#ifdef _WIN32
#ifdef MY_CPU_ARM
#define MY_CPU_ARM_LE
#endif
#ifdef MY_CPU_ARM64
#define MY_CPU_ARM64_LE
#endif
#ifdef _M_IA64
#define MY_CPU_IA64_LE
#endif
#endif
#if defined(MY_CPU_X86_OR_AMD64) \
|| defined(MY_CPU_ARM_LE) \
|| defined(MY_CPU_ARM64_LE) \
|| defined(MY_CPU_IA64_LE) \
|| defined(__LITTLE_ENDIAN__) \
|| defined(__ARMEL__) \
|| defined(__THUMBEL__) \
|| defined(__AARCH64EL__) \
|| defined(__MIPSEL__) \
|| defined(__MIPSEL) \
|| defined(_MIPSEL) \
|| defined(__BFIN__) \
|| (defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__))
#define MY_CPU_LE
#endif
#if defined(__BIG_ENDIAN__) \
|| defined(__ARMEB__) \
|| defined(__THUMBEB__) \
|| defined(__AARCH64EB__) \
|| defined(__MIPSEB__) \
|| defined(__MIPSEB) \
|| defined(_MIPSEB) \
|| defined(__m68k__) \
|| defined(__s390__) \
|| defined(__s390x__) \
|| defined(__zarch__) \
|| (defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))
#define MY_CPU_BE
#endif
#if defined(MY_CPU_LE) && defined(MY_CPU_BE)
#error Stop_Compiling_Bad_Endian
#endif
#if !defined(MY_CPU_LE) && !defined(MY_CPU_BE)
#error Stop_Compiling_CPU_ENDIAN_must_be_detected_at_compile_time
#endif
#if defined(MY_CPU_32BIT) && defined(MY_CPU_64BIT)
#error Stop_Compiling_Bad_32_64_BIT
#endif
#ifdef __SIZEOF_POINTER__
#ifdef MY_CPU_SIZEOF_POINTER
#if MY_CPU_SIZEOF_POINTER != __SIZEOF_POINTER__
#error Stop_Compiling_Bad_MY_CPU_PTR_SIZE
#endif
#else
#define MY_CPU_SIZEOF_POINTER __SIZEOF_POINTER__
#endif
#endif
#if defined(MY_CPU_SIZEOF_POINTER) && (MY_CPU_SIZEOF_POINTER == 4)
#if defined (_LP64)
#error Stop_Compiling_Bad_MY_CPU_PTR_SIZE
#endif
#endif
#ifdef _MSC_VER
#if _MSC_VER >= 1300
#define MY_CPU_pragma_pack_push_1 __pragma(pack(push, 1))
#define MY_CPU_pragma_pop __pragma(pack(pop))
#else
#define MY_CPU_pragma_pack_push_1
#define MY_CPU_pragma_pop
#endif
#else
#ifdef __xlC__
#define MY_CPU_pragma_pack_push_1 _Pragma("pack(1)")
#define MY_CPU_pragma_pop _Pragma("pack()")
#else
#define MY_CPU_pragma_pack_push_1 _Pragma("pack(push, 1)")
#define MY_CPU_pragma_pop _Pragma("pack(pop)")
#endif
#endif
#ifndef MY_CPU_NAME
#ifdef MY_CPU_LE
#define MY_CPU_NAME "LE"
#elif defined(MY_CPU_BE)
#define MY_CPU_NAME "BE"
#else
/*
#define MY_CPU_NAME ""
*/
#endif
#endif
#ifdef __has_builtin
#define Z7_has_builtin(x) __has_builtin(x)
#else
#define Z7_has_builtin(x) 0
#endif
#define Z7_BSWAP32_CONST(v) \
( (((UInt32)(v) << 24) ) \
| (((UInt32)(v) << 8) & (UInt32)0xff0000) \
| (((UInt32)(v) >> 8) & (UInt32)0xff00 ) \
| (((UInt32)(v) >> 24) ))
#if defined(_MSC_VER) && (_MSC_VER >= 1300)
#include <stdlib.h>
/* Note: these macros will use bswap instruction (486), that is unsupported in 386 cpu */
#pragma intrinsic(_byteswap_ushort)
#pragma intrinsic(_byteswap_ulong)
#pragma intrinsic(_byteswap_uint64)
#define Z7_BSWAP16(v) _byteswap_ushort(v)
#define Z7_BSWAP32(v) _byteswap_ulong (v)
#define Z7_BSWAP64(v) _byteswap_uint64(v)
#define Z7_CPU_FAST_BSWAP_SUPPORTED
#elif (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))) \
|| (defined(__clang__) && Z7_has_builtin(__builtin_bswap16))
#define Z7_BSWAP16(v) __builtin_bswap16(v)
#define Z7_BSWAP32(v) __builtin_bswap32(v)
#define Z7_BSWAP64(v) __builtin_bswap64(v)
#define Z7_CPU_FAST_BSWAP_SUPPORTED
#else
#define Z7_BSWAP16(v) ((UInt16) \
( ((UInt32)(v) << 8) \
| ((UInt32)(v) >> 8) \
))
#define Z7_BSWAP32(v) Z7_BSWAP32_CONST(v)
#define Z7_BSWAP64(v) \
( ( ( (UInt64)(v) ) << 8 * 7 ) \
| ( ( (UInt64)(v) & ((UInt32)0xff << 8 * 1) ) << 8 * 5 ) \
| ( ( (UInt64)(v) & ((UInt32)0xff << 8 * 2) ) << 8 * 3 ) \
| ( ( (UInt64)(v) & ((UInt32)0xff << 8 * 3) ) << 8 * 1 ) \
| ( ( (UInt64)(v) >> 8 * 1 ) & ((UInt32)0xff << 8 * 3) ) \
| ( ( (UInt64)(v) >> 8 * 3 ) & ((UInt32)0xff << 8 * 2) ) \
| ( ( (UInt64)(v) >> 8 * 5 ) & ((UInt32)0xff << 8 * 1) ) \
| ( ( (UInt64)(v) >> 8 * 7 ) ) \
)
#endif
#ifdef MY_CPU_LE
#if defined(MY_CPU_X86_OR_AMD64) \
|| defined(MY_CPU_ARM64)
#define MY_CPU_LE_UNALIGN
#define MY_CPU_LE_UNALIGN_64
#elif defined(__ARM_FEATURE_UNALIGNED)
/* gcc9 for 32-bit arm can use LDRD instruction that requires 32-bit alignment.
So we can't use unaligned 64-bit operations. */
#define MY_CPU_LE_UNALIGN
#endif
#endif
#ifdef MY_CPU_LE_UNALIGN
#define GetUi16(p) (*(const UInt16 *)(const void *)(p))
#define GetUi32(p) (*(const UInt32 *)(const void *)(p))
#ifdef MY_CPU_LE_UNALIGN_64
#define GetUi64(p) (*(const UInt64 *)(const void *)(p))
#define SetUi64(p, v) { *(UInt64 *)(void *)(p) = (v); }
#endif
#define SetUi16(p, v) { *(UInt16 *)(void *)(p) = (v); }
#define SetUi32(p, v) { *(UInt32 *)(void *)(p) = (v); }
#else
#define GetUi16(p) ( (UInt16) ( \
((const Byte *)(p))[0] | \
((UInt16)((const Byte *)(p))[1] << 8) ))
#define GetUi32(p) ( \
((const Byte *)(p))[0] | \
((UInt32)((const Byte *)(p))[1] << 8) | \
((UInt32)((const Byte *)(p))[2] << 16) | \
((UInt32)((const Byte *)(p))[3] << 24))
#define SetUi16(p, v) { Byte *_ppp_ = (Byte *)(p); UInt32 _vvv_ = (v); \
_ppp_[0] = (Byte)_vvv_; \
_ppp_[1] = (Byte)(_vvv_ >> 8); }
#define SetUi32(p, v) { Byte *_ppp_ = (Byte *)(p); UInt32 _vvv_ = (v); \
_ppp_[0] = (Byte)_vvv_; \
_ppp_[1] = (Byte)(_vvv_ >> 8); \
_ppp_[2] = (Byte)(_vvv_ >> 16); \
_ppp_[3] = (Byte)(_vvv_ >> 24); }
#endif
#ifndef GetUi64
#define GetUi64(p) (GetUi32(p) | ((UInt64)GetUi32(((const Byte *)(p)) + 4) << 32))
#endif
#ifndef SetUi64
#define SetUi64(p, v) { Byte *_ppp2_ = (Byte *)(p); UInt64 _vvv2_ = (v); \
SetUi32(_ppp2_ , (UInt32)_vvv2_) \
SetUi32(_ppp2_ + 4, (UInt32)(_vvv2_ >> 32)) }
#endif
#if defined(MY_CPU_LE_UNALIGN) && defined(Z7_CPU_FAST_BSWAP_SUPPORTED)
#define GetBe32(p) Z7_BSWAP32 (*(const UInt32 *)(const void *)(p))
#define SetBe32(p, v) { (*(UInt32 *)(void *)(p)) = Z7_BSWAP32(v); }
#if defined(MY_CPU_LE_UNALIGN_64)
#define GetBe64(p) Z7_BSWAP64 (*(const UInt64 *)(const void *)(p))
#endif
#else
#define GetBe32(p) ( \
((UInt32)((const Byte *)(p))[0] << 24) | \
((UInt32)((const Byte *)(p))[1] << 16) | \
((UInt32)((const Byte *)(p))[2] << 8) | \
((const Byte *)(p))[3] )
#define SetBe32(p, v) { Byte *_ppp_ = (Byte *)(p); UInt32 _vvv_ = (v); \
_ppp_[0] = (Byte)(_vvv_ >> 24); \
_ppp_[1] = (Byte)(_vvv_ >> 16); \
_ppp_[2] = (Byte)(_vvv_ >> 8); \
_ppp_[3] = (Byte)_vvv_; }
#endif
#ifndef GetBe64
#define GetBe64(p) (((UInt64)GetBe32(p) << 32) | GetBe32(((const Byte *)(p)) + 4))
#endif
#ifndef GetBe16
#define GetBe16(p) ( (UInt16) ( \
((UInt16)((const Byte *)(p))[0] << 8) | \
((const Byte *)(p))[1] ))
#endif
#if defined(MY_CPU_BE)
#define Z7_CONV_BE_TO_NATIVE_CONST32(v) (v)
#define Z7_CONV_LE_TO_NATIVE_CONST32(v) Z7_BSWAP32_CONST(v)
#define Z7_CONV_NATIVE_TO_BE_32(v) (v)
#elif defined(MY_CPU_LE)
#define Z7_CONV_BE_TO_NATIVE_CONST32(v) Z7_BSWAP32_CONST(v)
#define Z7_CONV_LE_TO_NATIVE_CONST32(v) (v)
#define Z7_CONV_NATIVE_TO_BE_32(v) Z7_BSWAP32(v)
#else
#error Stop_Compiling_Unknown_Endian_CONV
#endif
#if defined(MY_CPU_BE)
#define GetBe32a(p) (*(const UInt32 *)(const void *)(p))
#define GetBe16a(p) (*(const UInt16 *)(const void *)(p))
#define SetBe32a(p, v) { *(UInt32 *)(void *)(p) = (v); }
#define SetBe16a(p, v) { *(UInt16 *)(void *)(p) = (v); }
#define GetUi32a(p) GetUi32(p)
#define GetUi16a(p) GetUi16(p)
#define SetUi32a(p, v) SetUi32(p, v)
#define SetUi16a(p, v) SetUi16(p, v)
#elif defined(MY_CPU_LE)
#define GetUi32a(p) (*(const UInt32 *)(const void *)(p))
#define GetUi16a(p) (*(const UInt16 *)(const void *)(p))
#define SetUi32a(p, v) { *(UInt32 *)(void *)(p) = (v); }
#define SetUi16a(p, v) { *(UInt16 *)(void *)(p) = (v); }
#define GetBe32a(p) GetBe32(p)
#define GetBe16a(p) GetBe16(p)
#define SetBe32a(p, v) SetBe32(p, v)
#define SetBe16a(p, v) SetBe16(p, v)
#else
#error Stop_Compiling_Unknown_Endian_CPU_a
#endif
#if defined(MY_CPU_X86_OR_AMD64) \
|| defined(MY_CPU_ARM_OR_ARM64) \
|| defined(MY_CPU_PPC_OR_PPC64)
#define Z7_CPU_FAST_ROTATE_SUPPORTED
#endif
#ifdef MY_CPU_X86_OR_AMD64
void Z7_FASTCALL z7_x86_cpuid(UInt32 a[4], UInt32 function);
UInt32 Z7_FASTCALL z7_x86_cpuid_GetMaxFunc(void);
#if defined(MY_CPU_AMD64)
#define Z7_IF_X86_CPUID_SUPPORTED
#else
#define Z7_IF_X86_CPUID_SUPPORTED if (z7_x86_cpuid_GetMaxFunc())
#endif
BoolInt CPU_IsSupported_AES(void);
BoolInt CPU_IsSupported_AVX(void);
BoolInt CPU_IsSupported_AVX2(void);
BoolInt CPU_IsSupported_VAES_AVX2(void);
BoolInt CPU_IsSupported_CMOV(void);
BoolInt CPU_IsSupported_SSE(void);
BoolInt CPU_IsSupported_SSE2(void);
BoolInt CPU_IsSupported_SSSE3(void);
BoolInt CPU_IsSupported_SSE41(void);
BoolInt CPU_IsSupported_SHA(void);
BoolInt CPU_IsSupported_PageGB(void);
#elif defined(MY_CPU_ARM_OR_ARM64)
BoolInt CPU_IsSupported_CRC32(void);
BoolInt CPU_IsSupported_NEON(void);
#if defined(_WIN32)
BoolInt CPU_IsSupported_CRYPTO(void);
#define CPU_IsSupported_SHA1 CPU_IsSupported_CRYPTO
#define CPU_IsSupported_SHA2 CPU_IsSupported_CRYPTO
#define CPU_IsSupported_AES CPU_IsSupported_CRYPTO
#else
BoolInt CPU_IsSupported_SHA1(void);
BoolInt CPU_IsSupported_SHA2(void);
BoolInt CPU_IsSupported_AES(void);
#endif
#endif
#if defined(__APPLE__)
int z7_sysctlbyname_Get(const char *name, void *buf, size_t *bufSize);
int z7_sysctlbyname_Get_UInt32(const char *name, UInt32 *val);
#endif
EXTERN_C_END
#endif
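The accessor macros above (GetUi16/GetUi32/GetBe32 and friends) either perform a direct unaligned load on platforms that allow it or fall back to assembling the value byte by byte. A small standalone sketch of what the fallback forms compute, written as plain functions (names are illustrative, not from the removed header):
/* Illustration only: portable equivalents of the removed GetUi32 / GetBe32
   fallback macros, showing the byte order they assume regardless of host
   endianness. */
#include <stdint.h>
#include <stdio.h>

static uint32_t get_ui32(const unsigned char *p)   /* little-endian load */
{
  return (uint32_t)p[0]
       | ((uint32_t)p[1] << 8)
       | ((uint32_t)p[2] << 16)
       | ((uint32_t)p[3] << 24);
}

static uint32_t get_be32(const unsigned char *p)   /* big-endian load */
{
  return ((uint32_t)p[0] << 24)
       | ((uint32_t)p[1] << 16)
       | ((uint32_t)p[2] << 8)
       |  (uint32_t)p[3];
}

int main(void)
{
  const unsigned char buf[4] = { 0x78, 0x56, 0x34, 0x12 };
  /* prints LE: 0x12345678  BE: 0x78563412 */
  printf("LE: 0x%08x  BE: 0x%08x\n", get_ui32(buf), get_be32(buf));
  return 0;
}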

extern/lzma/LzFind.c vendored

@@ -1,1717 +0,0 @@
/* LzFind.c -- Match finder for LZ algorithms
2023-03-14 : Igor Pavlov : Public domain */
#include "Precomp.h"
#include <string.h>
// #include <stdio.h>
#include "CpuArch.h"
#include "LzFind.h"
#include "LzHash.h"
#define kBlockMoveAlign (1 << 7) // alignment for memmove()
#define kBlockSizeAlign (1 << 16) // alignment for block allocation
#define kBlockSizeReserveMin (1 << 24) // it's 1/256 from 4 GB dictionary
#define kEmptyHashValue 0
#define kMaxValForNormalize ((UInt32)0)
// #define kMaxValForNormalize ((UInt32)(1 << 20) + 0xfff) // for debug
// #define kNormalizeAlign (1 << 7) // alignment for speculated accesses
#define GET_AVAIL_BYTES(p) \
Inline_MatchFinder_GetNumAvailableBytes(p)
// #define kFix5HashSize (kHash2Size + kHash3Size + kHash4Size)
#define kFix5HashSize kFix4HashSize
/*
HASH2_CALC:
if (hv) match, then cur[0] and cur[1] also match
*/
#define HASH2_CALC hv = GetUi16(cur);
// (crc[0 ... 255] & 0xFF) provides one-to-one correspondence to [0 ... 255]
/*
HASH3_CALC:
if (cur[0]) and (h2) match, then cur[1] also match
if (cur[0]) and (hv) match, then cur[1] and cur[2] also match
*/
#define HASH3_CALC { \
UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
h2 = temp & (kHash2Size - 1); \
hv = (temp ^ ((UInt32)cur[2] << 8)) & p->hashMask; }
#define HASH4_CALC { \
UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
h2 = temp & (kHash2Size - 1); \
temp ^= ((UInt32)cur[2] << 8); \
h3 = temp & (kHash3Size - 1); \
hv = (temp ^ (p->crc[cur[3]] << kLzHash_CrcShift_1)) & p->hashMask; }
#define HASH5_CALC { \
UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
h2 = temp & (kHash2Size - 1); \
temp ^= ((UInt32)cur[2] << 8); \
h3 = temp & (kHash3Size - 1); \
temp ^= (p->crc[cur[3]] << kLzHash_CrcShift_1); \
/* h4 = temp & p->hash4Mask; */ /* (kHash4Size - 1); */ \
hv = (temp ^ (p->crc[cur[4]] << kLzHash_CrcShift_2)) & p->hashMask; }
#define HASH_ZIP_CALC hv = ((cur[2] | ((UInt32)cur[0] << 8)) ^ p->crc[cur[1]]) & 0xFFFF;
static void LzInWindow_Free(CMatchFinder *p, ISzAllocPtr alloc)
{
// if (!p->directInput)
{
ISzAlloc_Free(alloc, p->bufBase);
p->bufBase = NULL;
}
}
static int LzInWindow_Create2(CMatchFinder *p, UInt32 blockSize, ISzAllocPtr alloc)
{
if (blockSize == 0)
return 0;
if (!p->bufBase || p->blockSize != blockSize)
{
// size_t blockSizeT;
LzInWindow_Free(p, alloc);
p->blockSize = blockSize;
// blockSizeT = blockSize;
// printf("\nblockSize = 0x%x\n", blockSize);
/*
#if defined _WIN64
// we can allocate 4GiB, but still use UInt32 for (p->blockSize)
// we use UInt32 type for (p->blockSize), because
// we don't want to wrap over 4 GiB,
// when we use (p->streamPos - p->pos) that is UInt32.
if (blockSize >= (UInt32)0 - (UInt32)kBlockSizeAlign)
{
blockSizeT = ((size_t)1 << 32);
printf("\nchanged to blockSizeT = 4GiB\n");
}
#endif
*/
p->bufBase = (Byte *)ISzAlloc_Alloc(alloc, blockSize);
// printf("\nbufferBase = %p\n", p->bufBase);
// return 0; // for debug
}
return (p->bufBase != NULL);
}
static const Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p) { return p->buffer; }
static UInt32 MatchFinder_GetNumAvailableBytes(CMatchFinder *p) { return GET_AVAIL_BYTES(p); }
Z7_NO_INLINE
static void MatchFinder_ReadBlock(CMatchFinder *p)
{
if (p->streamEndWasReached || p->result != SZ_OK)
return;
/* We use (p->streamPos - p->pos) value.
(p->streamPos < p->pos) is allowed. */
if (p->directInput)
{
UInt32 curSize = 0xFFFFFFFF - GET_AVAIL_BYTES(p);
if (curSize > p->directInputRem)
curSize = (UInt32)p->directInputRem;
p->streamPos += curSize;
p->directInputRem -= curSize;
if (p->directInputRem == 0)
p->streamEndWasReached = 1;
return;
}
for (;;)
{
const Byte *dest = p->buffer + GET_AVAIL_BYTES(p);
size_t size = (size_t)(p->bufBase + p->blockSize - dest);
if (size == 0)
{
/* we call ReadBlock() after NeedMove() and MoveBlock().
NeedMove() and MoveBlock() provide more than (keepSizeAfter)
to the end of (blockSize).
So we don't execute this branch in normal code flow.
We can only get here if ReadBlock() is called before NeedMove() / MoveBlock().
*/
// p->result = SZ_ERROR_FAIL; // we can show error here
return;
}
// #define kRead 3
// if (size > kRead) size = kRead; // for debug
/*
// we need cast (Byte *)dest.
#ifdef __clang__
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
*/
p->result = ISeqInStream_Read(p->stream,
p->bufBase + (dest - p->bufBase), &size);
if (p->result != SZ_OK)
return;
if (size == 0)
{
p->streamEndWasReached = 1;
return;
}
p->streamPos += (UInt32)size;
if (GET_AVAIL_BYTES(p) > p->keepSizeAfter)
return;
/* here and in the other (p->keepSizeAfter) checks we keep 1 byte more than was requested by the Create() function
(GET_AVAIL_BYTES(p) >= p->keepSizeAfter) - minimal required size */
}
// on exit: (p->result != SZ_OK || p->streamEndWasReached || GET_AVAIL_BYTES(p) > p->keepSizeAfter)
}
Z7_NO_INLINE
void MatchFinder_MoveBlock(CMatchFinder *p)
{
const size_t offset = (size_t)(p->buffer - p->bufBase) - p->keepSizeBefore;
const size_t keepBefore = (offset & (kBlockMoveAlign - 1)) + p->keepSizeBefore;
p->buffer = p->bufBase + keepBefore;
memmove(p->bufBase,
p->bufBase + (offset & ~((size_t)kBlockMoveAlign - 1)),
keepBefore + (size_t)GET_AVAIL_BYTES(p));
}
/* We call MoveBlock() before ReadBlock().
So MoveBlock() can be a wasteful operation if the whole input data
can fit in the current block even without calling MoveBlock().
In the important case where (dataSize <= historySize),
the condition (p->blockSize > dataSize + p->keepSizeAfter) is met,
so there is no MoveBlock() in that case.
*/
int MatchFinder_NeedMove(CMatchFinder *p)
{
if (p->directInput)
return 0;
if (p->streamEndWasReached || p->result != SZ_OK)
return 0;
return ((size_t)(p->bufBase + p->blockSize - p->buffer) <= p->keepSizeAfter);
}
void MatchFinder_ReadIfRequired(CMatchFinder *p)
{
if (p->keepSizeAfter >= GET_AVAIL_BYTES(p))
MatchFinder_ReadBlock(p);
}
static void MatchFinder_SetDefaultSettings(CMatchFinder *p)
{
p->cutValue = 32;
p->btMode = 1;
p->numHashBytes = 4;
p->numHashBytes_Min = 2;
p->numHashOutBits = 0;
p->bigHash = 0;
}
#define kCrcPoly 0xEDB88320
void MatchFinder_Construct(CMatchFinder *p)
{
unsigned i;
p->buffer = NULL;
p->bufBase = NULL;
p->directInput = 0;
p->stream = NULL;
p->hash = NULL;
p->expectedDataSize = (UInt64)(Int64)-1;
MatchFinder_SetDefaultSettings(p);
for (i = 0; i < 256; i++)
{
UInt32 r = (UInt32)i;
unsigned j;
for (j = 0; j < 8; j++)
r = (r >> 1) ^ (kCrcPoly & ((UInt32)0 - (r & 1)));
p->crc[i] = r;
}
}
#undef kCrcPoly
static void MatchFinder_FreeThisClassMemory(CMatchFinder *p, ISzAllocPtr alloc)
{
ISzAlloc_Free(alloc, p->hash);
p->hash = NULL;
}
void MatchFinder_Free(CMatchFinder *p, ISzAllocPtr alloc)
{
MatchFinder_FreeThisClassMemory(p, alloc);
LzInWindow_Free(p, alloc);
}
static CLzRef* AllocRefs(size_t num, ISzAllocPtr alloc)
{
const size_t sizeInBytes = (size_t)num * sizeof(CLzRef);
if (sizeInBytes / sizeof(CLzRef) != num)
return NULL;
return (CLzRef *)ISzAlloc_Alloc(alloc, sizeInBytes);
}
#if (kBlockSizeReserveMin < kBlockSizeAlign * 2)
#error Stop_Compiling_Bad_Reserve
#endif
static UInt32 GetBlockSize(CMatchFinder *p, UInt32 historySize)
{
UInt32 blockSize = (p->keepSizeBefore + p->keepSizeAfter);
/*
if (historySize > kMaxHistorySize)
return 0;
*/
// printf("\nhistorySize == 0x%x\n", historySize);
if (p->keepSizeBefore < historySize || blockSize < p->keepSizeBefore) // if 32-bit overflow
return 0;
{
const UInt32 kBlockSizeMax = (UInt32)0 - (UInt32)kBlockSizeAlign;
const UInt32 rem = kBlockSizeMax - blockSize;
const UInt32 reserve = (blockSize >> (blockSize < ((UInt32)1 << 30) ? 1 : 2))
+ (1 << 12) + kBlockMoveAlign + kBlockSizeAlign; // do not overflow 32-bit here
if (blockSize >= kBlockSizeMax
|| rem < kBlockSizeReserveMin) // we reject settings that will be slow
return 0;
if (reserve >= rem)
blockSize = kBlockSizeMax;
else
{
blockSize += reserve;
blockSize &= ~(UInt32)(kBlockSizeAlign - 1);
}
}
// printf("\n LzFind_blockSize = %x\n", blockSize);
// printf("\n LzFind_blockSize = %d\n", blockSize >> 20);
return blockSize;
}
// input is historySize
static UInt32 MatchFinder_GetHashMask2(CMatchFinder *p, UInt32 hs)
{
if (p->numHashBytes == 2)
return (1 << 16) - 1;
if (hs != 0)
hs--;
hs |= (hs >> 1);
hs |= (hs >> 2);
hs |= (hs >> 4);
hs |= (hs >> 8);
// we propagated 16 bits in (hs). Low 16 bits must be set later
if (hs >= (1 << 24))
{
if (p->numHashBytes == 3)
hs = (1 << 24) - 1;
/* if (bigHash) mode, GetHeads4b() in LzFindMt.c needs (hs >= ((1 << 24) - 1))) */
}
// (hash_size >= (1 << 16)) : Required for (numHashBytes > 2)
hs |= (1 << 16) - 1; /* don't change it! */
// bt5: we adjust the size with recommended minimum size
if (p->numHashBytes >= 5)
hs |= (256 << kLzHash_CrcShift_2) - 1;
return hs;
}
// input is historySize
static UInt32 MatchFinder_GetHashMask(CMatchFinder *p, UInt32 hs)
{
if (p->numHashBytes == 2)
return (1 << 16) - 1;
if (hs != 0)
hs--;
hs |= (hs >> 1);
hs |= (hs >> 2);
hs |= (hs >> 4);
hs |= (hs >> 8);
// we propagated 16 bits in (hs). Low 16 bits must be set later
hs >>= 1;
if (hs >= (1 << 24))
{
if (p->numHashBytes == 3)
hs = (1 << 24) - 1;
else
hs >>= 1;
/* if (bigHash) mode, GetHeads4b() in LzFindMt.c needs (hs >= ((1 << 24) - 1))) */
}
// (hash_size >= (1 << 16)) : Required for (numHashBytes > 2)
hs |= (1 << 16) - 1; /* don't change it! */
// bt5: we adjust the size with recommended minimum size
if (p->numHashBytes >= 5)
hs |= (256 << kLzHash_CrcShift_2) - 1;
return hs;
}
int MatchFinder_Create(CMatchFinder *p, UInt32 historySize,
UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter,
ISzAllocPtr alloc)
{
/* we need one additional byte in (p->keepSizeBefore),
since we use MoveBlock() after (p->pos++) and before using the dictionary */
// keepAddBufferBefore = (UInt32)0xFFFFFFFF - (1 << 22); // for debug
p->keepSizeBefore = historySize + keepAddBufferBefore + 1;
keepAddBufferAfter += matchMaxLen;
/* we need (p->keepSizeAfter >= p->numHashBytes) */
if (keepAddBufferAfter < p->numHashBytes)
keepAddBufferAfter = p->numHashBytes;
// keepAddBufferAfter -= 2; // for debug
p->keepSizeAfter = keepAddBufferAfter;
if (p->directInput)
p->blockSize = 0;
if (p->directInput || LzInWindow_Create2(p, GetBlockSize(p, historySize), alloc))
{
size_t hashSizeSum;
{
UInt32 hs;
UInt32 hsCur;
if (p->numHashOutBits != 0)
{
unsigned numBits = p->numHashOutBits;
const unsigned nbMax =
(p->numHashBytes == 2 ? 16 :
(p->numHashBytes == 3 ? 24 : 32));
if (numBits > nbMax)
numBits = nbMax;
if (numBits >= 32)
hs = (UInt32)0 - 1;
else
hs = ((UInt32)1 << numBits) - 1;
// (hash_size >= (1 << 16)) : Required for (numHashBytes > 2)
hs |= (1 << 16) - 1; /* don't change it! */
if (p->numHashBytes >= 5)
hs |= (256 << kLzHash_CrcShift_2) - 1;
{
const UInt32 hs2 = MatchFinder_GetHashMask2(p, historySize);
if (hs > hs2)
hs = hs2;
}
hsCur = hs;
if (p->expectedDataSize < historySize)
{
const UInt32 hs2 = MatchFinder_GetHashMask2(p, (UInt32)p->expectedDataSize);
if (hsCur > hs2)
hsCur = hs2;
}
}
else
{
hs = MatchFinder_GetHashMask(p, historySize);
hsCur = hs;
if (p->expectedDataSize < historySize)
{
hsCur = MatchFinder_GetHashMask(p, (UInt32)p->expectedDataSize);
if (hsCur > hs) // is it possible?
hsCur = hs;
}
}
p->hashMask = hsCur;
hashSizeSum = hs;
hashSizeSum++;
if (hashSizeSum < hs)
return 0;
{
UInt32 fixedHashSize = 0;
if (p->numHashBytes > 2 && p->numHashBytes_Min <= 2) fixedHashSize += kHash2Size;
if (p->numHashBytes > 3 && p->numHashBytes_Min <= 3) fixedHashSize += kHash3Size;
// if (p->numHashBytes > 4) p->fixedHashSize += hs4; // kHash4Size;
hashSizeSum += fixedHashSize;
p->fixedHashSize = fixedHashSize;
}
}
p->matchMaxLen = matchMaxLen;
{
size_t newSize;
size_t numSons;
const UInt32 newCyclicBufferSize = historySize + 1; // do not change it
p->historySize = historySize;
p->cyclicBufferSize = newCyclicBufferSize; // it must be = (historySize + 1)
numSons = newCyclicBufferSize;
if (p->btMode)
numSons <<= 1;
newSize = hashSizeSum + numSons;
if (numSons < newCyclicBufferSize || newSize < numSons)
return 0;
// aligned size is not required here, but it can be better for some loops
#define NUM_REFS_ALIGN_MASK 0xF
newSize = (newSize + NUM_REFS_ALIGN_MASK) & ~(size_t)NUM_REFS_ALIGN_MASK;
// 22.02: we don't reallocate buffer, if old size is enough
if (p->hash && p->numRefs >= newSize)
return 1;
MatchFinder_FreeThisClassMemory(p, alloc);
p->numRefs = newSize;
p->hash = AllocRefs(newSize, alloc);
if (p->hash)
{
p->son = p->hash + hashSizeSum;
return 1;
}
}
}
MatchFinder_Free(p, alloc);
return 0;
}
static void MatchFinder_SetLimits(CMatchFinder *p)
{
UInt32 k;
UInt32 n = kMaxValForNormalize - p->pos;
if (n == 0)
n = (UInt32)(Int32)-1; // we allow (pos == 0) at start even with (kMaxValForNormalize == 0)
k = p->cyclicBufferSize - p->cyclicBufferPos;
if (k < n)
n = k;
k = GET_AVAIL_BYTES(p);
{
const UInt32 ksa = p->keepSizeAfter;
UInt32 mm = p->matchMaxLen;
if (k > ksa)
k -= ksa; // we must limit exactly to keepSizeAfter for ReadBlock
else if (k >= mm)
{
// the limitation for (p->lenLimit) update
k -= mm; // optimization : to reduce the number of checks
k++;
// k = 1; // non-optimized version : for debug
}
else
{
mm = k;
if (k != 0)
k = 1;
}
p->lenLimit = mm;
}
if (k < n)
n = k;
p->posLimit = p->pos + n;
}
void MatchFinder_Init_LowHash(CMatchFinder *p)
{
size_t i;
CLzRef *items = p->hash;
const size_t numItems = p->fixedHashSize;
for (i = 0; i < numItems; i++)
items[i] = kEmptyHashValue;
}
void MatchFinder_Init_HighHash(CMatchFinder *p)
{
size_t i;
CLzRef *items = p->hash + p->fixedHashSize;
const size_t numItems = (size_t)p->hashMask + 1;
for (i = 0; i < numItems; i++)
items[i] = kEmptyHashValue;
}
void MatchFinder_Init_4(CMatchFinder *p)
{
if (!p->directInput)
p->buffer = p->bufBase;
{
/* kEmptyHashValue = 0 (Zero) is used in hash tables as NO-VALUE marker.
the code in CMatchFinderMt expects (pos = 1) */
p->pos =
p->streamPos =
1; // it's smallest optimal value. do not change it
// 0; // for debug
}
p->result = SZ_OK;
p->streamEndWasReached = 0;
}
// (CYC_TO_POS_OFFSET == 0) is expected by some optimized code
#define CYC_TO_POS_OFFSET 0
// #define CYC_TO_POS_OFFSET 1 // for debug
void MatchFinder_Init(CMatchFinder *p)
{
MatchFinder_Init_HighHash(p);
MatchFinder_Init_LowHash(p);
MatchFinder_Init_4(p);
// if (readData)
MatchFinder_ReadBlock(p);
/* if we init (cyclicBufferPos = pos), then we can use one variable
instead of both (cyclicBufferPos) and (pos) : only before (cyclicBufferPos) wrapping */
p->cyclicBufferPos = (p->pos - CYC_TO_POS_OFFSET); // init with relation to (pos)
// p->cyclicBufferPos = 0; // smallest value
// p->son[0] = p->son[1] = 0; // unused: we can init skipped record for speculated accesses.
MatchFinder_SetLimits(p);
}
#ifdef MY_CPU_X86_OR_AMD64
#if defined(__clang__) && (__clang_major__ >= 4) \
|| defined(Z7_GCC_VERSION) && (Z7_GCC_VERSION >= 40701)
// || defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1900)
#define USE_LZFIND_SATUR_SUB_128
#define USE_LZFIND_SATUR_SUB_256
#define LZFIND_ATTRIB_SSE41 __attribute__((__target__("sse4.1")))
#define LZFIND_ATTRIB_AVX2 __attribute__((__target__("avx2")))
#elif defined(_MSC_VER)
#if (_MSC_VER >= 1600)
#define USE_LZFIND_SATUR_SUB_128
#endif
#if (_MSC_VER >= 1900)
#define USE_LZFIND_SATUR_SUB_256
#endif
#endif
// #elif defined(MY_CPU_ARM_OR_ARM64)
#elif defined(MY_CPU_ARM64)
#if defined(__clang__) && (__clang_major__ >= 8) \
|| defined(__GNUC__) && (__GNUC__ >= 8)
#define USE_LZFIND_SATUR_SUB_128
#ifdef MY_CPU_ARM64
// #define LZFIND_ATTRIB_SSE41 __attribute__((__target__("")))
#else
// #define LZFIND_ATTRIB_SSE41 __attribute__((__target__("fpu=crypto-neon-fp-armv8")))
#endif
#elif defined(_MSC_VER)
#if (_MSC_VER >= 1910)
#define USE_LZFIND_SATUR_SUB_128
#endif
#endif
#if defined(_MSC_VER) && defined(MY_CPU_ARM64) && !defined(__clang__)
#include <arm64_neon.h>
#else
#include <arm_neon.h>
#endif
#endif
#ifdef USE_LZFIND_SATUR_SUB_128
// #define Z7_SHOW_HW_STATUS
#ifdef Z7_SHOW_HW_STATUS
#include <stdio.h>
#define PRF(x) x
PRF(;)
#else
#define PRF(x)
#endif
#ifdef MY_CPU_ARM_OR_ARM64
#ifdef MY_CPU_ARM64
// #define FORCE_LZFIND_SATUR_SUB_128
#endif
typedef uint32x4_t LzFind_v128;
#define SASUB_128_V(v, s) \
vsubq_u32(vmaxq_u32(v, s), s)
#else // MY_CPU_ARM_OR_ARM64
#include <smmintrin.h> // sse4.1
typedef __m128i LzFind_v128;
// SSE 4.1
#define SASUB_128_V(v, s) \
_mm_sub_epi32(_mm_max_epu32(v, s), s)
#endif // MY_CPU_ARM_OR_ARM64
#define SASUB_128(i) \
*( LzFind_v128 *)( void *)(items + (i) * 4) = SASUB_128_V( \
*(const LzFind_v128 *)(const void *)(items + (i) * 4), sub2);
Z7_NO_INLINE
static
#ifdef LZFIND_ATTRIB_SSE41
LZFIND_ATTRIB_SSE41
#endif
void
Z7_FASTCALL
LzFind_SaturSub_128(UInt32 subValue, CLzRef *items, const CLzRef *lim)
{
const LzFind_v128 sub2 =
#ifdef MY_CPU_ARM_OR_ARM64
vdupq_n_u32(subValue);
#else
_mm_set_epi32((Int32)subValue, (Int32)subValue, (Int32)subValue, (Int32)subValue);
#endif
Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
do
{
SASUB_128(0) SASUB_128(1) items += 2 * 4;
SASUB_128(0) SASUB_128(1) items += 2 * 4;
}
while (items != lim);
}
#ifdef USE_LZFIND_SATUR_SUB_256
#include <immintrin.h> // avx
/*
clang: immintrin.h uses
#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
defined(__AVX2__)
#include <avx2intrin.h>
#endif
so we need <avxintrin.h> for clang-cl */
#if defined(__clang__)
#include <avxintrin.h>
#include <avx2intrin.h>
#endif
// AVX2:
#define SASUB_256(i) \
*( __m256i *)( void *)(items + (i) * 8) = \
_mm256_sub_epi32(_mm256_max_epu32( \
*(const __m256i *)(const void *)(items + (i) * 8), sub2), sub2);
Z7_NO_INLINE
static
#ifdef LZFIND_ATTRIB_AVX2
LZFIND_ATTRIB_AVX2
#endif
void
Z7_FASTCALL
LzFind_SaturSub_256(UInt32 subValue, CLzRef *items, const CLzRef *lim)
{
const __m256i sub2 = _mm256_set_epi32(
(Int32)subValue, (Int32)subValue, (Int32)subValue, (Int32)subValue,
(Int32)subValue, (Int32)subValue, (Int32)subValue, (Int32)subValue);
Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
do
{
SASUB_256(0) SASUB_256(1) items += 2 * 8;
SASUB_256(0) SASUB_256(1) items += 2 * 8;
}
while (items != lim);
}
#endif // USE_LZFIND_SATUR_SUB_256
#ifndef FORCE_LZFIND_SATUR_SUB_128
typedef void (Z7_FASTCALL *LZFIND_SATUR_SUB_CODE_FUNC)(
UInt32 subValue, CLzRef *items, const CLzRef *lim);
static LZFIND_SATUR_SUB_CODE_FUNC g_LzFind_SaturSub;
#endif // FORCE_LZFIND_SATUR_SUB_128
#endif // USE_LZFIND_SATUR_SUB_128
// kEmptyHashValue must be zero
// #define SASUB_32(i) { UInt32 v = items[i]; UInt32 m = v - subValue; if (v < subValue) m = kEmptyHashValue; items[i] = m; }
#define SASUB_32(i) { UInt32 v = items[i]; if (v < subValue) v = subValue; items[i] = v - subValue; }
#ifdef FORCE_LZFIND_SATUR_SUB_128
#define DEFAULT_SaturSub LzFind_SaturSub_128
#else
#define DEFAULT_SaturSub LzFind_SaturSub_32
Z7_NO_INLINE
static
void
Z7_FASTCALL
LzFind_SaturSub_32(UInt32 subValue, CLzRef *items, const CLzRef *lim)
{
Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
do
{
SASUB_32(0) SASUB_32(1) items += 2;
SASUB_32(0) SASUB_32(1) items += 2;
SASUB_32(0) SASUB_32(1) items += 2;
SASUB_32(0) SASUB_32(1) items += 2;
}
while (items != lim);
}
#endif
Z7_NO_INLINE
void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, size_t numItems)
{
#define LZFIND_NORM_ALIGN_BLOCK_SIZE (1 << 7)
Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
for (; numItems != 0 && ((unsigned)(ptrdiff_t)items & (LZFIND_NORM_ALIGN_BLOCK_SIZE - 1)) != 0; numItems--)
{
SASUB_32(0)
items++;
}
{
const size_t k_Align_Mask = (LZFIND_NORM_ALIGN_BLOCK_SIZE / 4 - 1);
CLzRef *lim = items + (numItems & ~(size_t)k_Align_Mask);
numItems &= k_Align_Mask;
if (items != lim)
{
#if defined(USE_LZFIND_SATUR_SUB_128) && !defined(FORCE_LZFIND_SATUR_SUB_128)
if (g_LzFind_SaturSub)
g_LzFind_SaturSub(subValue, items, lim);
else
#endif
DEFAULT_SaturSub(subValue, items, lim);
}
items = lim;
}
Z7_PRAGMA_OPT_DISABLE_LOOP_UNROLL_VECTORIZE
for (; numItems != 0; numItems--)
{
SASUB_32(0)
items++;
}
}
// call MatchFinder_CheckLimits() only after (p->pos++) update
Z7_NO_INLINE
static void MatchFinder_CheckLimits(CMatchFinder *p)
{
if (// !p->streamEndWasReached && p->result == SZ_OK &&
p->keepSizeAfter == GET_AVAIL_BYTES(p))
{
// we try to read only in exact state (p->keepSizeAfter == GET_AVAIL_BYTES(p))
if (MatchFinder_NeedMove(p))
MatchFinder_MoveBlock(p);
MatchFinder_ReadBlock(p);
}
if (p->pos == kMaxValForNormalize)
if (GET_AVAIL_BYTES(p) >= p->numHashBytes) // optional optimization for last bytes of data.
/*
if we disable normalization for last bytes of data, and
if (data_size == 4 GiB), we don't call wasteful normalization,
but (pos) will be wrapped over Zero (0) in that case.
And we cannot resume later to normal operation
*/
{
// MatchFinder_Normalize(p);
/* after normalization we need (p->pos >= p->historySize + 1); */
/* we can reduce subValue to an aligned value, if we want to keep the alignment
of (p->pos) and (p->buffer) for speculated accesses. */
const UInt32 subValue = (p->pos - p->historySize - 1) /* & ~(UInt32)(kNormalizeAlign - 1) */;
// const UInt32 subValue = (1 << 15); // for debug
// printf("\nMatchFinder_Normalize() subValue == 0x%x\n", subValue);
MatchFinder_REDUCE_OFFSETS(p, subValue)
MatchFinder_Normalize3(subValue, p->hash, (size_t)p->hashMask + 1 + p->fixedHashSize);
{
size_t numSonRefs = p->cyclicBufferSize;
if (p->btMode)
numSonRefs <<= 1;
MatchFinder_Normalize3(subValue, p->son, numSonRefs);
}
}
if (p->cyclicBufferPos == p->cyclicBufferSize)
p->cyclicBufferPos = 0;
MatchFinder_SetLimits(p);
}
/*
(lenLimit > maxLen)
*/
Z7_FORCE_INLINE
static UInt32 * Hc_GetMatchesSpec(size_t lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
size_t _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue,
UInt32 *d, unsigned maxLen)
{
/*
son[_cyclicBufferPos] = curMatch;
for (;;)
{
UInt32 delta = pos - curMatch;
if (cutValue-- == 0 || delta >= _cyclicBufferSize)
return d;
{
const Byte *pb = cur - delta;
curMatch = son[_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)];
if (pb[maxLen] == cur[maxLen] && *pb == *cur)
{
UInt32 len = 0;
while (++len != lenLimit)
if (pb[len] != cur[len])
break;
if (maxLen < len)
{
maxLen = len;
*d++ = len;
*d++ = delta - 1;
if (len == lenLimit)
return d;
}
}
}
}
*/
const Byte *lim = cur + lenLimit;
son[_cyclicBufferPos] = curMatch;
do
{
UInt32 delta;
if (curMatch == 0)
break;
// if (curMatch2 >= curMatch) return NULL;
delta = pos - curMatch;
if (delta >= _cyclicBufferSize)
break;
{
ptrdiff_t diff;
curMatch = son[_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)];
diff = (ptrdiff_t)0 - (ptrdiff_t)delta;
if (cur[maxLen] == cur[(ptrdiff_t)maxLen + diff])
{
const Byte *c = cur;
while (*c == c[diff])
{
if (++c == lim)
{
d[0] = (UInt32)(lim - cur);
d[1] = delta - 1;
return d + 2;
}
}
{
const unsigned len = (unsigned)(c - cur);
if (maxLen < len)
{
maxLen = len;
d[0] = (UInt32)len;
d[1] = delta - 1;
d += 2;
}
}
}
}
}
while (--cutValue);
return d;
}
Z7_FORCE_INLINE
UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
size_t _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue,
UInt32 *d, UInt32 maxLen)
{
CLzRef *ptr0 = son + ((size_t)_cyclicBufferPos << 1) + 1;
CLzRef *ptr1 = son + ((size_t)_cyclicBufferPos << 1);
unsigned len0 = 0, len1 = 0;
UInt32 cmCheck;
// if (curMatch >= pos) { *ptr0 = *ptr1 = kEmptyHashValue; return NULL; }
cmCheck = (UInt32)(pos - _cyclicBufferSize);
if ((UInt32)pos <= _cyclicBufferSize)
cmCheck = 0;
if (cmCheck < curMatch)
do
{
const UInt32 delta = pos - curMatch;
{
CLzRef *pair = son + ((size_t)(_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)) << 1);
const Byte *pb = cur - delta;
unsigned len = (len0 < len1 ? len0 : len1);
const UInt32 pair0 = pair[0];
if (pb[len] == cur[len])
{
if (++len != lenLimit && pb[len] == cur[len])
while (++len != lenLimit)
if (pb[len] != cur[len])
break;
if (maxLen < len)
{
maxLen = (UInt32)len;
*d++ = (UInt32)len;
*d++ = delta - 1;
if (len == lenLimit)
{
*ptr1 = pair0;
*ptr0 = pair[1];
return d;
}
}
}
if (pb[len] < cur[len])
{
*ptr1 = curMatch;
// const UInt32 curMatch2 = pair[1];
// if (curMatch2 >= curMatch) { *ptr0 = *ptr1 = kEmptyHashValue; return NULL; }
// curMatch = curMatch2;
curMatch = pair[1];
ptr1 = pair + 1;
len1 = len;
}
else
{
*ptr0 = curMatch;
curMatch = pair[0];
ptr0 = pair;
len0 = len;
}
}
}
while(--cutValue && cmCheck < curMatch);
*ptr0 = *ptr1 = kEmptyHashValue;
return d;
}
static void SkipMatchesSpec(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
size_t _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue)
{
CLzRef *ptr0 = son + ((size_t)_cyclicBufferPos << 1) + 1;
CLzRef *ptr1 = son + ((size_t)_cyclicBufferPos << 1);
unsigned len0 = 0, len1 = 0;
UInt32 cmCheck;
cmCheck = (UInt32)(pos - _cyclicBufferSize);
if ((UInt32)pos <= _cyclicBufferSize)
cmCheck = 0;
if (// curMatch >= pos || // failure
cmCheck < curMatch)
do
{
const UInt32 delta = pos - curMatch;
{
CLzRef *pair = son + ((size_t)(_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)) << 1);
const Byte *pb = cur - delta;
unsigned len = (len0 < len1 ? len0 : len1);
if (pb[len] == cur[len])
{
while (++len != lenLimit)
if (pb[len] != cur[len])
break;
{
if (len == lenLimit)
{
*ptr1 = pair[0];
*ptr0 = pair[1];
return;
}
}
}
if (pb[len] < cur[len])
{
*ptr1 = curMatch;
curMatch = pair[1];
ptr1 = pair + 1;
len1 = len;
}
else
{
*ptr0 = curMatch;
curMatch = pair[0];
ptr0 = pair;
len0 = len;
}
}
}
while(--cutValue && cmCheck < curMatch);
*ptr0 = *ptr1 = kEmptyHashValue;
return;
}
#define MOVE_POS \
++p->cyclicBufferPos; \
p->buffer++; \
{ const UInt32 pos1 = p->pos + 1; p->pos = pos1; if (pos1 == p->posLimit) MatchFinder_CheckLimits(p); }
#define MOVE_POS_RET MOVE_POS return distances;
Z7_NO_INLINE
static void MatchFinder_MovePos(CMatchFinder *p)
{
/* we go here at the end of stream data, when (avail < num_hash_bytes)
We don't update sons[cyclicBufferPos << btMode].
So the (sons) record will contain junk, and we cannot resume match searching
in normal operation, even if more input data is provided in the buffer.
p->sons[p->cyclicBufferPos << p->btMode] = 0; // kEmptyHashValue
if (p->btMode)
p->sons[(p->cyclicBufferPos << p->btMode) + 1] = 0; // kEmptyHashValue
*/
MOVE_POS
}
#define GET_MATCHES_HEADER2(minLen, ret_op) \
unsigned lenLimit; UInt32 hv; const Byte *cur; UInt32 curMatch; \
lenLimit = (unsigned)p->lenLimit; { if (lenLimit < minLen) { MatchFinder_MovePos(p); ret_op; }} \
cur = p->buffer;
#define GET_MATCHES_HEADER(minLen) GET_MATCHES_HEADER2(minLen, return distances)
#define SKIP_HEADER(minLen) do { GET_MATCHES_HEADER2(minLen, continue)
#define MF_PARAMS(p) lenLimit, curMatch, p->pos, p->buffer, p->son, p->cyclicBufferPos, p->cyclicBufferSize, p->cutValue
#define SKIP_FOOTER SkipMatchesSpec(MF_PARAMS(p)); MOVE_POS } while (--num);
#define GET_MATCHES_FOOTER_BASE(_maxLen_, func) \
distances = func(MF_PARAMS(p), \
distances, (UInt32)_maxLen_); MOVE_POS_RET
#define GET_MATCHES_FOOTER_BT(_maxLen_) \
GET_MATCHES_FOOTER_BASE(_maxLen_, GetMatchesSpec1)
#define GET_MATCHES_FOOTER_HC(_maxLen_) \
GET_MATCHES_FOOTER_BASE(_maxLen_, Hc_GetMatchesSpec)
#define UPDATE_maxLen { \
const ptrdiff_t diff = (ptrdiff_t)0 - (ptrdiff_t)d2; \
const Byte *c = cur + maxLen; \
const Byte *lim = cur + lenLimit; \
for (; c != lim; c++) if (*(c + diff) != *c) break; \
maxLen = (unsigned)(c - cur); }
static UInt32* Bt2_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
{
GET_MATCHES_HEADER(2)
HASH2_CALC
curMatch = p->hash[hv];
p->hash[hv] = p->pos;
GET_MATCHES_FOOTER_BT(1)
}
UInt32* Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
{
GET_MATCHES_HEADER(3)
HASH_ZIP_CALC
curMatch = p->hash[hv];
p->hash[hv] = p->pos;
GET_MATCHES_FOOTER_BT(2)
}
#define SET_mmm \
mmm = p->cyclicBufferSize; \
if (pos < mmm) \
mmm = pos;
static UInt32* Bt3_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
{
UInt32 mmm;
UInt32 h2, d2, pos;
unsigned maxLen;
UInt32 *hash;
GET_MATCHES_HEADER(3)
HASH3_CALC
hash = p->hash;
pos = p->pos;
d2 = pos - hash[h2];
curMatch = (hash + kFix3HashSize)[hv];
hash[h2] = pos;
(hash + kFix3HashSize)[hv] = pos;
SET_mmm
maxLen = 2;
if (d2 < mmm && *(cur - d2) == *cur)
{
UPDATE_maxLen
distances[0] = (UInt32)maxLen;
distances[1] = d2 - 1;
distances += 2;
if (maxLen == lenLimit)
{
SkipMatchesSpec(MF_PARAMS(p));
MOVE_POS_RET
}
}
GET_MATCHES_FOOTER_BT(maxLen)
}
static UInt32* Bt4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
{
UInt32 mmm;
UInt32 h2, h3, d2, d3, pos;
unsigned maxLen;
UInt32 *hash;
GET_MATCHES_HEADER(4)
HASH4_CALC
hash = p->hash;
pos = p->pos;
d2 = pos - hash [h2];
d3 = pos - (hash + kFix3HashSize)[h3];
curMatch = (hash + kFix4HashSize)[hv];
hash [h2] = pos;
(hash + kFix3HashSize)[h3] = pos;
(hash + kFix4HashSize)[hv] = pos;
SET_mmm
maxLen = 3;
for (;;)
{
if (d2 < mmm && *(cur - d2) == *cur)
{
distances[0] = 2;
distances[1] = d2 - 1;
distances += 2;
if (*(cur - d2 + 2) == cur[2])
{
// distances[-2] = 3;
}
else if (d3 < mmm && *(cur - d3) == *cur)
{
d2 = d3;
distances[1] = d3 - 1;
distances += 2;
}
else
break;
}
else if (d3 < mmm && *(cur - d3) == *cur)
{
d2 = d3;
distances[1] = d3 - 1;
distances += 2;
}
else
break;
UPDATE_maxLen
distances[-2] = (UInt32)maxLen;
if (maxLen == lenLimit)
{
SkipMatchesSpec(MF_PARAMS(p));
MOVE_POS_RET
}
break;
}
GET_MATCHES_FOOTER_BT(maxLen)
}
static UInt32* Bt5_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
{
UInt32 mmm;
UInt32 h2, h3, d2, d3, maxLen, pos;
UInt32 *hash;
GET_MATCHES_HEADER(5)
HASH5_CALC
hash = p->hash;
pos = p->pos;
d2 = pos - hash [h2];
d3 = pos - (hash + kFix3HashSize)[h3];
// d4 = pos - (hash + kFix4HashSize)[h4];
curMatch = (hash + kFix5HashSize)[hv];
hash [h2] = pos;
(hash + kFix3HashSize)[h3] = pos;
// (hash + kFix4HashSize)[h4] = pos;
(hash + kFix5HashSize)[hv] = pos;
SET_mmm
maxLen = 4;
for (;;)
{
if (d2 < mmm && *(cur - d2) == *cur)
{
distances[0] = 2;
distances[1] = d2 - 1;
distances += 2;
if (*(cur - d2 + 2) == cur[2])
{
}
else if (d3 < mmm && *(cur - d3) == *cur)
{
distances[1] = d3 - 1;
distances += 2;
d2 = d3;
}
else
break;
}
else if (d3 < mmm && *(cur - d3) == *cur)
{
distances[1] = d3 - 1;
distances += 2;
d2 = d3;
}
else
break;
distances[-2] = 3;
if (*(cur - d2 + 3) != cur[3])
break;
UPDATE_maxLen
distances[-2] = (UInt32)maxLen;
if (maxLen == lenLimit)
{
SkipMatchesSpec(MF_PARAMS(p));
MOVE_POS_RET
}
break;
}
GET_MATCHES_FOOTER_BT(maxLen)
}
static UInt32* Hc4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
{
UInt32 mmm;
UInt32 h2, h3, d2, d3, pos;
unsigned maxLen;
UInt32 *hash;
GET_MATCHES_HEADER(4)
HASH4_CALC
hash = p->hash;
pos = p->pos;
d2 = pos - hash [h2];
d3 = pos - (hash + kFix3HashSize)[h3];
curMatch = (hash + kFix4HashSize)[hv];
hash [h2] = pos;
(hash + kFix3HashSize)[h3] = pos;
(hash + kFix4HashSize)[hv] = pos;
SET_mmm
maxLen = 3;
for (;;)
{
if (d2 < mmm && *(cur - d2) == *cur)
{
distances[0] = 2;
distances[1] = d2 - 1;
distances += 2;
if (*(cur - d2 + 2) == cur[2])
{
// distances[-2] = 3;
}
else if (d3 < mmm && *(cur - d3) == *cur)
{
d2 = d3;
distances[1] = d3 - 1;
distances += 2;
}
else
break;
}
else if (d3 < mmm && *(cur - d3) == *cur)
{
d2 = d3;
distances[1] = d3 - 1;
distances += 2;
}
else
break;
UPDATE_maxLen
distances[-2] = (UInt32)maxLen;
if (maxLen == lenLimit)
{
p->son[p->cyclicBufferPos] = curMatch;
MOVE_POS_RET
}
break;
}
GET_MATCHES_FOOTER_HC(maxLen)
}
static UInt32 * Hc5_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
{
UInt32 mmm;
UInt32 h2, h3, d2, d3, maxLen, pos;
UInt32 *hash;
GET_MATCHES_HEADER(5)
HASH5_CALC
hash = p->hash;
pos = p->pos;
d2 = pos - hash [h2];
d3 = pos - (hash + kFix3HashSize)[h3];
// d4 = pos - (hash + kFix4HashSize)[h4];
curMatch = (hash + kFix5HashSize)[hv];
hash [h2] = pos;
(hash + kFix3HashSize)[h3] = pos;
// (hash + kFix4HashSize)[h4] = pos;
(hash + kFix5HashSize)[hv] = pos;
SET_mmm
maxLen = 4;
for (;;)
{
if (d2 < mmm && *(cur - d2) == *cur)
{
distances[0] = 2;
distances[1] = d2 - 1;
distances += 2;
if (*(cur - d2 + 2) == cur[2])
{
}
else if (d3 < mmm && *(cur - d3) == *cur)
{
distances[1] = d3 - 1;
distances += 2;
d2 = d3;
}
else
break;
}
else if (d3 < mmm && *(cur - d3) == *cur)
{
distances[1] = d3 - 1;
distances += 2;
d2 = d3;
}
else
break;
distances[-2] = 3;
if (*(cur - d2 + 3) != cur[3])
break;
UPDATE_maxLen
distances[-2] = maxLen;
if (maxLen == lenLimit)
{
p->son[p->cyclicBufferPos] = curMatch;
MOVE_POS_RET
}
break;
}
GET_MATCHES_FOOTER_HC(maxLen)
}
UInt32* Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
{
GET_MATCHES_HEADER(3)
HASH_ZIP_CALC
curMatch = p->hash[hv];
p->hash[hv] = p->pos;
GET_MATCHES_FOOTER_HC(2)
}
static void Bt2_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
{
SKIP_HEADER(2)
{
HASH2_CALC
curMatch = p->hash[hv];
p->hash[hv] = p->pos;
}
SKIP_FOOTER
}
void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
{
SKIP_HEADER(3)
{
HASH_ZIP_CALC
curMatch = p->hash[hv];
p->hash[hv] = p->pos;
}
SKIP_FOOTER
}
static void Bt3_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
{
SKIP_HEADER(3)
{
UInt32 h2;
UInt32 *hash;
HASH3_CALC
hash = p->hash;
curMatch = (hash + kFix3HashSize)[hv];
hash[h2] =
(hash + kFix3HashSize)[hv] = p->pos;
}
SKIP_FOOTER
}
static void Bt4_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
{
SKIP_HEADER(4)
{
UInt32 h2, h3;
UInt32 *hash;
HASH4_CALC
hash = p->hash;
curMatch = (hash + kFix4HashSize)[hv];
hash [h2] =
(hash + kFix3HashSize)[h3] =
(hash + kFix4HashSize)[hv] = p->pos;
}
SKIP_FOOTER
}
static void Bt5_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
{
SKIP_HEADER(5)
{
UInt32 h2, h3;
UInt32 *hash;
HASH5_CALC
hash = p->hash;
curMatch = (hash + kFix5HashSize)[hv];
hash [h2] =
(hash + kFix3HashSize)[h3] =
// (hash + kFix4HashSize)[h4] =
(hash + kFix5HashSize)[hv] = p->pos;
}
SKIP_FOOTER
}
#define HC_SKIP_HEADER(minLen) \
do { if (p->lenLimit < minLen) { MatchFinder_MovePos(p); num--; continue; } { \
const Byte *cur; \
UInt32 *hash; \
UInt32 *son; \
UInt32 pos = p->pos; \
UInt32 num2 = num; \
/* (p->pos == p->posLimit) is not allowed here !!! */ \
{ const UInt32 rem = p->posLimit - pos; if (num2 > rem) num2 = rem; } \
num -= num2; \
{ const UInt32 cycPos = p->cyclicBufferPos; \
son = p->son + cycPos; \
p->cyclicBufferPos = cycPos + num2; } \
cur = p->buffer; \
hash = p->hash; \
do { \
UInt32 curMatch; \
UInt32 hv;
#define HC_SKIP_FOOTER \
cur++; pos++; *son++ = curMatch; \
} while (--num2); \
p->buffer = cur; \
p->pos = pos; \
if (pos == p->posLimit) MatchFinder_CheckLimits(p); \
}} while(num); \
static void Hc4_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
{
HC_SKIP_HEADER(4)
UInt32 h2, h3;
HASH4_CALC
curMatch = (hash + kFix4HashSize)[hv];
hash [h2] =
(hash + kFix3HashSize)[h3] =
(hash + kFix4HashSize)[hv] = pos;
HC_SKIP_FOOTER
}
static void Hc5_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
{
HC_SKIP_HEADER(5)
UInt32 h2, h3;
HASH5_CALC
curMatch = (hash + kFix5HashSize)[hv];
hash [h2] =
(hash + kFix3HashSize)[h3] =
// (hash + kFix4HashSize)[h4] =
(hash + kFix5HashSize)[hv] = pos;
HC_SKIP_FOOTER
}
void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
{
HC_SKIP_HEADER(3)
HASH_ZIP_CALC
curMatch = hash[hv];
hash[hv] = pos;
HC_SKIP_FOOTER
}
void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder2 *vTable)
{
vTable->Init = (Mf_Init_Func)MatchFinder_Init;
vTable->GetNumAvailableBytes = (Mf_GetNumAvailableBytes_Func)MatchFinder_GetNumAvailableBytes;
vTable->GetPointerToCurrentPos = (Mf_GetPointerToCurrentPos_Func)MatchFinder_GetPointerToCurrentPos;
if (!p->btMode)
{
if (p->numHashBytes <= 4)
{
vTable->GetMatches = (Mf_GetMatches_Func)Hc4_MatchFinder_GetMatches;
vTable->Skip = (Mf_Skip_Func)Hc4_MatchFinder_Skip;
}
else
{
vTable->GetMatches = (Mf_GetMatches_Func)Hc5_MatchFinder_GetMatches;
vTable->Skip = (Mf_Skip_Func)Hc5_MatchFinder_Skip;
}
}
else if (p->numHashBytes == 2)
{
vTable->GetMatches = (Mf_GetMatches_Func)Bt2_MatchFinder_GetMatches;
vTable->Skip = (Mf_Skip_Func)Bt2_MatchFinder_Skip;
}
else if (p->numHashBytes == 3)
{
vTable->GetMatches = (Mf_GetMatches_Func)Bt3_MatchFinder_GetMatches;
vTable->Skip = (Mf_Skip_Func)Bt3_MatchFinder_Skip;
}
else if (p->numHashBytes == 4)
{
vTable->GetMatches = (Mf_GetMatches_Func)Bt4_MatchFinder_GetMatches;
vTable->Skip = (Mf_Skip_Func)Bt4_MatchFinder_Skip;
}
else
{
vTable->GetMatches = (Mf_GetMatches_Func)Bt5_MatchFinder_GetMatches;
vTable->Skip = (Mf_Skip_Func)Bt5_MatchFinder_Skip;
}
}
void LzFindPrepare(void)
{
#ifndef FORCE_LZFIND_SATUR_SUB_128
#ifdef USE_LZFIND_SATUR_SUB_128
LZFIND_SATUR_SUB_CODE_FUNC f = NULL;
#ifdef MY_CPU_ARM_OR_ARM64
{
if (CPU_IsSupported_NEON())
{
// #pragma message ("=== LzFind NEON")
PRF(printf("\n=== LzFind NEON\n"));
f = LzFind_SaturSub_128;
}
// f = 0; // for debug
}
#else // MY_CPU_ARM_OR_ARM64
if (CPU_IsSupported_SSE41())
{
// #pragma message ("=== LzFind SSE41")
PRF(printf("\n=== LzFind SSE41\n"));
f = LzFind_SaturSub_128;
#ifdef USE_LZFIND_SATUR_SUB_256
if (CPU_IsSupported_AVX2())
{
// #pragma message ("=== LzFind AVX2")
PRF(printf("\n=== LzFind AVX2\n"));
f = LzFind_SaturSub_256;
}
#endif
}
#endif // MY_CPU_ARM_OR_ARM64
g_LzFind_SaturSub = f;
#endif // USE_LZFIND_SATUR_SUB_128
#endif // FORCE_LZFIND_SATUR_SUB_128
}
#undef MOVE_POS
#undef MOVE_POS_RET
#undef PRF
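The normalization in MatchFinder_Normalize3 above subtracts subValue from every stored reference, saturating at zero so that kEmptyHashValue (0) stays empty; the SSE4.1/AVX2/NEON paths are just wide versions of SASUB_32. A standalone scalar sketch of that semantics (illustrative names, not the removed code):
/* Scalar sketch of the SASUB_32 normalization step:
   items[i] = max(items[i], subValue) - subValue */
#include <stdint.h>
#include <stdio.h>

static void normalize(uint32_t subValue, uint32_t *items, size_t numItems)
{
  for (size_t i = 0; i < numItems; i++)
  {
    uint32_t v = items[i];
    if (v < subValue)
      v = subValue;          /* saturate: empty/too-old entries become 0 */
    items[i] = v - subValue;
  }
}

int main(void)
{
  uint32_t refs[4] = { 0, 100, 5000, 70000 };
  normalize(4096, refs, 4);
  /* prints 0 0 904 65904 */
  printf("%u %u %u %u\n", refs[0], refs[1], refs[2], refs[3]);
  return 0;
}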

extern/lzma/LzFind.h vendored

@@ -1,159 +0,0 @@
/* LzFind.h -- Match finder for LZ algorithms
2023-03-04 : Igor Pavlov : Public domain */
#ifndef ZIP7_INC_LZ_FIND_H
#define ZIP7_INC_LZ_FIND_H
#include "7zTypes.h"
EXTERN_C_BEGIN
typedef UInt32 CLzRef;
typedef struct
{
const Byte *buffer;
UInt32 pos;
UInt32 posLimit;
UInt32 streamPos; /* wrap over Zero is allowed (streamPos < pos). Use (UInt32)(streamPos - pos) */
UInt32 lenLimit;
UInt32 cyclicBufferPos;
UInt32 cyclicBufferSize; /* it must be = (historySize + 1) */
Byte streamEndWasReached;
Byte btMode;
Byte bigHash;
Byte directInput;
UInt32 matchMaxLen;
CLzRef *hash;
CLzRef *son;
UInt32 hashMask;
UInt32 cutValue;
Byte *bufBase;
ISeqInStreamPtr stream;
UInt32 blockSize;
UInt32 keepSizeBefore;
UInt32 keepSizeAfter;
UInt32 numHashBytes;
size_t directInputRem;
UInt32 historySize;
UInt32 fixedHashSize;
Byte numHashBytes_Min;
Byte numHashOutBits;
Byte _pad2_[2];
SRes result;
UInt32 crc[256];
size_t numRefs;
UInt64 expectedDataSize;
} CMatchFinder;
#define Inline_MatchFinder_GetPointerToCurrentPos(p) ((const Byte *)(p)->buffer)
#define Inline_MatchFinder_GetNumAvailableBytes(p) ((UInt32)((p)->streamPos - (p)->pos))
/*
#define Inline_MatchFinder_IsFinishedOK(p) \
((p)->streamEndWasReached \
&& (p)->streamPos == (p)->pos \
&& (!(p)->directInput || (p)->directInputRem == 0))
*/
int MatchFinder_NeedMove(CMatchFinder *p);
/* Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p); */
void MatchFinder_MoveBlock(CMatchFinder *p);
void MatchFinder_ReadIfRequired(CMatchFinder *p);
void MatchFinder_Construct(CMatchFinder *p);
/* (directInput = 0) is default value.
It's required to provide correct (directInput) value
before calling MatchFinder_Create().
You can set (directInput) by any of the following calls:
- MatchFinder_SET_DIRECT_INPUT_BUF()
- MatchFinder_SET_STREAM()
- MatchFinder_SET_STREAM_MODE()
*/
#define MatchFinder_SET_DIRECT_INPUT_BUF(p, _src_, _srcLen_) { \
(p)->stream = NULL; \
(p)->directInput = 1; \
(p)->buffer = (_src_); \
(p)->directInputRem = (_srcLen_); }
/*
#define MatchFinder_SET_STREAM_MODE(p) { \
(p)->directInput = 0; }
*/
#define MatchFinder_SET_STREAM(p, _stream_) { \
(p)->stream = _stream_; \
(p)->directInput = 0; }
int MatchFinder_Create(CMatchFinder *p, UInt32 historySize,
UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter,
ISzAllocPtr alloc);
void MatchFinder_Free(CMatchFinder *p, ISzAllocPtr alloc);
void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, size_t numItems);
/*
#define MatchFinder_INIT_POS(p, val) \
(p)->pos = (val); \
(p)->streamPos = (val);
*/
// void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue);
#define MatchFinder_REDUCE_OFFSETS(p, subValue) \
(p)->pos -= (subValue); \
(p)->streamPos -= (subValue);
UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *buffer, CLzRef *son,
size_t _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 _cutValue,
UInt32 *distances, UInt32 maxLen);
/*
Conditions:
Mf_GetNumAvailableBytes_Func must be called before each Mf_GetMatchLen_Func.
Mf_GetPointerToCurrentPos_Func's result must be used only before any other function
*/
typedef void (*Mf_Init_Func)(void *object);
typedef UInt32 (*Mf_GetNumAvailableBytes_Func)(void *object);
typedef const Byte * (*Mf_GetPointerToCurrentPos_Func)(void *object);
typedef UInt32 * (*Mf_GetMatches_Func)(void *object, UInt32 *distances);
typedef void (*Mf_Skip_Func)(void *object, UInt32);
typedef struct
{
Mf_Init_Func Init;
Mf_GetNumAvailableBytes_Func GetNumAvailableBytes;
Mf_GetPointerToCurrentPos_Func GetPointerToCurrentPos;
Mf_GetMatches_Func GetMatches;
Mf_Skip_Func Skip;
} IMatchFinder2;
void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder2 *vTable);
void MatchFinder_Init_LowHash(CMatchFinder *p);
void MatchFinder_Init_HighHash(CMatchFinder *p);
void MatchFinder_Init_4(CMatchFinder *p);
void MatchFinder_Init(CMatchFinder *p);
UInt32* Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances);
UInt32* Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances);
void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num);
void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num);
void LzFindPrepare(void);
EXTERN_C_END
#endif
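For reference, a minimal sketch of how a caller drives the IMatchFinder2 vtable declared above in direct-input mode. The allocator and all parameters are illustrative, it assumes the removed LzFind sources are still available to build against, and it is not how Blender's point cache used the library.
/* Usage sketch only: bt4 match finder over an in-memory buffer. */
#include <stdio.h>
#include <stdlib.h>
#include "LzFind.h"

/* Trivial ISzAlloc backed by malloc/free. */
static void *SzAlloc(ISzAllocPtr p, size_t size) { (void)p; return malloc(size); }
static void SzFree(ISzAllocPtr p, void *addr)    { (void)p; free(addr); }
static const ISzAlloc g_AllocMallocFree = { SzAlloc, SzFree };

int main(void)
{
  static const Byte src[] = "abcabcabcabcabcabcabcabcabcabc";
  CMatchFinder mf;
  IMatchFinder2 vt;
  UInt32 matches[128];               /* enough for matchMaxLen = 32 */

  MatchFinder_Construct(&mf);        /* sets bt4 defaults */
  MatchFinder_SET_DIRECT_INPUT_BUF(&mf, src, sizeof(src) - 1)
  if (!MatchFinder_Create(&mf, 1 << 16, 0, 32, 0, &g_AllocMallocFree))
    return 1;
  MatchFinder_CreateVTable(&mf, &vt);
  vt.Init(&mf);

  while (vt.GetNumAvailableBytes(&mf) != 0)
  {
    /* GetMatches writes (len, dist - 1) pairs and returns the end pointer. */
    const UInt32 *end = vt.GetMatches(&mf, matches);
    const size_t numPairs = (size_t)(end - matches) / 2;
    if (numPairs != 0)
      printf("best match: len=%u dist=%u\n",
             matches[numPairs * 2 - 2], matches[numPairs * 2 - 1] + 1);
  }
  MatchFinder_Free(&mf, &g_AllocMallocFree);
  return 0;
}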

extern/lzma/LzFindMt.c vendored

@@ -1,1406 +0,0 @@
/* LzFindMt.c -- multithreaded Match finder for LZ algorithms
2023-04-02 : Igor Pavlov : Public domain */
#include "Precomp.h"
// #include <stdio.h>
#include "CpuArch.h"
#include "LzHash.h"
#include "LzFindMt.h"
// #define LOG_ITERS
// #define LOG_THREAD
#ifdef LOG_THREAD
#include <stdio.h>
#define PRF(x) x
#else
#define PRF(x)
#endif
#ifdef LOG_ITERS
#include <stdio.h>
extern UInt64 g_NumIters_Tree;
extern UInt64 g_NumIters_Loop;
extern UInt64 g_NumIters_Bytes;
#define LOG_ITER(x) x
#else
#define LOG_ITER(x)
#endif
#define kMtHashBlockSize ((UInt32)1 << 17)
#define kMtHashNumBlocks (1 << 1)
#define GET_HASH_BLOCK_OFFSET(i) (((i) & (kMtHashNumBlocks - 1)) * kMtHashBlockSize)
#define kMtBtBlockSize ((UInt32)1 << 16)
#define kMtBtNumBlocks (1 << 4)
#define GET_BT_BLOCK_OFFSET(i) (((i) & (kMtBtNumBlocks - 1)) * (size_t)kMtBtBlockSize)
/*
HASH functions:
We use raw 8/16 bits from a[1] and a[2],
xored with crc(a[0]) and crc(a[3]).
We check a[0], a[3] only. We don't need to compare a[1] and a[2] in matches.
our crc() function provides one-to-one correspondence for low 8-bit values:
(crc[0...0xFF] & 0xFF) <-> [0...0xFF]
*/
#define MF(mt) ((mt)->MatchFinder)
#define MF_CRC (p->crc)
// #define MF(mt) (&(mt)->MatchFinder)
// #define MF_CRC (p->MatchFinder.crc)
#define MT_HASH2_CALC \
h2 = (MF_CRC[cur[0]] ^ cur[1]) & (kHash2Size - 1);
#define MT_HASH3_CALC { \
UInt32 temp = MF_CRC[cur[0]] ^ cur[1]; \
h2 = temp & (kHash2Size - 1); \
h3 = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); }
/*
#define MT_HASH3_CALC__NO_2 { \
UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
h3 = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); }
#define MT_HASH4_CALC { \
UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
h2 = temp & (kHash2Size - 1); \
temp ^= ((UInt32)cur[2] << 8); \
h3 = temp & (kHash3Size - 1); \
h4 = (temp ^ (p->crc[cur[3]] << kLzHash_CrcShift_1)) & p->hash4Mask; }
// (kHash4Size - 1);
*/
Z7_NO_INLINE
static void MtSync_Construct(CMtSync *p)
{
p->affinity = 0;
p->wasCreated = False;
p->csWasInitialized = False;
p->csWasEntered = False;
Thread_CONSTRUCT(&p->thread)
Event_Construct(&p->canStart);
Event_Construct(&p->wasStopped);
Semaphore_Construct(&p->freeSemaphore);
Semaphore_Construct(&p->filledSemaphore);
}
#define DEBUG_BUFFER_LOCK // define it to debug lock state
#ifdef DEBUG_BUFFER_LOCK
#include <stdlib.h>
#define BUFFER_MUST_BE_LOCKED(p) if (!(p)->csWasEntered) exit(1);
#define BUFFER_MUST_BE_UNLOCKED(p) if ( (p)->csWasEntered) exit(1);
#else
#define BUFFER_MUST_BE_LOCKED(p)
#define BUFFER_MUST_BE_UNLOCKED(p)
#endif
#define LOCK_BUFFER(p) { \
BUFFER_MUST_BE_UNLOCKED(p); \
CriticalSection_Enter(&(p)->cs); \
(p)->csWasEntered = True; }
#define UNLOCK_BUFFER(p) { \
BUFFER_MUST_BE_LOCKED(p); \
CriticalSection_Leave(&(p)->cs); \
(p)->csWasEntered = False; }
Z7_NO_INLINE
static UInt32 MtSync_GetNextBlock(CMtSync *p)
{
UInt32 numBlocks = 0;
if (p->needStart)
{
BUFFER_MUST_BE_UNLOCKED(p)
p->numProcessedBlocks = 1;
p->needStart = False;
p->stopWriting = False;
p->exit = False;
Event_Reset(&p->wasStopped);
Event_Set(&p->canStart);
}
else
{
UNLOCK_BUFFER(p)
// we free current block
numBlocks = p->numProcessedBlocks++;
Semaphore_Release1(&p->freeSemaphore);
}
// buffer is UNLOCKED here
Semaphore_Wait(&p->filledSemaphore);
LOCK_BUFFER(p)
return numBlocks;
}
/* if Writing (Processing) thread was started, we must call MtSync_StopWriting() */
Z7_NO_INLINE
static void MtSync_StopWriting(CMtSync *p)
{
if (!Thread_WasCreated(&p->thread) || p->needStart)
return;
PRF(printf("\nMtSync_StopWriting %p\n", p));
if (p->csWasEntered)
{
/* we don't use buffer in this thread after StopWriting().
So we UNLOCK buffer.
And we restore default UNLOCKED state for stopped thread */
UNLOCK_BUFFER(p)
}
/* We send (p->stopWriting) message and release freeSemaphore
to free current block.
So the thread will see (p->stopWriting) at some
iteration after Wait(freeSemaphore).
The thread doesn't need to fill all avail free blocks,
so we can get fast thread stop.
*/
p->stopWriting = True;
Semaphore_Release1(&p->freeSemaphore); // check semaphore count !!!
PRF(printf("\nMtSync_StopWriting %p : Event_Wait(&p->wasStopped)\n", p));
Event_Wait(&p->wasStopped);
PRF(printf("\nMtSync_StopWriting %p : Event_Wait() finsihed\n", p));
/* 21.03 : we don't restore semaphore counters here.
We will recreate and reinit semaphores in the next start */
p->needStart = True;
}
Z7_NO_INLINE
static void MtSync_Destruct(CMtSync *p)
{
PRF(printf("\nMtSync_Destruct %p\n", p));
if (Thread_WasCreated(&p->thread))
{
/* we want thread to be in Stopped state before sending EXIT command.
note: stop(btSync) will stop (htSync) also */
MtSync_StopWriting(p);
/* thread in Stopped state here : (p->needStart == true) */
p->exit = True;
// if (p->needStart) // it's (true)
Event_Set(&p->canStart); // we send EXIT command to thread
Thread_Wait_Close(&p->thread); // we wait thread finishing
}
if (p->csWasInitialized)
{
CriticalSection_Delete(&p->cs);
p->csWasInitialized = False;
}
p->csWasEntered = False;
Event_Close(&p->canStart);
Event_Close(&p->wasStopped);
Semaphore_Close(&p->freeSemaphore);
Semaphore_Close(&p->filledSemaphore);
p->wasCreated = False;
}
// #define RINOK_THREAD(x) { if ((x) != 0) return SZ_ERROR_THREAD; }
// we want to get real system error codes here instead of SZ_ERROR_THREAD
#define RINOK_THREAD(x) RINOK_WRes(x)
// call it before each new file (when new starting is required):
Z7_NO_INLINE
static SRes MtSync_Init(CMtSync *p, UInt32 numBlocks)
{
WRes wres;
// BUFFER_MUST_BE_UNLOCKED(p)
if (!p->needStart || p->csWasEntered)
return SZ_ERROR_FAIL;
wres = Semaphore_OptCreateInit(&p->freeSemaphore, numBlocks, numBlocks);
if (wres == 0)
wres = Semaphore_OptCreateInit(&p->filledSemaphore, 0, numBlocks);
return MY_SRes_HRESULT_FROM_WRes(wres);
}
static WRes MtSync_Create_WRes(CMtSync *p, THREAD_FUNC_TYPE startAddress, void *obj)
{
WRes wres;
if (p->wasCreated)
return SZ_OK;
RINOK_THREAD(CriticalSection_Init(&p->cs))
p->csWasInitialized = True;
p->csWasEntered = False;
RINOK_THREAD(AutoResetEvent_CreateNotSignaled(&p->canStart))
RINOK_THREAD(AutoResetEvent_CreateNotSignaled(&p->wasStopped))
p->needStart = True;
p->exit = True; /* p->exit is unused before (canStart) Event.
But in case of some unexpected code failure we will get fast exit from thread */
// return ERROR_TOO_MANY_POSTS; // for debug
// return EINVAL; // for debug
if (p->affinity != 0)
wres = Thread_Create_With_Affinity(&p->thread, startAddress, obj, (CAffinityMask)p->affinity);
else
wres = Thread_Create(&p->thread, startAddress, obj);
RINOK_THREAD(wres)
p->wasCreated = True;
return SZ_OK;
}
Z7_NO_INLINE
static SRes MtSync_Create(CMtSync *p, THREAD_FUNC_TYPE startAddress, void *obj)
{
const WRes wres = MtSync_Create_WRes(p, startAddress, obj);
if (wres == 0)
return 0;
MtSync_Destruct(p);
return MY_SRes_HRESULT_FROM_WRes(wres);
}
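/* Usage outline for CMtSync, as implied by the functions above (a sketch of the
intended call order only, not additional SDK API):
MtSync_Construct(&sync); // reset state, once per object
MtSync_Create(&sync, WorkerFunc, obj); // CS, events, worker thread
MtSync_Init(&sync, numBlocks); // before each new input
... MtSync_GetNextBlock(&sync) ... // consumer side, per block
MtSync_StopWriting(&sync); // park the worker between inputs
MtSync_Destruct(&sync); // send exit, join, close handles
*/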
// ---------- HASH THREAD ----------
#define kMtMaxValForNormalize 0xFFFFFFFF
// #define kMtMaxValForNormalize ((1 << 21)) // for debug
// #define kNormalizeAlign (1 << 7) // alignment for speculated accesses
#ifdef MY_CPU_LE_UNALIGN
#define GetUi24hi_from32(p) ((UInt32)GetUi32(p) >> 8)
#else
#define GetUi24hi_from32(p) ((p)[1] ^ ((UInt32)(p)[2] << 8) ^ ((UInt32)(p)[3] << 16))
#endif
#define GetHeads_DECL(name) \
static void GetHeads ## name(const Byte *p, UInt32 pos, \
UInt32 *hash, UInt32 hashMask, UInt32 *heads, UInt32 numHeads, const UInt32 *crc)
#define GetHeads_LOOP(v) \
for (; numHeads != 0; numHeads--) { \
const UInt32 value = (v); \
p++; \
*heads++ = pos - hash[value]; \
hash[value] = pos++; }
#define DEF_GetHeads2(name, v, action) \
GetHeads_DECL(name) { action \
GetHeads_LOOP(v) }
#define DEF_GetHeads(name, v) DEF_GetHeads2(name, v, ;)
DEF_GetHeads2(2, GetUi16(p), UNUSED_VAR(hashMask); UNUSED_VAR(crc); )
DEF_GetHeads(3, (crc[p[0]] ^ GetUi16(p + 1)) & hashMask)
DEF_GetHeads2(3b, GetUi16(p) ^ ((UInt32)(p)[2] << 16), UNUSED_VAR(hashMask); UNUSED_VAR(crc); )
// BT3 is not good for crc collisions for big hashMask values.
/*
GetHeads_DECL(3b)
{
UNUSED_VAR(hashMask);
UNUSED_VAR(crc);
{
const Byte *pLim = p + numHeads;
if (numHeads == 0)
return;
pLim--;
while (p < pLim)
{
UInt32 v1 = GetUi32(p);
UInt32 v0 = v1 & 0xFFFFFF;
UInt32 h0, h1;
p += 2;
v1 >>= 8;
h0 = hash[v0]; hash[v0] = pos; heads[0] = pos - h0; pos++;
h1 = hash[v1]; hash[v1] = pos; heads[1] = pos - h1; pos++;
heads += 2;
}
if (p == pLim)
{
UInt32 v0 = GetUi16(p) ^ ((UInt32)(p)[2] << 16);
*heads = pos - hash[v0];
hash[v0] = pos;
}
}
}
*/
/*
GetHeads_DECL(4)
{
unsigned sh = 0;
UNUSED_VAR(crc)
while ((hashMask & 0x80000000) == 0)
{
hashMask <<= 1;
sh++;
}
GetHeads_LOOP((GetUi32(p) * 0xa54a1) >> sh)
}
#define GetHeads4b GetHeads4
*/
#define USE_GetHeads_LOCAL_CRC
#ifdef USE_GetHeads_LOCAL_CRC
GetHeads_DECL(4)
{
UInt32 crc0[256];
UInt32 crc1[256];
{
unsigned i;
for (i = 0; i < 256; i++)
{
UInt32 v = crc[i];
crc0[i] = v & hashMask;
crc1[i] = (v << kLzHash_CrcShift_1) & hashMask;
// crc1[i] = rotlFixed(v, 8) & hashMask;
}
}
GetHeads_LOOP(crc0[p[0]] ^ crc1[p[3]] ^ (UInt32)GetUi16(p+1))
}
GetHeads_DECL(4b)
{
UInt32 crc0[256];
{
unsigned i;
for (i = 0; i < 256; i++)
crc0[i] = crc[i] & hashMask;
}
GetHeads_LOOP(crc0[p[0]] ^ GetUi24hi_from32(p))
}
GetHeads_DECL(5)
{
UInt32 crc0[256];
UInt32 crc1[256];
UInt32 crc2[256];
{
unsigned i;
for (i = 0; i < 256; i++)
{
UInt32 v = crc[i];
crc0[i] = v & hashMask;
crc1[i] = (v << kLzHash_CrcShift_1) & hashMask;
crc2[i] = (v << kLzHash_CrcShift_2) & hashMask;
}
}
GetHeads_LOOP(crc0[p[0]] ^ crc1[p[3]] ^ crc2[p[4]] ^ (UInt32)GetUi16(p+1))
}
GetHeads_DECL(5b)
{
UInt32 crc0[256];
UInt32 crc1[256];
{
unsigned i;
for (i = 0; i < 256; i++)
{
UInt32 v = crc[i];
crc0[i] = v & hashMask;
crc1[i] = (v << kLzHash_CrcShift_1) & hashMask;
}
}
GetHeads_LOOP(crc0[p[0]] ^ crc1[p[4]] ^ GetUi24hi_from32(p))
}
#else
DEF_GetHeads(4, (crc[p[0]] ^ (crc[p[3]] << kLzHash_CrcShift_1) ^ (UInt32)GetUi16(p+1)) & hashMask)
DEF_GetHeads(4b, (crc[p[0]] ^ GetUi24hi_from32(p)) & hashMask)
DEF_GetHeads(5, (crc[p[0]] ^ (crc[p[3]] << kLzHash_CrcShift_1) ^ (crc[p[4]] << kLzHash_CrcShift_2) ^ (UInt32)GetUi16(p + 1)) & hashMask)
DEF_GetHeads(5b, (crc[p[0]] ^ (crc[p[4]] << kLzHash_CrcShift_1) ^ GetUi24hi_from32(p)) & hashMask)
#endif
static void HashThreadFunc(CMatchFinderMt *mt)
{
CMtSync *p = &mt->hashSync;
PRF(printf("\nHashThreadFunc\n"));
for (;;)
{
UInt32 blockIndex = 0;
PRF(printf("\nHashThreadFunc : Event_Wait(&p->canStart)\n"));
Event_Wait(&p->canStart);
PRF(printf("\nHashThreadFunc : Event_Wait(&p->canStart) : after \n"));
if (p->exit)
{
PRF(printf("\nHashThreadFunc : exit \n"));
return;
}
MatchFinder_Init_HighHash(MF(mt));
for (;;)
{
PRF(printf("Hash thread block = %d pos = %d\n", (unsigned)blockIndex, mt->MatchFinder->pos));
{
CMatchFinder *mf = MF(mt);
if (MatchFinder_NeedMove(mf))
{
CriticalSection_Enter(&mt->btSync.cs);
CriticalSection_Enter(&mt->hashSync.cs);
{
const Byte *beforePtr = Inline_MatchFinder_GetPointerToCurrentPos(mf);
ptrdiff_t offset;
MatchFinder_MoveBlock(mf);
offset = beforePtr - Inline_MatchFinder_GetPointerToCurrentPos(mf);
mt->pointerToCurPos -= offset;
mt->buffer -= offset;
}
CriticalSection_Leave(&mt->hashSync.cs);
CriticalSection_Leave(&mt->btSync.cs);
continue;
}
Semaphore_Wait(&p->freeSemaphore);
if (p->exit) // exit is unexpected here. But we check it here for some failure case
return;
// for faster stop : we check (p->stopWriting) after Wait(freeSemaphore)
if (p->stopWriting)
break;
MatchFinder_ReadIfRequired(mf);
{
UInt32 *heads = mt->hashBuf + GET_HASH_BLOCK_OFFSET(blockIndex++);
UInt32 num = Inline_MatchFinder_GetNumAvailableBytes(mf);
heads[0] = 2;
heads[1] = num;
/* heads[1] contains the number of avail bytes:
if (avail < mf->numHashBytes) :
{
it means that stream was finished
HASH_THREAD and BT_THREAD must move position for heads[1] (avail) bytes.
HASH_THREAD doesn't stop,
HASH_THREAD fills only the header (2 numbers) for all next blocks:
{2, NumHashBytes - 1}, {2,0}, {2,0}, ... , {2,0}
}
else
{
HASH_THREAD and BT_THREAD must move position for (heads[0] - 2) bytes;
}
*/
if (num >= mf->numHashBytes)
{
num = num - mf->numHashBytes + 1;
if (num > kMtHashBlockSize - 2)
num = kMtHashBlockSize - 2;
if (mf->pos > (UInt32)kMtMaxValForNormalize - num)
{
const UInt32 subValue = (mf->pos - mf->historySize - 1); // & ~(UInt32)(kNormalizeAlign - 1);
MatchFinder_REDUCE_OFFSETS(mf, subValue)
MatchFinder_Normalize3(subValue, mf->hash + mf->fixedHashSize, (size_t)mf->hashMask + 1);
}
heads[0] = 2 + num;
mt->GetHeadsFunc(mf->buffer, mf->pos, mf->hash + mf->fixedHashSize, mf->hashMask, heads + 2, num, mf->crc);
}
mf->pos += num; // wrap over zero is allowed at the end of stream
mf->buffer += num;
}
}
Semaphore_Release1(&p->filledSemaphore);
} // for() processing end
// p->numBlocks_Sent = blockIndex;
Event_Set(&p->wasStopped);
} // for() thread end
}
// ---------- BT THREAD ----------
/* we use one variable instead of two (cyclicBufferPos == pos) before CyclicBuf wrap.
here we define fixed offset of (p->pos) from (p->cyclicBufferPos) */
#define CYC_TO_POS_OFFSET 0
// #define CYC_TO_POS_OFFSET 1 // for debug
#define MFMT_GM_INLINE
#ifdef MFMT_GM_INLINE
/*
we use size_t for (pos) instead of UInt32
to eliminate "movsx" BUG in old MSVC x64 compiler.
*/
UInt32 * Z7_FASTCALL GetMatchesSpecN_2(const Byte *lenLimit, size_t pos, const Byte *cur, CLzRef *son,
UInt32 _cutValue, UInt32 *d, size_t _maxLen, const UInt32 *hash, const UInt32 *limit, const UInt32 *size,
size_t _cyclicBufferPos, UInt32 _cyclicBufferSize,
UInt32 *posRes);
#endif
static void BtGetMatches(CMatchFinderMt *p, UInt32 *d)
{
UInt32 numProcessed = 0;
UInt32 curPos = 2;
/* GetMatchesSpec() functions don't create (len = 1)
in [len, dist] match pairs, if (p->numHashBytes >= 2)
Also we suppose here that (matchMaxLen >= 2).
So the following code for (reserve) is not required
UInt32 reserve = (p->matchMaxLen * 2);
const UInt32 kNumHashBytes_Max = 5; // BT_HASH_BYTES_MAX
if (reserve < kNumHashBytes_Max - 1)
reserve = kNumHashBytes_Max - 1;
const UInt32 limit = kMtBtBlockSize - (reserve);
*/
const UInt32 limit = kMtBtBlockSize - (p->matchMaxLen * 2);
d[1] = p->hashNumAvail;
if (p->failure_BT)
{
// printf("\n == 1 BtGetMatches() p->failure_BT\n");
d[0] = 0;
// d[1] = 0;
return;
}
while (curPos < limit)
{
if (p->hashBufPos == p->hashBufPosLimit)
{
// MatchFinderMt_GetNextBlock_Hash(p);
UInt32 avail;
{
const UInt32 bi = MtSync_GetNextBlock(&p->hashSync);
const UInt32 k = GET_HASH_BLOCK_OFFSET(bi);
const UInt32 *h = p->hashBuf + k;
avail = h[1];
p->hashBufPosLimit = k + h[0];
p->hashNumAvail = avail;
p->hashBufPos = k + 2;
}
{
/* we must prevent UInt32 overflow for avail total value,
if avail was increased with new hash block */
UInt32 availSum = numProcessed + avail;
if (availSum < numProcessed)
availSum = (UInt32)(Int32)-1;
d[1] = availSum;
}
if (avail >= p->numHashBytes)
continue;
// if (p->hashBufPos != p->hashBufPosLimit) exit(1);
/* (avail < p->numHashBytes)
It means that stream was finished.
And (avail) - is a number of remaining bytes,
we fill (d) for (avail) bytes for LZ_THREAD (receiver).
but we don't update (p->pos) and (p->cyclicBufferPos) here in BT_THREAD */
/* here we suppose that we have space enough:
(kMtBtBlockSize - curPos >= p->hashNumAvail) */
p->hashNumAvail = 0;
d[0] = curPos + avail;
d += curPos;
for (; avail != 0; avail--)
*d++ = 0;
return;
}
{
UInt32 size = p->hashBufPosLimit - p->hashBufPos;
UInt32 pos = p->pos;
UInt32 cyclicBufferPos = p->cyclicBufferPos;
UInt32 lenLimit = p->matchMaxLen;
if (lenLimit >= p->hashNumAvail)
lenLimit = p->hashNumAvail;
{
UInt32 size2 = p->hashNumAvail - lenLimit + 1;
if (size2 < size)
size = size2;
size2 = p->cyclicBufferSize - cyclicBufferPos;
if (size2 < size)
size = size2;
}
if (pos > (UInt32)kMtMaxValForNormalize - size)
{
const UInt32 subValue = (pos - p->cyclicBufferSize); // & ~(UInt32)(kNormalizeAlign - 1);
pos -= subValue;
p->pos = pos;
MatchFinder_Normalize3(subValue, p->son, (size_t)p->cyclicBufferSize * 2);
}
#ifndef MFMT_GM_INLINE
while (curPos < limit && size-- != 0)
{
UInt32 *startDistances = d + curPos;
UInt32 num = (UInt32)(GetMatchesSpec1(lenLimit, pos - p->hashBuf[p->hashBufPos++],
pos, p->buffer, p->son, cyclicBufferPos, p->cyclicBufferSize, p->cutValue,
startDistances + 1, p->numHashBytes - 1) - startDistances);
*startDistances = num - 1;
curPos += num;
cyclicBufferPos++;
pos++;
p->buffer++;
}
#else
{
UInt32 posRes = pos;
const UInt32 *d_end;
{
d_end = GetMatchesSpecN_2(
p->buffer + lenLimit - 1,
pos, p->buffer, p->son, p->cutValue, d + curPos,
p->numHashBytes - 1, p->hashBuf + p->hashBufPos,
d + limit, p->hashBuf + p->hashBufPos + size,
cyclicBufferPos, p->cyclicBufferSize,
&posRes);
}
{
if (!d_end)
{
// printf("\n == 2 BtGetMatches() p->failure_BT\n");
// internal data failure
p->failure_BT = True;
d[0] = 0;
// d[1] = 0;
return;
}
}
curPos = (UInt32)(d_end - d);
{
const UInt32 processed = posRes - pos;
pos = posRes;
p->hashBufPos += processed;
cyclicBufferPos += processed;
p->buffer += processed;
}
}
#endif
{
const UInt32 processed = pos - p->pos;
numProcessed += processed;
p->hashNumAvail -= processed;
p->pos = pos;
}
if (cyclicBufferPos == p->cyclicBufferSize)
cyclicBufferPos = 0;
p->cyclicBufferPos = cyclicBufferPos;
}
}
d[0] = curPos;
}
static void BtFillBlock(CMatchFinderMt *p, UInt32 globalBlockIndex)
{
CMtSync *sync = &p->hashSync;
BUFFER_MUST_BE_UNLOCKED(sync)
if (!sync->needStart)
{
LOCK_BUFFER(sync)
}
BtGetMatches(p, p->btBuf + GET_BT_BLOCK_OFFSET(globalBlockIndex));
/* We suppose that we have called GetNextBlock() from start.
So buffer is LOCKED */
UNLOCK_BUFFER(sync)
}
Z7_NO_INLINE
static void BtThreadFunc(CMatchFinderMt *mt)
{
CMtSync *p = &mt->btSync;
for (;;)
{
UInt32 blockIndex = 0;
Event_Wait(&p->canStart);
for (;;)
{
PRF(printf(" BT thread block = %d pos = %d\n", (unsigned)blockIndex, mt->pos));
/* (p->exit == true) is possible after (p->canStart) at first loop iteration
and is unexpected after more Wait(freeSemaphore) iterations */
if (p->exit)
return;
Semaphore_Wait(&p->freeSemaphore);
// for faster stop : we check (p->stopWriting) after Wait(freeSemaphore)
if (p->stopWriting)
break;
BtFillBlock(mt, blockIndex++);
Semaphore_Release1(&p->filledSemaphore);
}
// we stop HASH_THREAD here
MtSync_StopWriting(&mt->hashSync);
// p->numBlocks_Sent = blockIndex;
Event_Set(&p->wasStopped);
}
}
void MatchFinderMt_Construct(CMatchFinderMt *p)
{
p->hashBuf = NULL;
MtSync_Construct(&p->hashSync);
MtSync_Construct(&p->btSync);
}
static void MatchFinderMt_FreeMem(CMatchFinderMt *p, ISzAllocPtr alloc)
{
ISzAlloc_Free(alloc, p->hashBuf);
p->hashBuf = NULL;
}
void MatchFinderMt_Destruct(CMatchFinderMt *p, ISzAllocPtr alloc)
{
/*
HASH_THREAD can use CriticalSection(s) btSync.cs and hashSync.cs.
So we must be sure that HASH_THREAD will not use CriticalSection(s)
after deleting CriticalSection here.
we call ReleaseStream(p)
that calls StopWriting(btSync)
that calls StopWriting(hashSync), if it's required to stop HASH_THREAD.
after StopWriting() it's safe to destruct MtSync(s) in any order */
MatchFinderMt_ReleaseStream(p);
MtSync_Destruct(&p->btSync);
MtSync_Destruct(&p->hashSync);
LOG_ITER(
printf("\nTree %9d * %7d iter = %9d = sum : bytes = %9d\n",
(UInt32)(g_NumIters_Tree / 1000),
(UInt32)(((UInt64)g_NumIters_Loop * 1000) / (g_NumIters_Tree + 1)),
(UInt32)(g_NumIters_Loop / 1000),
(UInt32)(g_NumIters_Bytes / 1000)
));
MatchFinderMt_FreeMem(p, alloc);
}
#define kHashBufferSize (kMtHashBlockSize * kMtHashNumBlocks)
#define kBtBufferSize (kMtBtBlockSize * kMtBtNumBlocks)
static THREAD_FUNC_DECL HashThreadFunc2(void *p) { HashThreadFunc((CMatchFinderMt *)p); return 0; }
static THREAD_FUNC_DECL BtThreadFunc2(void *p)
{
Byte allocaDummy[0x180];
unsigned i = 0;
for (i = 0; i < 16; i++)
allocaDummy[i] = (Byte)0;
if (allocaDummy[0] == 0)
BtThreadFunc((CMatchFinderMt *)p);
return 0;
}
SRes MatchFinderMt_Create(CMatchFinderMt *p, UInt32 historySize, UInt32 keepAddBufferBefore,
UInt32 matchMaxLen, UInt32 keepAddBufferAfter, ISzAllocPtr alloc)
{
CMatchFinder *mf = MF(p);
p->historySize = historySize;
if (kMtBtBlockSize <= matchMaxLen * 4)
return SZ_ERROR_PARAM;
if (!p->hashBuf)
{
p->hashBuf = (UInt32 *)ISzAlloc_Alloc(alloc, ((size_t)kHashBufferSize + (size_t)kBtBufferSize) * sizeof(UInt32));
if (!p->hashBuf)
return SZ_ERROR_MEM;
p->btBuf = p->hashBuf + kHashBufferSize;
}
keepAddBufferBefore += (kHashBufferSize + kBtBufferSize);
keepAddBufferAfter += kMtHashBlockSize;
if (!MatchFinder_Create(mf, historySize, keepAddBufferBefore, matchMaxLen, keepAddBufferAfter, alloc))
return SZ_ERROR_MEM;
RINOK(MtSync_Create(&p->hashSync, HashThreadFunc2, p))
RINOK(MtSync_Create(&p->btSync, BtThreadFunc2, p))
return SZ_OK;
}
SRes MatchFinderMt_InitMt(CMatchFinderMt *p)
{
RINOK(MtSync_Init(&p->hashSync, kMtHashNumBlocks))
return MtSync_Init(&p->btSync, kMtBtNumBlocks);
}
static void MatchFinderMt_Init(CMatchFinderMt *p)
{
CMatchFinder *mf = MF(p);
p->btBufPos =
p->btBufPosLimit = NULL;
p->hashBufPos =
p->hashBufPosLimit = 0;
p->hashNumAvail = 0; // 21.03
p->failure_BT = False;
/* Init without data reading. We don't want to read data in this thread */
MatchFinder_Init_4(mf);
MatchFinder_Init_LowHash(mf);
p->pointerToCurPos = Inline_MatchFinder_GetPointerToCurrentPos(mf);
p->btNumAvailBytes = 0;
p->failure_LZ_BT = False;
// p->failure_LZ_LZ = False;
p->lzPos =
1; // optimal smallest value
// 0; // for debug: ignores match to start
// kNormalizeAlign; // for debug
p->hash = mf->hash;
p->fixedHashSize = mf->fixedHashSize;
// p->hash4Mask = mf->hash4Mask;
p->crc = mf->crc;
// memcpy(p->crc, mf->crc, sizeof(mf->crc));
p->son = mf->son;
p->matchMaxLen = mf->matchMaxLen;
p->numHashBytes = mf->numHashBytes;
/* (mf->pos) and (mf->streamPos) were already initialized to 1 in MatchFinder_Init_4() */
// mf->streamPos = mf->pos = 1; // optimal smallest value
// 0; // for debug: ignores match to start
// kNormalizeAlign; // for debug
/* we must init (p->pos = mf->pos) for BT, because
BT code needs (p->pos == delta_value_for_empty_hash_record == mf->pos) */
p->pos = mf->pos; // do not change it
p->cyclicBufferPos = (p->pos - CYC_TO_POS_OFFSET);
p->cyclicBufferSize = mf->cyclicBufferSize;
p->buffer = mf->buffer;
p->cutValue = mf->cutValue;
// p->son[0] = p->son[1] = 0; // unused: to init skipped record for speculated accesses.
}
/* ReleaseStream is required to finish multithreading */
void MatchFinderMt_ReleaseStream(CMatchFinderMt *p)
{
// Sleep(1); // for debug
MtSync_StopWriting(&p->btSync);
// Sleep(200); // for debug
/* p->MatchFinder->ReleaseStream(); */
}
Z7_NO_INLINE
static UInt32 MatchFinderMt_GetNextBlock_Bt(CMatchFinderMt *p)
{
if (p->failure_LZ_BT)
p->btBufPos = p->failureBuf;
else
{
const UInt32 bi = MtSync_GetNextBlock(&p->btSync);
const UInt32 *bt = p->btBuf + GET_BT_BLOCK_OFFSET(bi);
{
const UInt32 numItems = bt[0];
p->btBufPosLimit = bt + numItems;
p->btNumAvailBytes = bt[1];
p->btBufPos = bt + 2;
if (numItems < 2 || numItems > kMtBtBlockSize)
{
p->failureBuf[0] = 0;
p->btBufPos = p->failureBuf;
p->btBufPosLimit = p->failureBuf + 1;
p->failure_LZ_BT = True;
// p->btNumAvailBytes = 0;
/* we don't want to decrease AvailBytes that was loaded before.
that can be unexpected for the code that has loaded another value before */
}
}
if (p->lzPos >= (UInt32)kMtMaxValForNormalize - (UInt32)kMtBtBlockSize)
{
/* we don't check (lzPos) over exact avail bytes in (btBuf).
(fixedHashSize) is small, so normalization is fast */
const UInt32 subValue = (p->lzPos - p->historySize - 1); // & ~(UInt32)(kNormalizeAlign - 1);
p->lzPos -= subValue;
MatchFinder_Normalize3(subValue, p->hash, p->fixedHashSize);
}
}
return p->btNumAvailBytes;
}
static const Byte * MatchFinderMt_GetPointerToCurrentPos(CMatchFinderMt *p)
{
return p->pointerToCurPos;
}
#define GET_NEXT_BLOCK_IF_REQUIRED if (p->btBufPos == p->btBufPosLimit) MatchFinderMt_GetNextBlock_Bt(p);
static UInt32 MatchFinderMt_GetNumAvailableBytes(CMatchFinderMt *p)
{
if (p->btBufPos != p->btBufPosLimit)
return p->btNumAvailBytes;
return MatchFinderMt_GetNextBlock_Bt(p);
}
// #define CHECK_FAILURE_LZ(_match_, _pos_) if (_match_ >= _pos_) { p->failure_LZ_LZ = True; return d; }
#define CHECK_FAILURE_LZ(_match_, _pos_)
static UInt32 * MixMatches2(CMatchFinderMt *p, UInt32 matchMinPos, UInt32 *d)
{
UInt32 h2, c2;
UInt32 *hash = p->hash;
const Byte *cur = p->pointerToCurPos;
const UInt32 m = p->lzPos;
MT_HASH2_CALC
c2 = hash[h2];
hash[h2] = m;
if (c2 >= matchMinPos)
{
CHECK_FAILURE_LZ(c2, m)
if (cur[(ptrdiff_t)c2 - (ptrdiff_t)m] == cur[0])
{
*d++ = 2;
*d++ = m - c2 - 1;
}
}
return d;
}
static UInt32 * MixMatches3(CMatchFinderMt *p, UInt32 matchMinPos, UInt32 *d)
{
UInt32 h2, h3, c2, c3;
UInt32 *hash = p->hash;
const Byte *cur = p->pointerToCurPos;
const UInt32 m = p->lzPos;
MT_HASH3_CALC
c2 = hash[h2];
c3 = (hash + kFix3HashSize)[h3];
hash[h2] = m;
(hash + kFix3HashSize)[h3] = m;
if (c2 >= matchMinPos)
{
CHECK_FAILURE_LZ(c2, m)
if (cur[(ptrdiff_t)c2 - (ptrdiff_t)m] == cur[0])
{
d[1] = m - c2 - 1;
if (cur[(ptrdiff_t)c2 - (ptrdiff_t)m + 2] == cur[2])
{
d[0] = 3;
return d + 2;
}
d[0] = 2;
d += 2;
}
}
if (c3 >= matchMinPos)
{
CHECK_FAILURE_LZ(c3, m)
if (cur[(ptrdiff_t)c3 - (ptrdiff_t)m] == cur[0])
{
*d++ = 3;
*d++ = m - c3 - 1;
}
}
return d;
}
#define INCREASE_LZ_POS p->lzPos++; p->pointerToCurPos++;
/*
static
UInt32* MatchFinderMt_GetMatches_Bt4(CMatchFinderMt *p, UInt32 *d)
{
const UInt32 *bt = p->btBufPos;
const UInt32 len = *bt++;
const UInt32 *btLim = bt + len;
UInt32 matchMinPos;
UInt32 avail = p->btNumAvailBytes - 1;
p->btBufPos = btLim;
{
p->btNumAvailBytes = avail;
#define BT_HASH_BYTES_MAX 5
matchMinPos = p->lzPos;
if (len != 0)
matchMinPos -= bt[1];
else if (avail < (BT_HASH_BYTES_MAX - 1) - 1)
{
INCREASE_LZ_POS
return d;
}
else
{
const UInt32 hs = p->historySize;
if (matchMinPos > hs)
matchMinPos -= hs;
else
matchMinPos = 1;
}
}
for (;;)
{
UInt32 h2, h3, c2, c3;
UInt32 *hash = p->hash;
const Byte *cur = p->pointerToCurPos;
UInt32 m = p->lzPos;
MT_HASH3_CALC
c2 = hash[h2];
c3 = (hash + kFix3HashSize)[h3];
hash[h2] = m;
(hash + kFix3HashSize)[h3] = m;
if (c2 >= matchMinPos && cur[(ptrdiff_t)c2 - (ptrdiff_t)m] == cur[0])
{
d[1] = m - c2 - 1;
if (cur[(ptrdiff_t)c2 - (ptrdiff_t)m + 2] == cur[2])
{
d[0] = 3;
d += 2;
break;
}
// else
{
d[0] = 2;
d += 2;
}
}
if (c3 >= matchMinPos && cur[(ptrdiff_t)c3 - (ptrdiff_t)m] == cur[0])
{
*d++ = 3;
*d++ = m - c3 - 1;
}
break;
}
if (len != 0)
{
do
{
const UInt32 v0 = bt[0];
const UInt32 v1 = bt[1];
bt += 2;
d[0] = v0;
d[1] = v1;
d += 2;
}
while (bt != btLim);
}
INCREASE_LZ_POS
return d;
}
*/
static UInt32 * MixMatches4(CMatchFinderMt *p, UInt32 matchMinPos, UInt32 *d)
{
UInt32 h2, h3, /* h4, */ c2, c3 /* , c4 */;
UInt32 *hash = p->hash;
const Byte *cur = p->pointerToCurPos;
const UInt32 m = p->lzPos;
MT_HASH3_CALC
// MT_HASH4_CALC
c2 = hash[h2];
c3 = (hash + kFix3HashSize)[h3];
// c4 = (hash + kFix4HashSize)[h4];
hash[h2] = m;
(hash + kFix3HashSize)[h3] = m;
// (hash + kFix4HashSize)[h4] = m;
// #define BT5_USE_H2
// #ifdef BT5_USE_H2
if (c2 >= matchMinPos && cur[(ptrdiff_t)c2 - (ptrdiff_t)m] == cur[0])
{
d[1] = m - c2 - 1;
if (cur[(ptrdiff_t)c2 - (ptrdiff_t)m + 2] == cur[2])
{
// d[0] = (cur[(ptrdiff_t)c2 - (ptrdiff_t)m + 3] == cur[3]) ? 4 : 3;
// return d + 2;
if (cur[(ptrdiff_t)c2 - (ptrdiff_t)m + 3] == cur[3])
{
d[0] = 4;
return d + 2;
}
d[0] = 3;
d += 2;
#ifdef BT5_USE_H4
if (c4 >= matchMinPos)
if (
cur[(ptrdiff_t)c4 - (ptrdiff_t)m] == cur[0] &&
cur[(ptrdiff_t)c4 - (ptrdiff_t)m + 3] == cur[3]
)
{
*d++ = 4;
*d++ = m - c4 - 1;
}
#endif
return d;
}
d[0] = 2;
d += 2;
}
// #endif
if (c3 >= matchMinPos && cur[(ptrdiff_t)c3 - (ptrdiff_t)m] == cur[0])
{
d[1] = m - c3 - 1;
if (cur[(ptrdiff_t)c3 - (ptrdiff_t)m + 3] == cur[3])
{
d[0] = 4;
return d + 2;
}
d[0] = 3;
d += 2;
}
#ifdef BT5_USE_H4
if (c4 >= matchMinPos)
if (
cur[(ptrdiff_t)c4 - (ptrdiff_t)m] == cur[0] &&
cur[(ptrdiff_t)c4 - (ptrdiff_t)m + 3] == cur[3]
)
{
*d++ = 4;
*d++ = m - c4 - 1;
}
#endif
return d;
}
static UInt32 * MatchFinderMt2_GetMatches(CMatchFinderMt *p, UInt32 *d)
{
const UInt32 *bt = p->btBufPos;
const UInt32 len = *bt++;
const UInt32 *btLim = bt + len;
p->btBufPos = btLim;
p->btNumAvailBytes--;
INCREASE_LZ_POS
{
while (bt != btLim)
{
const UInt32 v0 = bt[0];
const UInt32 v1 = bt[1];
bt += 2;
d[0] = v0;
d[1] = v1;
d += 2;
}
}
return d;
}
static UInt32 * MatchFinderMt_GetMatches(CMatchFinderMt *p, UInt32 *d)
{
const UInt32 *bt = p->btBufPos;
UInt32 len = *bt++;
const UInt32 avail = p->btNumAvailBytes - 1;
p->btNumAvailBytes = avail;
p->btBufPos = bt + len;
if (len == 0)
{
#define BT_HASH_BYTES_MAX 5
if (avail >= (BT_HASH_BYTES_MAX - 1) - 1)
{
UInt32 m = p->lzPos;
if (m > p->historySize)
m -= p->historySize;
else
m = 1;
d = p->MixMatchesFunc(p, m, d);
}
}
else
{
/*
first match pair from BinTree: (match_len, match_dist),
(match_len >= numHashBytes).
MixMatchesFunc() inserts only hash matches that are nearer than (match_dist)
*/
d = p->MixMatchesFunc(p, p->lzPos - bt[1], d);
// if (d) // check for failure
do
{
const UInt32 v0 = bt[0];
const UInt32 v1 = bt[1];
bt += 2;
d[0] = v0;
d[1] = v1;
d += 2;
}
while (len -= 2);
}
INCREASE_LZ_POS
return d;
}
#define SKIP_HEADER2_MT do { GET_NEXT_BLOCK_IF_REQUIRED
#define SKIP_HEADER_MT(n) SKIP_HEADER2_MT if (p->btNumAvailBytes-- >= (n)) { const Byte *cur = p->pointerToCurPos; UInt32 *hash = p->hash;
#define SKIP_FOOTER_MT } INCREASE_LZ_POS p->btBufPos += (size_t)*p->btBufPos + 1; } while (--num != 0);
static void MatchFinderMt0_Skip(CMatchFinderMt *p, UInt32 num)
{
SKIP_HEADER2_MT { p->btNumAvailBytes--;
SKIP_FOOTER_MT
}
static void MatchFinderMt2_Skip(CMatchFinderMt *p, UInt32 num)
{
SKIP_HEADER_MT(2)
UInt32 h2;
MT_HASH2_CALC
hash[h2] = p->lzPos;
SKIP_FOOTER_MT
}
static void MatchFinderMt3_Skip(CMatchFinderMt *p, UInt32 num)
{
SKIP_HEADER_MT(3)
UInt32 h2, h3;
MT_HASH3_CALC
(hash + kFix3HashSize)[h3] =
hash[ h2] =
p->lzPos;
SKIP_FOOTER_MT
}
/*
// MatchFinderMt4_Skip() is similar to MatchFinderMt3_Skip().
// The difference is that MatchFinderMt3_Skip() updates hash for last 3 bytes of stream.
static void MatchFinderMt4_Skip(CMatchFinderMt *p, UInt32 num)
{
SKIP_HEADER_MT(4)
UInt32 h2, h3; // h4
MT_HASH3_CALC
// MT_HASH4_CALC
// (hash + kFix4HashSize)[h4] =
(hash + kFix3HashSize)[h3] =
hash[ h2] =
p->lzPos;
SKIP_FOOTER_MT
}
*/
void MatchFinderMt_CreateVTable(CMatchFinderMt *p, IMatchFinder2 *vTable)
{
vTable->Init = (Mf_Init_Func)MatchFinderMt_Init;
vTable->GetNumAvailableBytes = (Mf_GetNumAvailableBytes_Func)MatchFinderMt_GetNumAvailableBytes;
vTable->GetPointerToCurrentPos = (Mf_GetPointerToCurrentPos_Func)MatchFinderMt_GetPointerToCurrentPos;
vTable->GetMatches = (Mf_GetMatches_Func)MatchFinderMt_GetMatches;
switch (MF(p)->numHashBytes)
{
case 2:
p->GetHeadsFunc = GetHeads2;
p->MixMatchesFunc = (Mf_Mix_Matches)NULL;
vTable->Skip = (Mf_Skip_Func)MatchFinderMt0_Skip;
vTable->GetMatches = (Mf_GetMatches_Func)MatchFinderMt2_GetMatches;
break;
case 3:
p->GetHeadsFunc = MF(p)->bigHash ? GetHeads3b : GetHeads3;
p->MixMatchesFunc = (Mf_Mix_Matches)MixMatches2;
vTable->Skip = (Mf_Skip_Func)MatchFinderMt2_Skip;
break;
case 4:
p->GetHeadsFunc = MF(p)->bigHash ? GetHeads4b : GetHeads4;
// it's fast inline version of GetMatches()
// vTable->GetMatches = (Mf_GetMatches_Func)MatchFinderMt_GetMatches_Bt4;
p->MixMatchesFunc = (Mf_Mix_Matches)MixMatches3;
vTable->Skip = (Mf_Skip_Func)MatchFinderMt3_Skip;
break;
default:
p->GetHeadsFunc = MF(p)->bigHash ? GetHeads5b : GetHeads5;
p->MixMatchesFunc = (Mf_Mix_Matches)MixMatches4;
vTable->Skip =
(Mf_Skip_Func)MatchFinderMt3_Skip;
// (Mf_Skip_Func)MatchFinderMt4_Skip;
break;
}
}
#undef RINOK_THREAD
#undef PRF
#undef MF
#undef GetUi24hi_from32
#undef LOCK_BUFFER
#undef UNLOCK_BUFFER

109
extern/lzma/LzFindMt.h vendored
View File

@@ -1,109 +0,0 @@
/* LzFindMt.h -- multithreaded Match finder for LZ algorithms
2023-03-05 : Igor Pavlov : Public domain */
#ifndef ZIP7_INC_LZ_FIND_MT_H
#define ZIP7_INC_LZ_FIND_MT_H
#include "LzFind.h"
#include "Threads.h"
EXTERN_C_BEGIN
typedef struct
{
UInt32 numProcessedBlocks;
CThread thread;
UInt64 affinity;
BoolInt wasCreated;
BoolInt needStart;
BoolInt csWasInitialized;
BoolInt csWasEntered;
BoolInt exit;
BoolInt stopWriting;
CAutoResetEvent canStart;
CAutoResetEvent wasStopped;
CSemaphore freeSemaphore;
CSemaphore filledSemaphore;
CCriticalSection cs;
// UInt32 numBlocks_Sent;
} CMtSync;
typedef UInt32 * (*Mf_Mix_Matches)(void *p, UInt32 matchMinPos, UInt32 *distances);
/* kMtCacheLineDummy must be >= size_of_CPU_cache_line */
#define kMtCacheLineDummy 128
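/* The btDummy padding in CMatchFinderMt below is sized by this constant so that
fields written by the BT (worker) thread and fields read/written by the LZ
(consumer) thread are unlikely to share a cache line, i.e. it guards against
false sharing between the two threads. */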
typedef void (*Mf_GetHeads)(const Byte *buffer, UInt32 pos,
UInt32 *hash, UInt32 hashMask, UInt32 *heads, UInt32 numHeads, const UInt32 *crc);
typedef struct
{
/* LZ */
const Byte *pointerToCurPos;
UInt32 *btBuf;
const UInt32 *btBufPos;
const UInt32 *btBufPosLimit;
UInt32 lzPos;
UInt32 btNumAvailBytes;
UInt32 *hash;
UInt32 fixedHashSize;
// UInt32 hash4Mask;
UInt32 historySize;
const UInt32 *crc;
Mf_Mix_Matches MixMatchesFunc;
UInt32 failure_LZ_BT; // failure in BT transferred to LZ
// UInt32 failure_LZ_LZ; // failure in LZ tables
UInt32 failureBuf[1];
// UInt32 crc[256];
/* LZ + BT */
CMtSync btSync;
Byte btDummy[kMtCacheLineDummy];
/* BT */
UInt32 *hashBuf;
UInt32 hashBufPos;
UInt32 hashBufPosLimit;
UInt32 hashNumAvail;
UInt32 failure_BT;
CLzRef *son;
UInt32 matchMaxLen;
UInt32 numHashBytes;
UInt32 pos;
const Byte *buffer;
UInt32 cyclicBufferPos;
UInt32 cyclicBufferSize; /* it must be = (historySize + 1) */
UInt32 cutValue;
/* BT + Hash */
CMtSync hashSync;
/* Byte hashDummy[kMtCacheLineDummy]; */
/* Hash */
Mf_GetHeads GetHeadsFunc;
CMatchFinder *MatchFinder;
// CMatchFinder MatchFinder;
} CMatchFinderMt;
// only for Mt part
void MatchFinderMt_Construct(CMatchFinderMt *p);
void MatchFinderMt_Destruct(CMatchFinderMt *p, ISzAllocPtr alloc);
SRes MatchFinderMt_Create(CMatchFinderMt *p, UInt32 historySize, UInt32 keepAddBufferBefore,
UInt32 matchMaxLen, UInt32 keepAddBufferAfter, ISzAllocPtr alloc);
void MatchFinderMt_CreateVTable(CMatchFinderMt *p, IMatchFinder2 *vTable);
/* call MatchFinderMt_InitMt() before IMatchFinder::Init() */
SRes MatchFinderMt_InitMt(CMatchFinderMt *p);
void MatchFinderMt_ReleaseStream(CMatchFinderMt *p);
EXTERN_C_END
#endif

578
extern/lzma/LzFindOpt.c vendored
View File

@@ -1,578 +0,0 @@
/* LzFindOpt.c -- multithreaded Match finder for LZ algorithms
2023-04-02 : Igor Pavlov : Public domain */
#include "Precomp.h"
#include "CpuArch.h"
#include "LzFind.h"
// #include "LzFindMt.h"
// #define LOG_ITERS
// #define LOG_THREAD
#ifdef LOG_THREAD
#include <stdio.h>
#define PRF(x) x
#else
// #define PRF(x)
#endif
#ifdef LOG_ITERS
#include <stdio.h>
UInt64 g_NumIters_Tree;
UInt64 g_NumIters_Loop;
UInt64 g_NumIters_Bytes;
#define LOG_ITER(x) x
#else
#define LOG_ITER(x)
#endif
// ---------- BT THREAD ----------
#define USE_SON_PREFETCH
#define USE_LONG_MATCH_OPT
#define kEmptyHashValue 0
// #define CYC_TO_POS_OFFSET 0
// #define CYC_TO_POS_OFFSET 1 // for debug
/*
Z7_NO_INLINE
UInt32 * Z7_FASTCALL GetMatchesSpecN_1(const Byte *lenLimit, size_t pos, const Byte *cur, CLzRef *son,
UInt32 _cutValue, UInt32 *d, size_t _maxLen, const UInt32 *hash, const UInt32 *limit, const UInt32 *size, UInt32 *posRes)
{
do
{
UInt32 delta;
if (hash == size)
break;
delta = *hash++;
if (delta == 0 || delta > (UInt32)pos)
return NULL;
lenLimit++;
if (delta == (UInt32)pos)
{
CLzRef *ptr1 = son + ((size_t)pos << 1) - CYC_TO_POS_OFFSET * 2;
*d++ = 0;
ptr1[0] = kEmptyHashValue;
ptr1[1] = kEmptyHashValue;
}
else
{
UInt32 *_distances = ++d;
CLzRef *ptr0 = son + ((size_t)(pos) << 1) - CYC_TO_POS_OFFSET * 2 + 1;
CLzRef *ptr1 = son + ((size_t)(pos) << 1) - CYC_TO_POS_OFFSET * 2;
const Byte *len0 = cur, *len1 = cur;
UInt32 cutValue = _cutValue;
const Byte *maxLen = cur + _maxLen;
for (LOG_ITER(g_NumIters_Tree++);;)
{
LOG_ITER(g_NumIters_Loop++);
{
const ptrdiff_t diff = (ptrdiff_t)0 - (ptrdiff_t)delta;
CLzRef *pair = son + ((size_t)(((ptrdiff_t)pos - CYC_TO_POS_OFFSET) + diff) << 1);
const Byte *len = (len0 < len1 ? len0 : len1);
#ifdef USE_SON_PREFETCH
const UInt32 pair0 = *pair;
#endif
if (len[diff] == len[0])
{
if (++len != lenLimit && len[diff] == len[0])
while (++len != lenLimit)
{
LOG_ITER(g_NumIters_Bytes++);
if (len[diff] != len[0])
break;
}
if (maxLen < len)
{
maxLen = len;
*d++ = (UInt32)(len - cur);
*d++ = delta - 1;
if (len == lenLimit)
{
const UInt32 pair1 = pair[1];
*ptr1 =
#ifdef USE_SON_PREFETCH
pair0;
#else
pair[0];
#endif
*ptr0 = pair1;
_distances[-1] = (UInt32)(d - _distances);
#ifdef USE_LONG_MATCH_OPT
if (hash == size || *hash != delta || lenLimit[diff] != lenLimit[0] || d >= limit)
break;
{
for (;;)
{
hash++;
pos++;
cur++;
lenLimit++;
{
CLzRef *ptr = son + ((size_t)(pos) << 1) - CYC_TO_POS_OFFSET * 2;
#if 0
*(UInt64 *)(void *)ptr = ((const UInt64 *)(const void *)ptr)[diff];
#else
const UInt32 p0 = ptr[0 + (diff * 2)];
const UInt32 p1 = ptr[1 + (diff * 2)];
ptr[0] = p0;
ptr[1] = p1;
// ptr[0] = ptr[0 + (diff * 2)];
// ptr[1] = ptr[1 + (diff * 2)];
#endif
}
// PrintSon(son + 2, pos - 1);
// printf("\npos = %x delta = %x\n", pos, delta);
len++;
*d++ = 2;
*d++ = (UInt32)(len - cur);
*d++ = delta - 1;
if (hash == size || *hash != delta || lenLimit[diff] != lenLimit[0] || d >= limit)
break;
}
}
#endif
break;
}
}
}
{
const UInt32 curMatch = (UInt32)pos - delta; // (UInt32)(pos + diff);
if (len[diff] < len[0])
{
delta = pair[1];
if (delta >= curMatch)
return NULL;
*ptr1 = curMatch;
ptr1 = pair + 1;
len1 = len;
}
else
{
delta = *pair;
if (delta >= curMatch)
return NULL;
*ptr0 = curMatch;
ptr0 = pair;
len0 = len;
}
delta = (UInt32)pos - delta;
if (--cutValue == 0 || delta >= pos)
{
*ptr0 = *ptr1 = kEmptyHashValue;
_distances[-1] = (UInt32)(d - _distances);
break;
}
}
}
} // for (tree iterations)
}
pos++;
cur++;
}
while (d < limit);
*posRes = (UInt32)pos;
return d;
}
*/
/* define cbs if you use 2 functions.
GetMatchesSpecN_1() : (pos < _cyclicBufferSize)
GetMatchesSpecN_2() : (pos >= _cyclicBufferSize)
do not define cbs if you use 1 function:
GetMatchesSpecN_2()
*/
// #define cbs _cyclicBufferSize
/*
we use size_t for (pos) and (_cyclicBufferPos_ instead of UInt32
to eliminate "movsx" BUG in old MSVC x64 compiler.
*/
UInt32 * Z7_FASTCALL GetMatchesSpecN_2(const Byte *lenLimit, size_t pos, const Byte *cur, CLzRef *son,
UInt32 _cutValue, UInt32 *d, size_t _maxLen, const UInt32 *hash, const UInt32 *limit, const UInt32 *size,
size_t _cyclicBufferPos, UInt32 _cyclicBufferSize,
UInt32 *posRes);
Z7_NO_INLINE
UInt32 * Z7_FASTCALL GetMatchesSpecN_2(const Byte *lenLimit, size_t pos, const Byte *cur, CLzRef *son,
UInt32 _cutValue, UInt32 *d, size_t _maxLen, const UInt32 *hash, const UInt32 *limit, const UInt32 *size,
size_t _cyclicBufferPos, UInt32 _cyclicBufferSize,
UInt32 *posRes)
{
do // while (hash != size)
{
UInt32 delta;
#ifndef cbs
UInt32 cbs;
#endif
if (hash == size)
break;
delta = *hash++;
if (delta == 0)
return NULL;
lenLimit++;
#ifndef cbs
cbs = _cyclicBufferSize;
if ((UInt32)pos < cbs)
{
if (delta > (UInt32)pos)
return NULL;
cbs = (UInt32)pos;
}
#endif
if (delta >= cbs)
{
CLzRef *ptr1 = son + ((size_t)_cyclicBufferPos << 1);
*d++ = 0;
ptr1[0] = kEmptyHashValue;
ptr1[1] = kEmptyHashValue;
}
else
{
UInt32 *_distances = ++d;
CLzRef *ptr0 = son + ((size_t)_cyclicBufferPos << 1) + 1;
CLzRef *ptr1 = son + ((size_t)_cyclicBufferPos << 1);
UInt32 cutValue = _cutValue;
const Byte *len0 = cur, *len1 = cur;
const Byte *maxLen = cur + _maxLen;
// if (cutValue == 0) { *ptr0 = *ptr1 = kEmptyHashValue; } else
for (LOG_ITER(g_NumIters_Tree++);;)
{
LOG_ITER(g_NumIters_Loop++);
{
// SPEC code
CLzRef *pair = son + ((size_t)((ptrdiff_t)_cyclicBufferPos - (ptrdiff_t)delta
+ (ptrdiff_t)(UInt32)(_cyclicBufferPos < delta ? cbs : 0)
) << 1);
const ptrdiff_t diff = (ptrdiff_t)0 - (ptrdiff_t)delta;
const Byte *len = (len0 < len1 ? len0 : len1);
#ifdef USE_SON_PREFETCH
const UInt32 pair0 = *pair;
#endif
if (len[diff] == len[0])
{
if (++len != lenLimit && len[diff] == len[0])
while (++len != lenLimit)
{
LOG_ITER(g_NumIters_Bytes++);
if (len[diff] != len[0])
break;
}
if (maxLen < len)
{
maxLen = len;
*d++ = (UInt32)(len - cur);
*d++ = delta - 1;
if (len == lenLimit)
{
const UInt32 pair1 = pair[1];
*ptr1 =
#ifdef USE_SON_PREFETCH
pair0;
#else
pair[0];
#endif
*ptr0 = pair1;
_distances[-1] = (UInt32)(d - _distances);
#ifdef USE_LONG_MATCH_OPT
if (hash == size || *hash != delta || lenLimit[diff] != lenLimit[0] || d >= limit)
break;
{
for (;;)
{
*d++ = 2;
*d++ = (UInt32)(lenLimit - cur);
*d++ = delta - 1;
cur++;
lenLimit++;
// SPEC
_cyclicBufferPos++;
{
// SPEC code
CLzRef *dest = son + ((size_t)(_cyclicBufferPos) << 1);
const CLzRef *src = dest + ((diff
+ (ptrdiff_t)(UInt32)((_cyclicBufferPos < delta) ? cbs : 0)) << 1);
// CLzRef *ptr = son + ((size_t)(pos) << 1) - CYC_TO_POS_OFFSET * 2;
#if 0
*(UInt64 *)(void *)dest = *((const UInt64 *)(const void *)src);
#else
const UInt32 p0 = src[0];
const UInt32 p1 = src[1];
dest[0] = p0;
dest[1] = p1;
#endif
}
pos++;
hash++;
if (hash == size || *hash != delta || lenLimit[diff] != lenLimit[0] || d >= limit)
break;
} // for() end for long matches
}
#endif
break; // break from TREE iterations
}
}
}
{
const UInt32 curMatch = (UInt32)pos - delta; // (UInt32)(pos + diff);
if (len[diff] < len[0])
{
delta = pair[1];
*ptr1 = curMatch;
ptr1 = pair + 1;
len1 = len;
if (delta >= curMatch)
return NULL;
}
else
{
delta = *pair;
*ptr0 = curMatch;
ptr0 = pair;
len0 = len;
if (delta >= curMatch)
return NULL;
}
delta = (UInt32)pos - delta;
if (--cutValue == 0 || delta >= cbs)
{
*ptr0 = *ptr1 = kEmptyHashValue;
_distances[-1] = (UInt32)(d - _distances);
break;
}
}
}
} // for (tree iterations)
}
pos++;
_cyclicBufferPos++;
cur++;
}
while (d < limit);
*posRes = (UInt32)pos;
return d;
}
/*
typedef UInt32 uint32plus; // size_t
UInt32 * Z7_FASTCALL GetMatchesSpecN_3(uint32plus lenLimit, size_t pos, const Byte *cur, CLzRef *son,
UInt32 _cutValue, UInt32 *d, uint32plus _maxLen, const UInt32 *hash, const UInt32 *limit, const UInt32 *size,
size_t _cyclicBufferPos, UInt32 _cyclicBufferSize,
UInt32 *posRes)
{
do // while (hash != size)
{
UInt32 delta;
#ifndef cbs
UInt32 cbs;
#endif
if (hash == size)
break;
delta = *hash++;
if (delta == 0)
return NULL;
#ifndef cbs
cbs = _cyclicBufferSize;
if ((UInt32)pos < cbs)
{
if (delta > (UInt32)pos)
return NULL;
cbs = (UInt32)pos;
}
#endif
if (delta >= cbs)
{
CLzRef *ptr1 = son + ((size_t)_cyclicBufferPos << 1);
*d++ = 0;
ptr1[0] = kEmptyHashValue;
ptr1[1] = kEmptyHashValue;
}
else
{
CLzRef *ptr0 = son + ((size_t)_cyclicBufferPos << 1) + 1;
CLzRef *ptr1 = son + ((size_t)_cyclicBufferPos << 1);
UInt32 *_distances = ++d;
uint32plus len0 = 0, len1 = 0;
UInt32 cutValue = _cutValue;
uint32plus maxLen = _maxLen;
// lenLimit++; // const Byte *lenLimit = cur + _lenLimit;
for (LOG_ITER(g_NumIters_Tree++);;)
{
LOG_ITER(g_NumIters_Loop++);
{
// const ptrdiff_t diff = (ptrdiff_t)0 - (ptrdiff_t)delta;
CLzRef *pair = son + ((size_t)((ptrdiff_t)_cyclicBufferPos - delta
+ (ptrdiff_t)(UInt32)(_cyclicBufferPos < delta ? cbs : 0)
) << 1);
const Byte *pb = cur - delta;
uint32plus len = (len0 < len1 ? len0 : len1);
#ifdef USE_SON_PREFETCH
const UInt32 pair0 = *pair;
#endif
if (pb[len] == cur[len])
{
if (++len != lenLimit && pb[len] == cur[len])
while (++len != lenLimit)
if (pb[len] != cur[len])
break;
if (maxLen < len)
{
maxLen = len;
*d++ = (UInt32)len;
*d++ = delta - 1;
if (len == lenLimit)
{
{
const UInt32 pair1 = pair[1];
*ptr0 = pair1;
*ptr1 =
#ifdef USE_SON_PREFETCH
pair0;
#else
pair[0];
#endif
}
_distances[-1] = (UInt32)(d - _distances);
#ifdef USE_LONG_MATCH_OPT
if (hash == size || *hash != delta || pb[lenLimit] != cur[lenLimit] || d >= limit)
break;
{
const ptrdiff_t diff = (ptrdiff_t)0 - (ptrdiff_t)delta;
for (;;)
{
*d++ = 2;
*d++ = (UInt32)lenLimit;
*d++ = delta - 1;
_cyclicBufferPos++;
{
CLzRef *dest = son + ((size_t)_cyclicBufferPos << 1);
const CLzRef *src = dest + ((diff +
(ptrdiff_t)(UInt32)(_cyclicBufferPos < delta ? cbs : 0)) << 1);
#if 0
*(UInt64 *)(void *)dest = *((const UInt64 *)(const void *)src);
#else
const UInt32 p0 = src[0];
const UInt32 p1 = src[1];
dest[0] = p0;
dest[1] = p1;
#endif
}
hash++;
pos++;
cur++;
pb++;
if (hash == size || *hash != delta || pb[lenLimit] != cur[lenLimit] || d >= limit)
break;
}
}
#endif
break;
}
}
}
{
const UInt32 curMatch = (UInt32)pos - delta;
if (pb[len] < cur[len])
{
delta = pair[1];
*ptr1 = curMatch;
ptr1 = pair + 1;
len1 = len;
}
else
{
delta = *pair;
*ptr0 = curMatch;
ptr0 = pair;
len0 = len;
}
{
if (delta >= curMatch)
return NULL;
delta = (UInt32)pos - delta;
if (delta >= cbs
// delta >= _cyclicBufferSize || delta >= pos
|| --cutValue == 0)
{
*ptr0 = *ptr1 = kEmptyHashValue;
_distances[-1] = (UInt32)(d - _distances);
break;
}
}
}
}
} // for (tree iterations)
}
pos++;
_cyclicBufferPos++;
cur++;
}
while (d < limit);
*posRes = (UInt32)pos;
return d;
}
*/

34
extern/lzma/LzHash.h vendored
View File

@@ -1,34 +0,0 @@
/* LzHash.h -- HASH constants for LZ algorithms
2023-03-05 : Igor Pavlov : Public domain */
#ifndef ZIP7_INC_LZ_HASH_H
#define ZIP7_INC_LZ_HASH_H
/*
(kHash2Size >= (1 << 8)) : Required
(kHash3Size >= (1 << 16)) : Required
*/
#define kHash2Size (1 << 10)
#define kHash3Size (1 << 16)
// #define kHash4Size (1 << 20)
#define kFix3HashSize (kHash2Size)
#define kFix4HashSize (kHash2Size + kHash3Size)
// #define kFix5HashSize (kHash2Size + kHash3Size + kHash4Size)
/*
We use up to 3 crc values for hash:
crc0
crc1 << Shift_1
crc2 << Shift_2
(Shift_1 = 5) and (Shift_2 = 10) are a good tradeoff.
Small values for Shift are not good for the collision rate.
A big value for Shift_2 increases the minimum size
of the hash table, which will be slow for small files.
*/
#define kLzHash_CrcShift_1 5
#define kLzHash_CrcShift_2 10
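/* A sketch of the combination described above, as used for the 5-byte
match-finder hash (illustrative helper, not SDK code). It assumes the 7zTypes.h
integer types, a precomputed CRC table and a power-of-two hash size with
hashMask = size - 1. */
static UInt32 Demo_Hash5(const Byte *p, const UInt32 *crc, UInt32 hashMask)
{
return (crc[p[0]]
^ ((UInt32)p[1] | ((UInt32)p[2] << 8))
^ (crc[p[3]] << kLzHash_CrcShift_1)
^ (crc[p[4]] << kLzHash_CrcShift_2)) & hashMask;
}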
#endif

1363
extern/lzma/LzmaDec.c vendored
View File

@@ -1,1363 +0,0 @@
/* LzmaDec.c -- LZMA Decoder
2023-04-07 : Igor Pavlov : Public domain */
#include "Precomp.h"
#include <string.h>
/* #include "CpuArch.h" */
#include "LzmaDec.h"
// #define kNumTopBits 24
#define kTopValue ((UInt32)1 << 24)
#define kNumBitModelTotalBits 11
#define kBitModelTotal (1 << kNumBitModelTotalBits)
#define RC_INIT_SIZE 5
#ifndef Z7_LZMA_DEC_OPT
#define kNumMoveBits 5
#define NORMALIZE if (range < kTopValue) { range <<= 8; code = (code << 8) | (*buf++); }
#define IF_BIT_0(p) ttt = *(p); NORMALIZE; bound = (range >> kNumBitModelTotalBits) * (UInt32)ttt; if (code < bound)
#define UPDATE_0(p) range = bound; *(p) = (CLzmaProb)(ttt + ((kBitModelTotal - ttt) >> kNumMoveBits));
#define UPDATE_1(p) range -= bound; code -= bound; *(p) = (CLzmaProb)(ttt - (ttt >> kNumMoveBits));
#define GET_BIT2(p, i, A0, A1) IF_BIT_0(p) \
{ UPDATE_0(p) i = (i + i); A0; } else \
{ UPDATE_1(p) i = (i + i) + 1; A1; }
#define TREE_GET_BIT(probs, i) { GET_BIT2(probs + i, i, ;, ;); }
#define REV_BIT(p, i, A0, A1) IF_BIT_0(p + i) \
{ UPDATE_0(p + i) A0; } else \
{ UPDATE_1(p + i) A1; }
#define REV_BIT_VAR( p, i, m) REV_BIT(p, i, i += m; m += m, m += m; i += m; )
#define REV_BIT_CONST(p, i, m) REV_BIT(p, i, i += m; , i += m * 2; )
#define REV_BIT_LAST( p, i, m) REV_BIT(p, i, i -= m , ; )
#define TREE_DECODE(probs, limit, i) \
{ i = 1; do { TREE_GET_BIT(probs, i); } while (i < limit); i -= limit; }
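/* A function-style sketch of one adaptive-bit decode, as performed by the
NORMALIZE / IF_BIT_0 / UPDATE_0 / UPDATE_1 macros above (illustrative only; the
real decoder stays in macro form for speed). It assumes the caller guarantees
another input byte is readable at *buf whenever normalization is needed. */
static unsigned Demo_DecodeBit(UInt32 *range, UInt32 *code, const Byte **buf, CLzmaProb *prob)
{
UInt32 bound;
const unsigned ttt = *prob;
if (*range < kTopValue) { *range <<= 8; *code = (*code << 8) | *(*buf)++; }
bound = (*range >> kNumBitModelTotalBits) * (UInt32)ttt;
if (*code < bound)
{
*range = bound;
*prob = (CLzmaProb)(ttt + ((kBitModelTotal - ttt) >> kNumMoveBits));
return 0;
}
*range -= bound;
*code -= bound;
*prob = (CLzmaProb)(ttt - (ttt >> kNumMoveBits));
return 1;
}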
/* #define Z7_LZMA_SIZE_OPT */
#ifdef Z7_LZMA_SIZE_OPT
#define TREE_6_DECODE(probs, i) TREE_DECODE(probs, (1 << 6), i)
#else
#define TREE_6_DECODE(probs, i) \
{ i = 1; \
TREE_GET_BIT(probs, i) \
TREE_GET_BIT(probs, i) \
TREE_GET_BIT(probs, i) \
TREE_GET_BIT(probs, i) \
TREE_GET_BIT(probs, i) \
TREE_GET_BIT(probs, i) \
i -= 0x40; }
#endif
#define NORMAL_LITER_DEC TREE_GET_BIT(prob, symbol)
#define MATCHED_LITER_DEC \
matchByte += matchByte; \
bit = offs; \
offs &= matchByte; \
probLit = prob + (offs + bit + symbol); \
GET_BIT2(probLit, symbol, offs ^= bit; , ;)
#endif // Z7_LZMA_DEC_OPT
#define NORMALIZE_CHECK if (range < kTopValue) { if (buf >= bufLimit) return DUMMY_INPUT_EOF; range <<= 8; code = (code << 8) | (*buf++); }
#define IF_BIT_0_CHECK(p) ttt = *(p); NORMALIZE_CHECK bound = (range >> kNumBitModelTotalBits) * (UInt32)ttt; if (code < bound)
#define UPDATE_0_CHECK range = bound;
#define UPDATE_1_CHECK range -= bound; code -= bound;
#define GET_BIT2_CHECK(p, i, A0, A1) IF_BIT_0_CHECK(p) \
{ UPDATE_0_CHECK i = (i + i); A0; } else \
{ UPDATE_1_CHECK i = (i + i) + 1; A1; }
#define GET_BIT_CHECK(p, i) GET_BIT2_CHECK(p, i, ; , ;)
#define TREE_DECODE_CHECK(probs, limit, i) \
{ i = 1; do { GET_BIT_CHECK(probs + i, i) } while (i < limit); i -= limit; }
#define REV_BIT_CHECK(p, i, m) IF_BIT_0_CHECK(p + i) \
{ UPDATE_0_CHECK i += m; m += m; } else \
{ UPDATE_1_CHECK m += m; i += m; }
#define kNumPosBitsMax 4
#define kNumPosStatesMax (1 << kNumPosBitsMax)
#define kLenNumLowBits 3
#define kLenNumLowSymbols (1 << kLenNumLowBits)
#define kLenNumHighBits 8
#define kLenNumHighSymbols (1 << kLenNumHighBits)
#define LenLow 0
#define LenHigh (LenLow + 2 * (kNumPosStatesMax << kLenNumLowBits))
#define kNumLenProbs (LenHigh + kLenNumHighSymbols)
#define LenChoice LenLow
#define LenChoice2 (LenLow + (1 << kLenNumLowBits))
#define kNumStates 12
#define kNumStates2 16
#define kNumLitStates 7
#define kStartPosModelIndex 4
#define kEndPosModelIndex 14
#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
#define kNumPosSlotBits 6
#define kNumLenToPosStates 4
#define kNumAlignBits 4
#define kAlignTableSize (1 << kNumAlignBits)
#define kMatchMinLen 2
#define kMatchSpecLenStart (kMatchMinLen + kLenNumLowSymbols * 2 + kLenNumHighSymbols)
#define kMatchSpecLen_Error_Data (1 << 9)
#define kMatchSpecLen_Error_Fail (kMatchSpecLen_Error_Data - 1)
/* External ASM code needs same CLzmaProb array layout. So don't change it. */
/* (probs_1664) is faster and better for code size at some platforms */
/*
#ifdef MY_CPU_X86_OR_AMD64
*/
#define kStartOffset 1664
#define GET_PROBS p->probs_1664
/*
#define GET_PROBS p->probs + kStartOffset
#else
#define kStartOffset 0
#define GET_PROBS p->probs
#endif
*/
#define SpecPos (-kStartOffset)
#define IsRep0Long (SpecPos + kNumFullDistances)
#define RepLenCoder (IsRep0Long + (kNumStates2 << kNumPosBitsMax))
#define LenCoder (RepLenCoder + kNumLenProbs)
#define IsMatch (LenCoder + kNumLenProbs)
#define Align (IsMatch + (kNumStates2 << kNumPosBitsMax))
#define IsRep (Align + kAlignTableSize)
#define IsRepG0 (IsRep + kNumStates)
#define IsRepG1 (IsRepG0 + kNumStates)
#define IsRepG2 (IsRepG1 + kNumStates)
#define PosSlot (IsRepG2 + kNumStates)
#define Literal (PosSlot + (kNumLenToPosStates << kNumPosSlotBits))
#define NUM_BASE_PROBS (Literal + kStartOffset)
#if Align != 0 && kStartOffset != 0
#error Stop_Compiling_Bad_LZMA_kAlign
#endif
#if NUM_BASE_PROBS != 1984
#error Stop_Compiling_Bad_LZMA_PROBS
#endif
#define LZMA_LIT_SIZE 0x300
#define LzmaProps_GetNumProbs(p) (NUM_BASE_PROBS + ((UInt32)LZMA_LIT_SIZE << ((p)->lc + (p)->lp)))
#define CALC_POS_STATE(processedPos, pbMask) (((processedPos) & (pbMask)) << 4)
#define COMBINED_PS_STATE (posState + state)
#define GET_LEN_STATE (posState)
#define LZMA_DIC_MIN (1 << 12)
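/* A worked instance of LzmaProps_GetNumProbs() above for the default LZMA
properties lc=3, lp=0 (illustrative define, not SDK code):
NUM_BASE_PROBS + (LZMA_LIT_SIZE << (3 + 0)) = 1984 + 6144 = 8128 entries,
i.e. about 16 KiB of probability state with 16-bit CLzmaProb. */
#define DEMO_NUM_PROBS_DEFAULT (NUM_BASE_PROBS + ((UInt32)LZMA_LIT_SIZE << (3 + 0)))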
/*
p->remainLen : shows status of LZMA decoder:
< kMatchSpecLenStart : the number of bytes to be copied with (p->rep0) offset
= kMatchSpecLenStart : the LZMA stream was finished with end mark
= kMatchSpecLenStart + 1 : need init range coder
= kMatchSpecLenStart + 2 : need init range coder and state
= kMatchSpecLen_Error_Fail : Internal Code Failure
= kMatchSpecLen_Error_Data + [0 ... 273] : LZMA Data Error
*/
/* ---------- LZMA_DECODE_REAL ---------- */
/*
LzmaDec_DecodeReal_3() can be implemented in external ASM file.
3 - is the code compatibility version of that function for check at link time.
*/
#define LZMA_DECODE_REAL LzmaDec_DecodeReal_3
/*
LZMA_DECODE_REAL()
In:
RangeCoder is normalized
if (p->dicPos == limit)
{
LzmaDec_TryDummy() was called before to exclude LITERAL and MATCH-REP cases.
So first symbol can be only MATCH-NON-REP. And if that MATCH-NON-REP symbol
is not END_OF_PAYLOAD_MARKER, then the function doesn't write any byte to the dictionary,
the function returns SZ_OK, and the caller can use (p->remainLen) and (p->reps[0]) later.
}
Processing:
The first LZMA symbol will be decoded in any case.
All main checks for limits are at the end of main loop,
It decodes additional LZMA-symbols while (p->buf < bufLimit && dicPos < limit),
RangeCoder is still without last normalization when (p->buf < bufLimit) is being checked.
But if (p->buf < bufLimit), the caller provided at least (LZMA_REQUIRED_INPUT_MAX + 1) bytes for
next iteration before limit (bufLimit + LZMA_REQUIRED_INPUT_MAX),
that is enough for worst case LZMA symbol with one additional RangeCoder normalization for one bit.
So that function never reads bufLimit [LZMA_REQUIRED_INPUT_MAX] byte.
Out:
RangeCoder is normalized
Result:
SZ_OK - OK
p->remainLen:
< kMatchSpecLenStart : the number of bytes to be copied with (p->reps[0]) offset
= kMatchSpecLenStart : the LZMA stream was finished with end mark
SZ_ERROR_DATA - error, when the MATCH-Symbol refers out of dictionary
p->remainLen : undefined
p->reps[*] : undefined
*/
#ifdef Z7_LZMA_DEC_OPT
int Z7_FASTCALL LZMA_DECODE_REAL(CLzmaDec *p, SizeT limit, const Byte *bufLimit);
#else
static
int Z7_FASTCALL LZMA_DECODE_REAL(CLzmaDec *p, SizeT limit, const Byte *bufLimit)
{
CLzmaProb *probs = GET_PROBS;
unsigned state = (unsigned)p->state;
UInt32 rep0 = p->reps[0], rep1 = p->reps[1], rep2 = p->reps[2], rep3 = p->reps[3];
unsigned pbMask = ((unsigned)1 << (p->prop.pb)) - 1;
unsigned lc = p->prop.lc;
unsigned lpMask = ((unsigned)0x100 << p->prop.lp) - ((unsigned)0x100 >> lc);
Byte *dic = p->dic;
SizeT dicBufSize = p->dicBufSize;
SizeT dicPos = p->dicPos;
UInt32 processedPos = p->processedPos;
UInt32 checkDicSize = p->checkDicSize;
unsigned len = 0;
const Byte *buf = p->buf;
UInt32 range = p->range;
UInt32 code = p->code;
do
{
CLzmaProb *prob;
UInt32 bound;
unsigned ttt;
unsigned posState = CALC_POS_STATE(processedPos, pbMask);
prob = probs + IsMatch + COMBINED_PS_STATE;
IF_BIT_0(prob)
{
unsigned symbol;
UPDATE_0(prob)
prob = probs + Literal;
if (processedPos != 0 || checkDicSize != 0)
prob += (UInt32)3 * ((((processedPos << 8) + dic[(dicPos == 0 ? dicBufSize : dicPos) - 1]) & lpMask) << lc);
processedPos++;
if (state < kNumLitStates)
{
state -= (state < 4) ? state : 3;
symbol = 1;
#ifdef Z7_LZMA_SIZE_OPT
do { NORMAL_LITER_DEC } while (symbol < 0x100);
#else
NORMAL_LITER_DEC
NORMAL_LITER_DEC
NORMAL_LITER_DEC
NORMAL_LITER_DEC
NORMAL_LITER_DEC
NORMAL_LITER_DEC
NORMAL_LITER_DEC
NORMAL_LITER_DEC
#endif
}
else
{
unsigned matchByte = dic[dicPos - rep0 + (dicPos < rep0 ? dicBufSize : 0)];
unsigned offs = 0x100;
state -= (state < 10) ? 3 : 6;
symbol = 1;
#ifdef Z7_LZMA_SIZE_OPT
do
{
unsigned bit;
CLzmaProb *probLit;
MATCHED_LITER_DEC
}
while (symbol < 0x100);
#else
{
unsigned bit;
CLzmaProb *probLit;
MATCHED_LITER_DEC
MATCHED_LITER_DEC
MATCHED_LITER_DEC
MATCHED_LITER_DEC
MATCHED_LITER_DEC
MATCHED_LITER_DEC
MATCHED_LITER_DEC
MATCHED_LITER_DEC
}
#endif
}
dic[dicPos++] = (Byte)symbol;
continue;
}
{
UPDATE_1(prob)
prob = probs + IsRep + state;
IF_BIT_0(prob)
{
UPDATE_0(prob)
state += kNumStates;
prob = probs + LenCoder;
}
else
{
UPDATE_1(prob)
prob = probs + IsRepG0 + state;
IF_BIT_0(prob)
{
UPDATE_0(prob)
prob = probs + IsRep0Long + COMBINED_PS_STATE;
IF_BIT_0(prob)
{
UPDATE_0(prob)
// that case was checked before with kBadRepCode
// if (checkDicSize == 0 && processedPos == 0) { len = kMatchSpecLen_Error_Data + 1; break; }
// The caller doesn't allow (dicPos == limit) case here
// so we don't need the following check:
// if (dicPos == limit) { state = state < kNumLitStates ? 9 : 11; len = 1; break; }
dic[dicPos] = dic[dicPos - rep0 + (dicPos < rep0 ? dicBufSize : 0)];
dicPos++;
processedPos++;
state = state < kNumLitStates ? 9 : 11;
continue;
}
UPDATE_1(prob)
}
else
{
UInt32 distance;
UPDATE_1(prob)
prob = probs + IsRepG1 + state;
IF_BIT_0(prob)
{
UPDATE_0(prob)
distance = rep1;
}
else
{
UPDATE_1(prob)
prob = probs + IsRepG2 + state;
IF_BIT_0(prob)
{
UPDATE_0(prob)
distance = rep2;
}
else
{
UPDATE_1(prob)
distance = rep3;
rep3 = rep2;
}
rep2 = rep1;
}
rep1 = rep0;
rep0 = distance;
}
state = state < kNumLitStates ? 8 : 11;
prob = probs + RepLenCoder;
}
#ifdef Z7_LZMA_SIZE_OPT
{
unsigned lim, offset;
CLzmaProb *probLen = prob + LenChoice;
IF_BIT_0(probLen)
{
UPDATE_0(probLen)
probLen = prob + LenLow + GET_LEN_STATE;
offset = 0;
lim = (1 << kLenNumLowBits);
}
else
{
UPDATE_1(probLen)
probLen = prob + LenChoice2;
IF_BIT_0(probLen)
{
UPDATE_0(probLen)
probLen = prob + LenLow + GET_LEN_STATE + (1 << kLenNumLowBits);
offset = kLenNumLowSymbols;
lim = (1 << kLenNumLowBits);
}
else
{
UPDATE_1(probLen)
probLen = prob + LenHigh;
offset = kLenNumLowSymbols * 2;
lim = (1 << kLenNumHighBits);
}
}
TREE_DECODE(probLen, lim, len)
len += offset;
}
#else
{
CLzmaProb *probLen = prob + LenChoice;
IF_BIT_0(probLen)
{
UPDATE_0(probLen)
probLen = prob + LenLow + GET_LEN_STATE;
len = 1;
TREE_GET_BIT(probLen, len)
TREE_GET_BIT(probLen, len)
TREE_GET_BIT(probLen, len)
len -= 8;
}
else
{
UPDATE_1(probLen)
probLen = prob + LenChoice2;
IF_BIT_0(probLen)
{
UPDATE_0(probLen)
probLen = prob + LenLow + GET_LEN_STATE + (1 << kLenNumLowBits);
len = 1;
TREE_GET_BIT(probLen, len)
TREE_GET_BIT(probLen, len)
TREE_GET_BIT(probLen, len)
}
else
{
UPDATE_1(probLen)
probLen = prob + LenHigh;
TREE_DECODE(probLen, (1 << kLenNumHighBits), len)
len += kLenNumLowSymbols * 2;
}
}
}
#endif
if (state >= kNumStates)
{
UInt32 distance;
prob = probs + PosSlot +
((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) << kNumPosSlotBits);
TREE_6_DECODE(prob, distance)
if (distance >= kStartPosModelIndex)
{
unsigned posSlot = (unsigned)distance;
unsigned numDirectBits = (unsigned)(((distance >> 1) - 1));
distance = (2 | (distance & 1));
if (posSlot < kEndPosModelIndex)
{
distance <<= numDirectBits;
prob = probs + SpecPos;
{
UInt32 m = 1;
distance++;
do
{
REV_BIT_VAR(prob, distance, m)
}
while (--numDirectBits);
distance -= m;
}
}
else
{
numDirectBits -= kNumAlignBits;
do
{
NORMALIZE
range >>= 1;
{
UInt32 t;
code -= range;
t = (0 - ((UInt32)code >> 31)); /* (UInt32)((Int32)code >> 31) */
distance = (distance << 1) + (t + 1);
code += range & t;
}
/*
distance <<= 1;
if (code >= range)
{
code -= range;
distance |= 1;
}
*/
}
while (--numDirectBits);
prob = probs + Align;
distance <<= kNumAlignBits;
{
unsigned i = 1;
REV_BIT_CONST(prob, i, 1)
REV_BIT_CONST(prob, i, 2)
REV_BIT_CONST(prob, i, 4)
REV_BIT_LAST (prob, i, 8)
distance |= i;
}
if (distance == (UInt32)0xFFFFFFFF)
{
len = kMatchSpecLenStart;
state -= kNumStates;
break;
}
}
}
rep3 = rep2;
rep2 = rep1;
rep1 = rep0;
rep0 = distance + 1;
state = (state < kNumStates + kNumLitStates) ? kNumLitStates : kNumLitStates + 3;
if (distance >= (checkDicSize == 0 ? processedPos: checkDicSize))
{
len += kMatchSpecLen_Error_Data + kMatchMinLen;
// len = kMatchSpecLen_Error_Data;
// len += kMatchMinLen;
break;
}
}
len += kMatchMinLen;
{
SizeT rem;
unsigned curLen;
SizeT pos;
if ((rem = limit - dicPos) == 0)
{
/*
We stop decoding and return SZ_OK, and we can resume decoding later.
Any error conditions can be tested later in caller code.
For more strict mode we can stop decoding with error
// len += kMatchSpecLen_Error_Data;
*/
break;
}
curLen = ((rem < len) ? (unsigned)rem : len);
pos = dicPos - rep0 + (dicPos < rep0 ? dicBufSize : 0);
processedPos += (UInt32)curLen;
len -= curLen;
if (curLen <= dicBufSize - pos)
{
Byte *dest = dic + dicPos;
ptrdiff_t src = (ptrdiff_t)pos - (ptrdiff_t)dicPos;
const Byte *lim = dest + curLen;
dicPos += (SizeT)curLen;
do
*(dest) = (Byte)*(dest + src);
while (++dest != lim);
}
else
{
do
{
dic[dicPos++] = dic[pos];
if (++pos == dicBufSize)
pos = 0;
}
while (--curLen != 0);
}
}
}
}
while (dicPos < limit && buf < bufLimit);
NORMALIZE
p->buf = buf;
p->range = range;
p->code = code;
p->remainLen = (UInt32)len; // & (kMatchSpecLen_Error_Data - 1); // we can write real length for error matches too.
p->dicPos = dicPos;
p->processedPos = processedPos;
p->reps[0] = rep0;
p->reps[1] = rep1;
p->reps[2] = rep2;
p->reps[3] = rep3;
p->state = (UInt32)state;
if (len >= kMatchSpecLen_Error_Data)
return SZ_ERROR_DATA;
return SZ_OK;
}
#endif
static void Z7_FASTCALL LzmaDec_WriteRem(CLzmaDec *p, SizeT limit)
{
unsigned len = (unsigned)p->remainLen;
if (len == 0 /* || len >= kMatchSpecLenStart */)
return;
{
SizeT dicPos = p->dicPos;
Byte *dic;
SizeT dicBufSize;
SizeT rep0; /* we use SizeT to avoid the BUG of VC14 for AMD64 */
{
SizeT rem = limit - dicPos;
if (rem < len)
{
len = (unsigned)(rem);
if (len == 0)
return;
}
}
if (p->checkDicSize == 0 && p->prop.dicSize - p->processedPos <= len)
p->checkDicSize = p->prop.dicSize;
p->processedPos += (UInt32)len;
p->remainLen -= (UInt32)len;
dic = p->dic;
rep0 = p->reps[0];
dicBufSize = p->dicBufSize;
do
{
dic[dicPos] = dic[dicPos - rep0 + (dicPos < rep0 ? dicBufSize : 0)];
dicPos++;
}
while (--len);
p->dicPos = dicPos;
}
}
/*
At the start of a new stream we have one of the following symbols:
- Literal - is allowed
- Non-Rep-Match - is allowed only if it's the end marker symbol
- Rep-Match - is not allowed
We use an early check of (RangeCoder:Code) against kBadRepCode to simplify the main decoding code
*/
#define kRange0 0xFFFFFFFF
#define kBound0 ((kRange0 >> kNumBitModelTotalBits) << (kNumBitModelTotalBits - 1))
#define kBadRepCode (kBound0 + (((kRange0 - kBound0) >> kNumBitModelTotalBits) << (kNumBitModelTotalBits - 1)))
#if kBadRepCode != (0xC0000000 - 0x400)
#error Stop_Compiling_Bad_LZMA_Check
#endif
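/*
A worked expansion of the guard above (not part of the original source), with
kNumBitModelTotalBits = 11:
  kBound0     = (0xFFFFFFFF >> 11) << 10                = 0x7FFFFC00
  kBadRepCode = 0x7FFFFC00 + ((0x800003FF >> 11) << 10)
              = 0x7FFFFC00 + 0x40000000                 = 0xBFFFFC00 = 0xC0000000 - 0x400
so the #error check simply pins these macros to the constant the decoder was written against.
*/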
/*
LzmaDec_DecodeReal2():
It calls LZMA_DECODE_REAL() and adjusts the limit according to (p->checkDicSize).
We correct (p->checkDicSize) after LZMA_DECODE_REAL() and in LzmaDec_WriteRem(),
and we support the following states of (p->checkDicSize):
if (total_processed < p->prop.dicSize) then
{
(total_processed == p->processedPos)
(p->checkDicSize == 0)
}
else
(p->checkDicSize == p->prop.dicSize)
*/
static int Z7_FASTCALL LzmaDec_DecodeReal2(CLzmaDec *p, SizeT limit, const Byte *bufLimit)
{
if (p->checkDicSize == 0)
{
UInt32 rem = p->prop.dicSize - p->processedPos;
if (limit - p->dicPos > rem)
limit = p->dicPos + rem;
}
{
int res = LZMA_DECODE_REAL(p, limit, bufLimit);
if (p->checkDicSize == 0 && p->processedPos >= p->prop.dicSize)
p->checkDicSize = p->prop.dicSize;
return res;
}
}
typedef enum
{
DUMMY_INPUT_EOF, /* need more input data */
DUMMY_LIT,
DUMMY_MATCH,
DUMMY_REP
} ELzmaDummy;
#define IS_DUMMY_END_MARKER_POSSIBLE(dummyRes) ((dummyRes) == DUMMY_MATCH)
static ELzmaDummy LzmaDec_TryDummy(const CLzmaDec *p, const Byte *buf, const Byte **bufOut)
{
UInt32 range = p->range;
UInt32 code = p->code;
const Byte *bufLimit = *bufOut;
const CLzmaProb *probs = GET_PROBS;
unsigned state = (unsigned)p->state;
ELzmaDummy res;
for (;;)
{
const CLzmaProb *prob;
UInt32 bound;
unsigned ttt;
unsigned posState = CALC_POS_STATE(p->processedPos, ((unsigned)1 << p->prop.pb) - 1);
prob = probs + IsMatch + COMBINED_PS_STATE;
IF_BIT_0_CHECK(prob)
{
UPDATE_0_CHECK
prob = probs + Literal;
if (p->checkDicSize != 0 || p->processedPos != 0)
prob += ((UInt32)LZMA_LIT_SIZE *
((((p->processedPos) & (((unsigned)1 << (p->prop.lp)) - 1)) << p->prop.lc) +
((unsigned)p->dic[(p->dicPos == 0 ? p->dicBufSize : p->dicPos) - 1] >> (8 - p->prop.lc))));
if (state < kNumLitStates)
{
unsigned symbol = 1;
do { GET_BIT_CHECK(prob + symbol, symbol) } while (symbol < 0x100);
}
else
{
unsigned matchByte = p->dic[p->dicPos - p->reps[0] +
(p->dicPos < p->reps[0] ? p->dicBufSize : 0)];
unsigned offs = 0x100;
unsigned symbol = 1;
do
{
unsigned bit;
const CLzmaProb *probLit;
matchByte += matchByte;
bit = offs;
offs &= matchByte;
probLit = prob + (offs + bit + symbol);
GET_BIT2_CHECK(probLit, symbol, offs ^= bit; , ; )
}
while (symbol < 0x100);
}
res = DUMMY_LIT;
}
else
{
unsigned len;
UPDATE_1_CHECK
prob = probs + IsRep + state;
IF_BIT_0_CHECK(prob)
{
UPDATE_0_CHECK
state = 0;
prob = probs + LenCoder;
res = DUMMY_MATCH;
}
else
{
UPDATE_1_CHECK
res = DUMMY_REP;
prob = probs + IsRepG0 + state;
IF_BIT_0_CHECK(prob)
{
UPDATE_0_CHECK
prob = probs + IsRep0Long + COMBINED_PS_STATE;
IF_BIT_0_CHECK(prob)
{
UPDATE_0_CHECK
break;
}
else
{
UPDATE_1_CHECK
}
}
else
{
UPDATE_1_CHECK
prob = probs + IsRepG1 + state;
IF_BIT_0_CHECK(prob)
{
UPDATE_0_CHECK
}
else
{
UPDATE_1_CHECK
prob = probs + IsRepG2 + state;
IF_BIT_0_CHECK(prob)
{
UPDATE_0_CHECK
}
else
{
UPDATE_1_CHECK
}
}
}
state = kNumStates;
prob = probs + RepLenCoder;
}
{
unsigned limit, offset;
const CLzmaProb *probLen = prob + LenChoice;
IF_BIT_0_CHECK(probLen)
{
UPDATE_0_CHECK
probLen = prob + LenLow + GET_LEN_STATE;
offset = 0;
limit = 1 << kLenNumLowBits;
}
else
{
UPDATE_1_CHECK
probLen = prob + LenChoice2;
IF_BIT_0_CHECK(probLen)
{
UPDATE_0_CHECK
probLen = prob + LenLow + GET_LEN_STATE + (1 << kLenNumLowBits);
offset = kLenNumLowSymbols;
limit = 1 << kLenNumLowBits;
}
else
{
UPDATE_1_CHECK
probLen = prob + LenHigh;
offset = kLenNumLowSymbols * 2;
limit = 1 << kLenNumHighBits;
}
}
TREE_DECODE_CHECK(probLen, limit, len)
len += offset;
}
if (state < 4)
{
unsigned posSlot;
prob = probs + PosSlot +
((len < kNumLenToPosStates - 1 ? len : kNumLenToPosStates - 1) <<
kNumPosSlotBits);
TREE_DECODE_CHECK(prob, 1 << kNumPosSlotBits, posSlot)
if (posSlot >= kStartPosModelIndex)
{
unsigned numDirectBits = ((posSlot >> 1) - 1);
if (posSlot < kEndPosModelIndex)
{
prob = probs + SpecPos + ((2 | (posSlot & 1)) << numDirectBits);
}
else
{
numDirectBits -= kNumAlignBits;
do
{
NORMALIZE_CHECK
range >>= 1;
code -= range & (((code - range) >> 31) - 1);
/* if (code >= range) code -= range; */
}
while (--numDirectBits);
prob = probs + Align;
numDirectBits = kNumAlignBits;
}
{
unsigned i = 1;
unsigned m = 1;
do
{
REV_BIT_CHECK(prob, i, m)
}
while (--numDirectBits);
}
}
}
}
break;
}
NORMALIZE_CHECK
*bufOut = buf;
return res;
}
void LzmaDec_InitDicAndState(CLzmaDec *p, BoolInt initDic, BoolInt initState);
void LzmaDec_InitDicAndState(CLzmaDec *p, BoolInt initDic, BoolInt initState)
{
p->remainLen = kMatchSpecLenStart + 1;
p->tempBufSize = 0;
if (initDic)
{
p->processedPos = 0;
p->checkDicSize = 0;
p->remainLen = kMatchSpecLenStart + 2;
}
if (initState)
p->remainLen = kMatchSpecLenStart + 2;
}
void LzmaDec_Init(CLzmaDec *p)
{
p->dicPos = 0;
LzmaDec_InitDicAndState(p, True, True);
}
/*
LZMA supports an optional end_marker.
So the decoder can look ahead for one additional LZMA symbol to check for the end_marker.
That additional LZMA symbol can require up to LZMA_REQUIRED_INPUT_MAX bytes in the input stream.
When the decoder reaches dicLimit, it looks at the (finishMode) parameter:
if (finishMode == LZMA_FINISH_ANY), the decoder doesn't look ahead
if (finishMode != LZMA_FINISH_ANY), the decoder looks ahead, if the end_marker is possible at the current position
When the decoder looks ahead and the lookahead symbol is not the end_marker, we have two ways:
1) Strict mode (default) : the decoder returns SZ_ERROR_DATA.
2) The relaxed mode (alternative mode) : we could return SZ_OK, and the caller
must check the (status) value. The caller can show the error,
if the end of stream is expected, and the (status) is not
LZMA_STATUS_FINISHED_WITH_MARK or LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK.
*/
#define RETURN_NOT_FINISHED_FOR_FINISH \
*status = LZMA_STATUS_NOT_FINISHED; \
return SZ_ERROR_DATA; // for strict mode
// return SZ_OK; // for relaxed mode
SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit, const Byte *src, SizeT *srcLen,
ELzmaFinishMode finishMode, ELzmaStatus *status)
{
SizeT inSize = *srcLen;
(*srcLen) = 0;
*status = LZMA_STATUS_NOT_SPECIFIED;
if (p->remainLen > kMatchSpecLenStart)
{
if (p->remainLen > kMatchSpecLenStart + 2)
return p->remainLen == kMatchSpecLen_Error_Fail ? SZ_ERROR_FAIL : SZ_ERROR_DATA;
for (; inSize > 0 && p->tempBufSize < RC_INIT_SIZE; (*srcLen)++, inSize--)
p->tempBuf[p->tempBufSize++] = *src++;
if (p->tempBufSize != 0 && p->tempBuf[0] != 0)
return SZ_ERROR_DATA;
if (p->tempBufSize < RC_INIT_SIZE)
{
*status = LZMA_STATUS_NEEDS_MORE_INPUT;
return SZ_OK;
}
p->code =
((UInt32)p->tempBuf[1] << 24)
| ((UInt32)p->tempBuf[2] << 16)
| ((UInt32)p->tempBuf[3] << 8)
| ((UInt32)p->tempBuf[4]);
if (p->checkDicSize == 0
&& p->processedPos == 0
&& p->code >= kBadRepCode)
return SZ_ERROR_DATA;
p->range = 0xFFFFFFFF;
p->tempBufSize = 0;
if (p->remainLen > kMatchSpecLenStart + 1)
{
SizeT numProbs = LzmaProps_GetNumProbs(&p->prop);
SizeT i;
CLzmaProb *probs = p->probs;
for (i = 0; i < numProbs; i++)
probs[i] = kBitModelTotal >> 1;
p->reps[0] = p->reps[1] = p->reps[2] = p->reps[3] = 1;
p->state = 0;
}
p->remainLen = 0;
}
for (;;)
{
if (p->remainLen == kMatchSpecLenStart)
{
if (p->code != 0)
return SZ_ERROR_DATA;
*status = LZMA_STATUS_FINISHED_WITH_MARK;
return SZ_OK;
}
LzmaDec_WriteRem(p, dicLimit);
{
// (p->remainLen == 0 || p->dicPos == dicLimit)
int checkEndMarkNow = 0;
if (p->dicPos >= dicLimit)
{
if (p->remainLen == 0 && p->code == 0)
{
*status = LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK;
return SZ_OK;
}
if (finishMode == LZMA_FINISH_ANY)
{
*status = LZMA_STATUS_NOT_FINISHED;
return SZ_OK;
}
if (p->remainLen != 0)
{
RETURN_NOT_FINISHED_FOR_FINISH
}
checkEndMarkNow = 1;
}
// (p->remainLen == 0)
if (p->tempBufSize == 0)
{
const Byte *bufLimit;
int dummyProcessed = -1;
if (inSize < LZMA_REQUIRED_INPUT_MAX || checkEndMarkNow)
{
const Byte *bufOut = src + inSize;
ELzmaDummy dummyRes = LzmaDec_TryDummy(p, src, &bufOut);
if (dummyRes == DUMMY_INPUT_EOF)
{
size_t i;
if (inSize >= LZMA_REQUIRED_INPUT_MAX)
break;
(*srcLen) += inSize;
p->tempBufSize = (unsigned)inSize;
for (i = 0; i < inSize; i++)
p->tempBuf[i] = src[i];
*status = LZMA_STATUS_NEEDS_MORE_INPUT;
return SZ_OK;
}
dummyProcessed = (int)(bufOut - src);
if ((unsigned)dummyProcessed > LZMA_REQUIRED_INPUT_MAX)
break;
if (checkEndMarkNow && !IS_DUMMY_END_MARKER_POSSIBLE(dummyRes))
{
unsigned i;
(*srcLen) += (unsigned)dummyProcessed;
p->tempBufSize = (unsigned)dummyProcessed;
for (i = 0; i < (unsigned)dummyProcessed; i++)
p->tempBuf[i] = src[i];
// p->remainLen = kMatchSpecLen_Error_Data;
RETURN_NOT_FINISHED_FOR_FINISH
}
bufLimit = src;
// we will decode only one iteration
}
else
bufLimit = src + inSize - LZMA_REQUIRED_INPUT_MAX;
p->buf = src;
{
int res = LzmaDec_DecodeReal2(p, dicLimit, bufLimit);
SizeT processed = (SizeT)(p->buf - src);
if (dummyProcessed < 0)
{
if (processed > inSize)
break;
}
else if ((unsigned)dummyProcessed != processed)
break;
src += processed;
inSize -= processed;
(*srcLen) += processed;
if (res != SZ_OK)
{
p->remainLen = kMatchSpecLen_Error_Data;
return SZ_ERROR_DATA;
}
}
continue;
}
{
// we have some data in (p->tempBuf)
// in strict mode: tempBufSize is not enough to decode one symbol.
// in relaxed mode: tempBufSize is not larger than required to decode one symbol.
unsigned rem = p->tempBufSize;
unsigned ahead = 0;
int dummyProcessed = -1;
while (rem < LZMA_REQUIRED_INPUT_MAX && ahead < inSize)
p->tempBuf[rem++] = src[ahead++];
// ahead - the size of new data copied from (src) to (p->tempBuf)
// rem - the size of temp buffer including new data from (src)
if (rem < LZMA_REQUIRED_INPUT_MAX || checkEndMarkNow)
{
const Byte *bufOut = p->tempBuf + rem;
ELzmaDummy dummyRes = LzmaDec_TryDummy(p, p->tempBuf, &bufOut);
if (dummyRes == DUMMY_INPUT_EOF)
{
if (rem >= LZMA_REQUIRED_INPUT_MAX)
break;
p->tempBufSize = rem;
(*srcLen) += (SizeT)ahead;
*status = LZMA_STATUS_NEEDS_MORE_INPUT;
return SZ_OK;
}
dummyProcessed = (int)(bufOut - p->tempBuf);
if ((unsigned)dummyProcessed < p->tempBufSize)
break;
if (checkEndMarkNow && !IS_DUMMY_END_MARKER_POSSIBLE(dummyRes))
{
(*srcLen) += (unsigned)dummyProcessed - p->tempBufSize;
p->tempBufSize = (unsigned)dummyProcessed;
// p->remainLen = kMatchSpecLen_Error_Data;
RETURN_NOT_FINISHED_FOR_FINISH
}
}
p->buf = p->tempBuf;
{
// we decode one symbol from (p->tempBuf) here, so the (bufLimit) is equal to (p->buf)
int res = LzmaDec_DecodeReal2(p, dicLimit, p->buf);
SizeT processed = (SizeT)(p->buf - p->tempBuf);
rem = p->tempBufSize;
if (dummyProcessed < 0)
{
if (processed > LZMA_REQUIRED_INPUT_MAX)
break;
if (processed < rem)
break;
}
else if ((unsigned)dummyProcessed != processed)
break;
processed -= rem;
src += processed;
inSize -= processed;
(*srcLen) += processed;
p->tempBufSize = 0;
if (res != SZ_OK)
{
p->remainLen = kMatchSpecLen_Error_Data;
return SZ_ERROR_DATA;
}
}
}
}
}
/* Some unexpected error: internal error of code, memory corruption or hardware failure */
p->remainLen = kMatchSpecLen_Error_Fail;
return SZ_ERROR_FAIL;
}
SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status)
{
SizeT outSize = *destLen;
SizeT inSize = *srcLen;
*srcLen = *destLen = 0;
for (;;)
{
SizeT inSizeCur = inSize, outSizeCur, dicPos;
ELzmaFinishMode curFinishMode;
SRes res;
if (p->dicPos == p->dicBufSize)
p->dicPos = 0;
dicPos = p->dicPos;
if (outSize > p->dicBufSize - dicPos)
{
outSizeCur = p->dicBufSize;
curFinishMode = LZMA_FINISH_ANY;
}
else
{
outSizeCur = dicPos + outSize;
curFinishMode = finishMode;
}
res = LzmaDec_DecodeToDic(p, outSizeCur, src, &inSizeCur, curFinishMode, status);
src += inSizeCur;
inSize -= inSizeCur;
*srcLen += inSizeCur;
outSizeCur = p->dicPos - dicPos;
memcpy(dest, p->dic + dicPos, outSizeCur);
dest += outSizeCur;
outSize -= outSizeCur;
*destLen += outSizeCur;
if (res != 0)
return res;
if (outSizeCur == 0 || outSize == 0)
return SZ_OK;
}
}
void LzmaDec_FreeProbs(CLzmaDec *p, ISzAllocPtr alloc)
{
ISzAlloc_Free(alloc, p->probs);
p->probs = NULL;
}
static void LzmaDec_FreeDict(CLzmaDec *p, ISzAllocPtr alloc)
{
ISzAlloc_Free(alloc, p->dic);
p->dic = NULL;
}
void LzmaDec_Free(CLzmaDec *p, ISzAllocPtr alloc)
{
LzmaDec_FreeProbs(p, alloc);
LzmaDec_FreeDict(p, alloc);
}
SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size)
{
UInt32 dicSize;
Byte d;
if (size < LZMA_PROPS_SIZE)
return SZ_ERROR_UNSUPPORTED;
else
dicSize = data[1] | ((UInt32)data[2] << 8) | ((UInt32)data[3] << 16) | ((UInt32)data[4] << 24);
if (dicSize < LZMA_DIC_MIN)
dicSize = LZMA_DIC_MIN;
p->dicSize = dicSize;
d = data[0];
if (d >= (9 * 5 * 5))
return SZ_ERROR_UNSUPPORTED;
p->lc = (Byte)(d % 9);
d /= 9;
p->pb = (Byte)(d / 5);
p->lp = (Byte)(d % 5);
return SZ_OK;
}
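/* Illustration only (not part of the original source): a tiny self-check showing how the
   common default properties byte 0x5D and a 1 MiB dictionary field are decoded above.
   0x5D = 93 = (pb * 5 + lp) * 9 + lc = (2 * 5 + 0) * 9 + 3  ->  lc = 3, lp = 0, pb = 2. */
static int LzmaProps_Decode_Example(void)
{
  const Byte props[LZMA_PROPS_SIZE] = { 0x5D, 0x00, 0x00, 0x10, 0x00 };
  CLzmaProps decoded;
  if (LzmaProps_Decode(&decoded, props, LZMA_PROPS_SIZE) != SZ_OK)
    return 0;
  return decoded.lc == 3 && decoded.lp == 0 && decoded.pb == 2
      && decoded.dicSize == ((UInt32)1 << 20);
}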
static SRes LzmaDec_AllocateProbs2(CLzmaDec *p, const CLzmaProps *propNew, ISzAllocPtr alloc)
{
UInt32 numProbs = LzmaProps_GetNumProbs(propNew);
if (!p->probs || numProbs != p->numProbs)
{
LzmaDec_FreeProbs(p, alloc);
p->probs = (CLzmaProb *)ISzAlloc_Alloc(alloc, numProbs * sizeof(CLzmaProb));
if (!p->probs)
return SZ_ERROR_MEM;
p->probs_1664 = p->probs + 1664;
p->numProbs = numProbs;
}
return SZ_OK;
}
SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAllocPtr alloc)
{
CLzmaProps propNew;
RINOK(LzmaProps_Decode(&propNew, props, propsSize))
RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc))
p->prop = propNew;
return SZ_OK;
}
SRes LzmaDec_Allocate(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAllocPtr alloc)
{
CLzmaProps propNew;
SizeT dicBufSize;
RINOK(LzmaProps_Decode(&propNew, props, propsSize))
RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc))
{
UInt32 dictSize = propNew.dicSize;
SizeT mask = ((UInt32)1 << 12) - 1;
if (dictSize >= ((UInt32)1 << 30)) mask = ((UInt32)1 << 22) - 1;
else if (dictSize >= ((UInt32)1 << 22)) mask = ((UInt32)1 << 20) - 1;
dicBufSize = ((SizeT)dictSize + mask) & ~mask;
if (dicBufSize < dictSize)
dicBufSize = dictSize;
}
if (!p->dic || dicBufSize != p->dicBufSize)
{
LzmaDec_FreeDict(p, alloc);
p->dic = (Byte *)ISzAlloc_Alloc(alloc, dicBufSize);
if (!p->dic)
{
LzmaDec_FreeProbs(p, alloc);
return SZ_ERROR_MEM;
}
}
p->dicBufSize = dicBufSize;
p->prop = propNew;
return SZ_OK;
}
SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode,
ELzmaStatus *status, ISzAllocPtr alloc)
{
CLzmaDec p;
SRes res;
SizeT outSize = *destLen, inSize = *srcLen;
*destLen = *srcLen = 0;
*status = LZMA_STATUS_NOT_SPECIFIED;
if (inSize < RC_INIT_SIZE)
return SZ_ERROR_INPUT_EOF;
LzmaDec_CONSTRUCT(&p)
RINOK(LzmaDec_AllocateProbs(&p, propData, propSize, alloc))
p.dic = dest;
p.dicBufSize = outSize;
LzmaDec_Init(&p);
*srcLen = inSize;
res = LzmaDec_DecodeToDic(&p, outSize, src, srcLen, finishMode, status);
*destLen = p.dicPos;
if (res == SZ_OK && *status == LZMA_STATUS_NEEDS_MORE_INPUT)
res = SZ_ERROR_INPUT_EOF;
LzmaDec_FreeProbs(&p, alloc);
return res;
}

237
extern/lzma/LzmaDec.h vendored
View File

@@ -1,237 +0,0 @@
/* LzmaDec.h -- LZMA Decoder
2023-04-02 : Igor Pavlov : Public domain */
#ifndef ZIP7_INC_LZMA_DEC_H
#define ZIP7_INC_LZMA_DEC_H
#include "7zTypes.h"
EXTERN_C_BEGIN
/* #define Z7_LZMA_PROB32 */
/* Z7_LZMA_PROB32 can increase the speed on some CPUs,
but memory usage for CLzmaDec::probs will be doubled in that case */
typedef
#ifdef Z7_LZMA_PROB32
UInt32
#else
UInt16
#endif
CLzmaProb;
/* ---------- LZMA Properties ---------- */
#define LZMA_PROPS_SIZE 5
typedef struct
{
Byte lc;
Byte lp;
Byte pb;
Byte _pad_;
UInt32 dicSize;
} CLzmaProps;
/* LzmaProps_Decode - decodes properties
Returns:
SZ_OK
SZ_ERROR_UNSUPPORTED - Unsupported properties
*/
SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size);
/* ---------- LZMA Decoder state ---------- */
/* LZMA_REQUIRED_INPUT_MAX = number of required input bytes for worst case.
Num bits = log2((2^11 / 31) ^ 22) + 26 < 134 + 26 = 160; */
#define LZMA_REQUIRED_INPUT_MAX 20
typedef struct
{
/* Don't change this structure. ASM code can use it. */
CLzmaProps prop;
CLzmaProb *probs;
CLzmaProb *probs_1664;
Byte *dic;
SizeT dicBufSize;
SizeT dicPos;
const Byte *buf;
UInt32 range;
UInt32 code;
UInt32 processedPos;
UInt32 checkDicSize;
UInt32 reps[4];
UInt32 state;
UInt32 remainLen;
UInt32 numProbs;
unsigned tempBufSize;
Byte tempBuf[LZMA_REQUIRED_INPUT_MAX];
} CLzmaDec;
#define LzmaDec_CONSTRUCT(p) { (p)->dic = NULL; (p)->probs = NULL; }
#define LzmaDec_Construct(p) LzmaDec_CONSTRUCT(p)
void LzmaDec_Init(CLzmaDec *p);
/* There are two types of LZMA streams:
- Stream with end mark. That end mark adds about 6 bytes to the compressed size.
- Stream without end mark. You must know the exact uncompressed size to decompress such a stream. */
typedef enum
{
LZMA_FINISH_ANY, /* finish at any point */
LZMA_FINISH_END /* block must be finished at the end */
} ELzmaFinishMode;
/* ELzmaFinishMode has meaning only if the decoding reaches the output limit !!!
You must use LZMA_FINISH_END, when you know that the current output buffer
covers the last bytes of the block. In other cases you must use LZMA_FINISH_ANY.
If the LZMA decoder sees the end marker before reaching the output limit, it returns SZ_OK,
and the output value of destLen will be less than the output buffer size limit.
You can also check the status result.
You can use multiple checks to test data integrity after full decompression:
1) Check the result and the "status" variable.
2) Check that output(destLen) = uncompressedSize, if you know the real uncompressedSize.
3) Check that output(srcLen) = compressedSize, if you know the real compressedSize.
You must use the correct finish mode in that case. */
typedef enum
{
LZMA_STATUS_NOT_SPECIFIED, /* use main error code instead */
LZMA_STATUS_FINISHED_WITH_MARK, /* stream was finished with end mark. */
LZMA_STATUS_NOT_FINISHED, /* stream was not finished */
LZMA_STATUS_NEEDS_MORE_INPUT, /* you must provide more input bytes */
LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK /* there is probability that stream was finished without end mark */
} ELzmaStatus;
/* ELzmaStatus is used only as output value for function call */
/* ---------- Interfaces ---------- */
/* There are 3 levels of interfaces:
1) Dictionary Interface
2) Buffer Interface
3) One Call Interface
You can select any of these interfaces, but don't mix functions from different
groups for same object. */
/* There are two variants to allocate state for Dictionary Interface:
1) LzmaDec_Allocate / LzmaDec_Free
2) LzmaDec_AllocateProbs / LzmaDec_FreeProbs
You can use variant 2, if you set dictionary buffer manually.
For Buffer Interface you must always use variant 1.
LzmaDec_Allocate* can return:
SZ_OK
SZ_ERROR_MEM - Memory allocation error
SZ_ERROR_UNSUPPORTED - Unsupported properties
*/
SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAllocPtr alloc);
void LzmaDec_FreeProbs(CLzmaDec *p, ISzAllocPtr alloc);
SRes LzmaDec_Allocate(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAllocPtr alloc);
void LzmaDec_Free(CLzmaDec *p, ISzAllocPtr alloc);
/* ---------- Dictionary Interface ---------- */
/* You can use it, if you want to eliminate the overhead for data copying from
dictionary to some other external buffer.
You must work with CLzmaDec variables directly in this interface.
STEPS:
LzmaDec_Construct()
LzmaDec_Allocate()
for (each new stream)
{
LzmaDec_Init()
while (it needs more decompression)
{
LzmaDec_DecodeToDic()
use data from CLzmaDec::dic and update CLzmaDec::dicPos
}
}
LzmaDec_Free()
*/
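/*
A minimal sketch of the STEPS above (illustration only, not part of the original header;
the refill of (inBuf), write_block() and the ISzAllocPtr (alloc) are assumed to be
provided by the caller):
  CLzmaDec dec;
  LzmaDec_CONSTRUCT(&dec)
  RINOK(LzmaDec_Allocate(&dec, propData, LZMA_PROPS_SIZE, alloc))
  LzmaDec_Init(&dec);
  for (;;)
  {
    ELzmaStatus status;
    SizeT srcLen = inAvail;            // compressed bytes available in inBuf
    SizeT dicPosBefore = dec.dicPos;
    SRes res = LzmaDec_DecodeToDic(&dec, dec.dicBufSize, inBuf, &srcLen, LZMA_FINISH_ANY, &status);
    write_block(dec.dic + dicPosBefore, dec.dicPos - dicPosBefore);  // freshly decoded bytes
    if (dec.dicPos == dec.dicBufSize)
      dec.dicPos = 0;                  // wrap the dictionary position manually
    if (res != SZ_OK || status == LZMA_STATUS_FINISHED_WITH_MARK)
      break;
    inBuf += srcLen; inAvail -= srcLen;  // advance past consumed input; refill when empty
  }
  LzmaDec_Free(&dec, alloc);
*/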
/* LzmaDec_DecodeToDic
The decoding to internal dictionary buffer (CLzmaDec::dic).
You must manually update CLzmaDec::dicPos, if it reaches CLzmaDec::dicBufSize !!!
finishMode:
It has meaning only if the decoding reaches output limit (dicLimit).
LZMA_FINISH_ANY - Decode just dicLimit bytes.
LZMA_FINISH_END - Stream must be finished after dicLimit.
Returns:
SZ_OK
status:
LZMA_STATUS_FINISHED_WITH_MARK
LZMA_STATUS_NOT_FINISHED
LZMA_STATUS_NEEDS_MORE_INPUT
LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK
SZ_ERROR_DATA - Data error
SZ_ERROR_FAIL - Some unexpected error: internal error of code, memory corruption or hardware failure
*/
SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit,
const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status);
/* ---------- Buffer Interface ---------- */
/* It's zlib-like interface.
See LzmaDec_DecodeToDic description for information about STEPS and return results,
but you must use LzmaDec_DecodeToBuf instead of LzmaDec_DecodeToDic and you don't need
to work with CLzmaDec variables manually.
finishMode:
It has meaning only if the decoding reaches output limit (*destLen).
LZMA_FINISH_ANY - Decode just destLen bytes.
LZMA_FINISH_END - Stream must be finished after (*destLen).
*/
SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen,
const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status);
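/*
A small sketch of this zlib-like call (illustration only, not part of the original header;
(dec) is assumed to be allocated with LzmaDec_Allocate() and initialized with LzmaDec_Init()):
  ELzmaStatus status;
  SizeT destLen = sizeof(outBuf);  // room available in outBuf
  SizeT srcLen = inAvail;          // compressed bytes available in inBuf
  SRes res = LzmaDec_DecodeToBuf(&dec, outBuf, &destLen, inBuf, &srcLen, LZMA_FINISH_ANY, &status);
  // destLen / srcLen now hold the number of bytes produced / consumed;
  // repeat with more input and output space until (status) reports a finished stream.
*/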
/* ---------- One Call Interface ---------- */
/* LzmaDecode
finishMode:
It has meaning only if the decoding reaches output limit (*destLen).
LZMA_FINISH_ANY - Decode just destLen bytes.
LZMA_FINISH_END - Stream must be finished after (*destLen).
Returns:
SZ_OK
status:
LZMA_STATUS_FINISHED_WITH_MARK
LZMA_STATUS_NOT_FINISHED
LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK
SZ_ERROR_DATA - Data error
SZ_ERROR_MEM - Memory allocation error
SZ_ERROR_UNSUPPORTED - Unsupported properties
SZ_ERROR_INPUT_EOF - It needs more bytes in input buffer (src).
SZ_ERROR_FAIL - Some unexpected error: internal error of code, memory corruption or hardware failure
*/
SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode,
ELzmaStatus *status, ISzAllocPtr alloc);
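/*
A minimal one-call sketch (illustration only, not part of the original header; the buffers,
sizes and the ISzAllocPtr (alloc) are assumed to be set up by the caller):
  Byte propData[LZMA_PROPS_SIZE];  // the 5 property bytes stored next to the stream
  SizeT destLen = unpackedSize;    // expected uncompressed size
  SizeT srcLen = packedSize;       // size of the compressed data in (src)
  ELzmaStatus status;
  SRes res = LzmaDecode(dest, &destLen, src, &srcLen,
      propData, LZMA_PROPS_SIZE, LZMA_FINISH_END, &status, alloc);
  // on success: res == SZ_OK, destLen holds the number of bytes written to (dest),
  // and (status) tells whether the end mark was seen.
*/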
EXTERN_C_END
#endif

3144
extern/lzma/LzmaEnc.c vendored
View File

@@ -1,3144 +0,0 @@
/* LzmaEnc.c -- LZMA Encoder
2023-04-13: Igor Pavlov : Public domain */
#include "Precomp.h"
#include <string.h>
/* #define SHOW_STAT */
/* #define SHOW_STAT2 */
#if defined(SHOW_STAT) || defined(SHOW_STAT2)
#include <stdio.h>
#endif
#include "CpuArch.h"
#include "LzmaEnc.h"
#include "LzFind.h"
#ifndef Z7_ST
#include "LzFindMt.h"
#endif
/* the following LzmaEnc_* declarations are the internal LZMA interface for the LZMA2 encoder */
SRes LzmaEnc_PrepareForLzma2(CLzmaEncHandle p, ISeqInStreamPtr inStream, UInt32 keepWindowSize,
ISzAllocPtr alloc, ISzAllocPtr allocBig);
SRes LzmaEnc_MemPrepare(CLzmaEncHandle p, const Byte *src, SizeT srcLen,
UInt32 keepWindowSize, ISzAllocPtr alloc, ISzAllocPtr allocBig);
SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle p, BoolInt reInit,
Byte *dest, size_t *destLen, UInt32 desiredPackSize, UInt32 *unpackSize);
const Byte *LzmaEnc_GetCurBuf(CLzmaEncHandle p);
void LzmaEnc_Finish(CLzmaEncHandle p);
void LzmaEnc_SaveState(CLzmaEncHandle p);
void LzmaEnc_RestoreState(CLzmaEncHandle p);
#ifdef SHOW_STAT
static unsigned g_STAT_OFFSET = 0;
#endif
/* for good normalization speed we still reserve 256 MB before 4 GB range */
#define kLzmaMaxHistorySize ((UInt32)15 << 28)
// #define kNumTopBits 24
#define kTopValue ((UInt32)1 << 24)
#define kNumBitModelTotalBits 11
#define kBitModelTotal (1 << kNumBitModelTotalBits)
#define kNumMoveBits 5
#define kProbInitValue (kBitModelTotal >> 1)
#define kNumMoveReducingBits 4
#define kNumBitPriceShiftBits 4
// #define kBitPrice (1 << kNumBitPriceShiftBits)
#define REP_LEN_COUNT 64
void LzmaEncProps_Init(CLzmaEncProps *p)
{
p->level = 5;
p->dictSize = p->mc = 0;
p->reduceSize = (UInt64)(Int64)-1;
p->lc = p->lp = p->pb = p->algo = p->fb = p->btMode = p->numHashBytes = p->numThreads = -1;
p->numHashOutBits = 0;
p->writeEndMark = 0;
p->affinity = 0;
}
void LzmaEncProps_Normalize(CLzmaEncProps *p)
{
int level = p->level;
if (level < 0) level = 5;
p->level = level;
if (p->dictSize == 0)
p->dictSize =
( level <= 3 ? ((UInt32)1 << (level * 2 + 16)) :
( level <= 6 ? ((UInt32)1 << (level + 19)) :
( level <= 7 ? ((UInt32)1 << 25) : ((UInt32)1 << 26)
)));
if (p->dictSize > p->reduceSize)
{
UInt32 v = (UInt32)p->reduceSize;
const UInt32 kReduceMin = ((UInt32)1 << 12);
if (v < kReduceMin)
v = kReduceMin;
if (p->dictSize > v)
p->dictSize = v;
}
if (p->lc < 0) p->lc = 3;
if (p->lp < 0) p->lp = 0;
if (p->pb < 0) p->pb = 2;
if (p->algo < 0) p->algo = (level < 5 ? 0 : 1);
if (p->fb < 0) p->fb = (level < 7 ? 32 : 64);
if (p->btMode < 0) p->btMode = (p->algo == 0 ? 0 : 1);
if (p->numHashBytes < 0) p->numHashBytes = (p->btMode ? 4 : 5);
if (p->mc == 0) p->mc = (16 + ((unsigned)p->fb >> 1)) >> (p->btMode ? 0 : 1);
if (p->numThreads < 0)
p->numThreads =
#ifndef Z7_ST
((p->btMode && p->algo) ? 2 : 1);
#else
1;
#endif
}
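/*
For orientation, a worked reading of the dictSize formula above (not part of the
original source). The per-level defaults, before the (reduceSize) clamp is applied, are:
  level 1 -> 1 << 18 = 256 KiB
  level 3 -> 1 << 22 =   4 MiB
  level 5 -> 1 << 24 =  16 MiB
  level 7 -> 1 << 25 =  32 MiB
  level 9 -> 1 << 26 =  64 MiB
*/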
UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2)
{
CLzmaEncProps props = *props2;
LzmaEncProps_Normalize(&props);
return props.dictSize;
}
/*
x86/x64:
BSR:
IF (SRC == 0) ZF = 1, DEST is undefined;
AMD : DEST is unchanged;
IF (SRC != 0) ZF = 0; DEST is index of top non-zero bit
BSR is slow in some processors
LZCNT:
IF (SRC == 0) CF = 1, DEST is size_in_bits_of_register(src) (32 or 64)
IF (SRC != 0) CF = 0, DEST = num_lead_zero_bits
IF (DEST == 0) ZF = 1;
LZCNT works only on newer processors, starting from Haswell.
If LZCNT is not supported by the processor, it is executed as BSR.
LZCNT can be faster than BSR, if supported.
*/
// #define LZMA_LOG_BSR
#if defined(MY_CPU_ARM_OR_ARM64) /* || defined(MY_CPU_X86_OR_AMD64) */
#if (defined(__clang__) && (__clang_major__ >= 6)) \
|| (defined(__GNUC__) && (__GNUC__ >= 6))
#define LZMA_LOG_BSR
#elif defined(_MSC_VER) && (_MSC_VER >= 1300)
// #if defined(MY_CPU_ARM_OR_ARM64)
#define LZMA_LOG_BSR
// #endif
#endif
#endif
// #include <intrin.h>
#ifdef LZMA_LOG_BSR
#if defined(__clang__) \
|| defined(__GNUC__)
/*
C code: : (30 - __builtin_clz(x))
gcc9/gcc10 for x64 /x86 : 30 - (bsr(x) xor 31)
clang10 for x64 : 31 + (bsr(x) xor -32)
*/
#define MY_clz(x) ((unsigned)__builtin_clz(x))
// __lzcnt32
// __builtin_ia32_lzcnt_u32
#else // #if defined(_MSC_VER)
#ifdef MY_CPU_ARM_OR_ARM64
#define MY_clz _CountLeadingZeros
#else // if defined(MY_CPU_X86_OR_AMD64)
// #define MY_clz __lzcnt // we can use lzcnt (unsupported by old CPU)
// _BitScanReverse code is not optimal for some MSVC compilers
#define BSR2_RET(pos, res) { unsigned long zz; _BitScanReverse(&zz, (pos)); zz--; \
res = (zz + zz) + (pos >> zz); }
#endif // MY_CPU_X86_OR_AMD64
#endif // _MSC_VER
#ifndef BSR2_RET
#define BSR2_RET(pos, res) { unsigned zz = 30 - MY_clz(pos); \
res = (zz + zz) + (pos >> zz); }
#endif
unsigned GetPosSlot1(UInt32 pos);
unsigned GetPosSlot1(UInt32 pos)
{
unsigned res;
BSR2_RET(pos, res);
return res;
}
#define GetPosSlot2(pos, res) { BSR2_RET(pos, res); }
#define GetPosSlot(pos, res) { if (pos < 2) res = pos; else BSR2_RET(pos, res); }
#else // ! LZMA_LOG_BSR
#define kNumLogBits (11 + sizeof(size_t) / 8 * 3)
#define kDicLogSizeMaxCompress ((kNumLogBits - 1) * 2 + 7)
static void LzmaEnc_FastPosInit(Byte *g_FastPos)
{
unsigned slot;
g_FastPos[0] = 0;
g_FastPos[1] = 1;
g_FastPos += 2;
for (slot = 2; slot < kNumLogBits * 2; slot++)
{
size_t k = ((size_t)1 << ((slot >> 1) - 1));
size_t j;
for (j = 0; j < k; j++)
g_FastPos[j] = (Byte)slot;
g_FastPos += k;
}
}
/* we can use ((limit - pos) >> 31) only if (pos < ((UInt32)1 << 31)) */
/*
#define BSR2_RET(pos, res) { unsigned zz = 6 + ((kNumLogBits - 1) & \
(0 - (((((UInt32)1 << (kNumLogBits + 6)) - 1) - pos) >> 31))); \
res = p->g_FastPos[pos >> zz] + (zz * 2); }
*/
/*
#define BSR2_RET(pos, res) { unsigned zz = 6 + ((kNumLogBits - 1) & \
(0 - (((((UInt32)1 << (kNumLogBits)) - 1) - (pos >> 6)) >> 31))); \
res = p->g_FastPos[pos >> zz] + (zz * 2); }
*/
#define BSR2_RET(pos, res) { unsigned zz = (pos < (1 << (kNumLogBits + 6))) ? 6 : 6 + kNumLogBits - 1; \
res = p->g_FastPos[pos >> zz] + (zz * 2); }
/*
#define BSR2_RET(pos, res) { res = (pos < (1 << (kNumLogBits + 6))) ? \
p->g_FastPos[pos >> 6] + 12 : \
p->g_FastPos[pos >> (6 + kNumLogBits - 1)] + (6 + (kNumLogBits - 1)) * 2; }
*/
#define GetPosSlot1(pos) p->g_FastPos[pos]
#define GetPosSlot2(pos, res) { BSR2_RET(pos, res); }
#define GetPosSlot(pos, res) { if (pos < kNumFullDistances) res = p->g_FastPos[pos & (kNumFullDistances - 1)]; else BSR2_RET(pos, res); }
#endif // LZMA_LOG_BSR
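/*
A quick worked check of the slot mapping above (not part of the original source): the
position slot is twice the index of the top bit of (pos) plus the bit just below it, e.g.
  pos =  5 (binary  101) : zz = 1, slot = 2*1 + (5 >> 1)  = 4
  pos = 12 (binary 1100) : zz = 2, slot = 2*2 + (12 >> 2) = 7
and both variants (the BSR/LZCNT path and the g_FastPos table) compute the same values.
*/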
#define LZMA_NUM_REPS 4
typedef UInt16 CState;
typedef UInt16 CExtra;
typedef struct
{
UInt32 price;
CState state;
CExtra extra;
// 0 : normal
// 1 : LIT : MATCH
// > 1 : MATCH (extra-1) : LIT : REP0 (len)
UInt32 len;
UInt32 dist;
UInt32 reps[LZMA_NUM_REPS];
} COptimal;
// 18.06
#define kNumOpts (1 << 11)
#define kPackReserve (kNumOpts * 8)
// #define kNumOpts (1 << 12)
// #define kPackReserve (1 + kNumOpts * 2)
#define kNumLenToPosStates 4
#define kNumPosSlotBits 6
// #define kDicLogSizeMin 0
#define kDicLogSizeMax 32
#define kDistTableSizeMax (kDicLogSizeMax * 2)
#define kNumAlignBits 4
#define kAlignTableSize (1 << kNumAlignBits)
#define kAlignMask (kAlignTableSize - 1)
#define kStartPosModelIndex 4
#define kEndPosModelIndex 14
#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
typedef
#ifdef Z7_LZMA_PROB32
UInt32
#else
UInt16
#endif
CLzmaProb;
#define LZMA_PB_MAX 4
#define LZMA_LC_MAX 8
#define LZMA_LP_MAX 4
#define LZMA_NUM_PB_STATES_MAX (1 << LZMA_PB_MAX)
#define kLenNumLowBits 3
#define kLenNumLowSymbols (1 << kLenNumLowBits)
#define kLenNumHighBits 8
#define kLenNumHighSymbols (1 << kLenNumHighBits)
#define kLenNumSymbolsTotal (kLenNumLowSymbols * 2 + kLenNumHighSymbols)
#define LZMA_MATCH_LEN_MIN 2
#define LZMA_MATCH_LEN_MAX (LZMA_MATCH_LEN_MIN + kLenNumSymbolsTotal - 1)
#define kNumStates 12
typedef struct
{
CLzmaProb low[LZMA_NUM_PB_STATES_MAX << (kLenNumLowBits + 1)];
CLzmaProb high[kLenNumHighSymbols];
} CLenEnc;
typedef struct
{
unsigned tableSize;
UInt32 prices[LZMA_NUM_PB_STATES_MAX][kLenNumSymbolsTotal];
// UInt32 prices1[LZMA_NUM_PB_STATES_MAX][kLenNumLowSymbols * 2];
// UInt32 prices2[kLenNumSymbolsTotal];
} CLenPriceEnc;
#define GET_PRICE_LEN(p, posState, len) \
((p)->prices[posState][(size_t)(len) - LZMA_MATCH_LEN_MIN])
/*
#define GET_PRICE_LEN(p, posState, len) \
((p)->prices2[(size_t)(len) - 2] + ((p)->prices1[posState][((len) - 2) & (kLenNumLowSymbols * 2 - 1)] & (((len) - 2 - kLenNumLowSymbols * 2) >> 9)))
*/
typedef struct
{
UInt32 range;
unsigned cache;
UInt64 low;
UInt64 cacheSize;
Byte *buf;
Byte *bufLim;
Byte *bufBase;
ISeqOutStreamPtr outStream;
UInt64 processed;
SRes res;
} CRangeEnc;
typedef struct
{
CLzmaProb *litProbs;
unsigned state;
UInt32 reps[LZMA_NUM_REPS];
CLzmaProb posAlignEncoder[1 << kNumAlignBits];
CLzmaProb isRep[kNumStates];
CLzmaProb isRepG0[kNumStates];
CLzmaProb isRepG1[kNumStates];
CLzmaProb isRepG2[kNumStates];
CLzmaProb isMatch[kNumStates][LZMA_NUM_PB_STATES_MAX];
CLzmaProb isRep0Long[kNumStates][LZMA_NUM_PB_STATES_MAX];
CLzmaProb posSlotEncoder[kNumLenToPosStates][1 << kNumPosSlotBits];
CLzmaProb posEncoders[kNumFullDistances];
CLenEnc lenProbs;
CLenEnc repLenProbs;
} CSaveState;
typedef UInt32 CProbPrice;
struct CLzmaEnc
{
void *matchFinderObj;
IMatchFinder2 matchFinder;
unsigned optCur;
unsigned optEnd;
unsigned longestMatchLen;
unsigned numPairs;
UInt32 numAvail;
unsigned state;
unsigned numFastBytes;
unsigned additionalOffset;
UInt32 reps[LZMA_NUM_REPS];
unsigned lpMask, pbMask;
CLzmaProb *litProbs;
CRangeEnc rc;
UInt32 backRes;
unsigned lc, lp, pb;
unsigned lclp;
BoolInt fastMode;
BoolInt writeEndMark;
BoolInt finished;
BoolInt multiThread;
BoolInt needInit;
// BoolInt _maxMode;
UInt64 nowPos64;
unsigned matchPriceCount;
// unsigned alignPriceCount;
int repLenEncCounter;
unsigned distTableSize;
UInt32 dictSize;
SRes result;
#ifndef Z7_ST
BoolInt mtMode;
// begin of CMatchFinderMt is used in LZ thread
CMatchFinderMt matchFinderMt;
// end of CMatchFinderMt is used in BT and HASH threads
// #else
// CMatchFinder matchFinderBase;
#endif
CMatchFinder matchFinderBase;
// we suppose that we have 8-bytes alignment after CMatchFinder
#ifndef Z7_ST
Byte pad[128];
#endif
// LZ thread
CProbPrice ProbPrices[kBitModelTotal >> kNumMoveReducingBits];
// we want {len , dist} pairs to be 8-bytes aligned in matches array
UInt32 matches[LZMA_MATCH_LEN_MAX * 2 + 2];
// we want 8-bytes alignment here
UInt32 alignPrices[kAlignTableSize];
UInt32 posSlotPrices[kNumLenToPosStates][kDistTableSizeMax];
UInt32 distancesPrices[kNumLenToPosStates][kNumFullDistances];
CLzmaProb posAlignEncoder[1 << kNumAlignBits];
CLzmaProb isRep[kNumStates];
CLzmaProb isRepG0[kNumStates];
CLzmaProb isRepG1[kNumStates];
CLzmaProb isRepG2[kNumStates];
CLzmaProb isMatch[kNumStates][LZMA_NUM_PB_STATES_MAX];
CLzmaProb isRep0Long[kNumStates][LZMA_NUM_PB_STATES_MAX];
CLzmaProb posSlotEncoder[kNumLenToPosStates][1 << kNumPosSlotBits];
CLzmaProb posEncoders[kNumFullDistances];
CLenEnc lenProbs;
CLenEnc repLenProbs;
#ifndef LZMA_LOG_BSR
Byte g_FastPos[1 << kNumLogBits];
#endif
CLenPriceEnc lenEnc;
CLenPriceEnc repLenEnc;
COptimal opt[kNumOpts];
CSaveState saveState;
// BoolInt mf_Failure;
#ifndef Z7_ST
Byte pad2[128];
#endif
};
#define MFB (p->matchFinderBase)
/*
#ifndef Z7_ST
#define MFB (p->matchFinderMt.MatchFinder)
#endif
*/
// #define GET_CLzmaEnc_p CLzmaEnc *p = (CLzmaEnc*)(void *)p;
// #define GET_const_CLzmaEnc_p const CLzmaEnc *p = (const CLzmaEnc*)(const void *)p;
#define COPY_ARR(dest, src, arr) memcpy((dest)->arr, (src)->arr, sizeof((src)->arr));
#define COPY_LZMA_ENC_STATE(d, s, p) \
(d)->state = (s)->state; \
COPY_ARR(d, s, reps) \
COPY_ARR(d, s, posAlignEncoder) \
COPY_ARR(d, s, isRep) \
COPY_ARR(d, s, isRepG0) \
COPY_ARR(d, s, isRepG1) \
COPY_ARR(d, s, isRepG2) \
COPY_ARR(d, s, isMatch) \
COPY_ARR(d, s, isRep0Long) \
COPY_ARR(d, s, posSlotEncoder) \
COPY_ARR(d, s, posEncoders) \
(d)->lenProbs = (s)->lenProbs; \
(d)->repLenProbs = (s)->repLenProbs; \
memcpy((d)->litProbs, (s)->litProbs, ((UInt32)0x300 << (p)->lclp) * sizeof(CLzmaProb));
void LzmaEnc_SaveState(CLzmaEncHandle p)
{
// GET_CLzmaEnc_p
CSaveState *v = &p->saveState;
COPY_LZMA_ENC_STATE(v, p, p)
}
void LzmaEnc_RestoreState(CLzmaEncHandle p)
{
// GET_CLzmaEnc_p
const CSaveState *v = &p->saveState;
COPY_LZMA_ENC_STATE(p, v, p)
}
Z7_NO_INLINE
SRes LzmaEnc_SetProps(CLzmaEncHandle p, const CLzmaEncProps *props2)
{
// GET_CLzmaEnc_p
CLzmaEncProps props = *props2;
LzmaEncProps_Normalize(&props);
if (props.lc > LZMA_LC_MAX
|| props.lp > LZMA_LP_MAX
|| props.pb > LZMA_PB_MAX)
return SZ_ERROR_PARAM;
if (props.dictSize > kLzmaMaxHistorySize)
props.dictSize = kLzmaMaxHistorySize;
#ifndef LZMA_LOG_BSR
{
const UInt64 dict64 = props.dictSize;
if (dict64 > ((UInt64)1 << kDicLogSizeMaxCompress))
return SZ_ERROR_PARAM;
}
#endif
p->dictSize = props.dictSize;
{
unsigned fb = (unsigned)props.fb;
if (fb < 5)
fb = 5;
if (fb > LZMA_MATCH_LEN_MAX)
fb = LZMA_MATCH_LEN_MAX;
p->numFastBytes = fb;
}
p->lc = (unsigned)props.lc;
p->lp = (unsigned)props.lp;
p->pb = (unsigned)props.pb;
p->fastMode = (props.algo == 0);
// p->_maxMode = True;
MFB.btMode = (Byte)(props.btMode ? 1 : 0);
// MFB.btMode = (Byte)(props.btMode);
{
unsigned numHashBytes = 4;
if (props.btMode)
{
if (props.numHashBytes < 2) numHashBytes = 2;
else if (props.numHashBytes < 4) numHashBytes = (unsigned)props.numHashBytes;
}
if (props.numHashBytes >= 5) numHashBytes = 5;
MFB.numHashBytes = numHashBytes;
// MFB.numHashBytes_Min = 2;
MFB.numHashOutBits = (Byte)props.numHashOutBits;
}
MFB.cutValue = props.mc;
p->writeEndMark = (BoolInt)props.writeEndMark;
#ifndef Z7_ST
/*
if (newMultiThread != _multiThread)
{
ReleaseMatchFinder();
_multiThread = newMultiThread;
}
*/
p->multiThread = (props.numThreads > 1);
p->matchFinderMt.btSync.affinity =
p->matchFinderMt.hashSync.affinity = props.affinity;
#endif
return SZ_OK;
}
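/*
A small sketch of feeding properties to the encoder (illustration only, not part of the
original source; LzmaEnc_Create() is declared in LzmaEnc.h and (alloc) is any ISzAllocPtr):
  CLzmaEncProps props;
  LzmaEncProps_Init(&props);
  props.level = 9;                      // fields left at their defaults are normalized later
  CLzmaEncHandle enc = LzmaEnc_Create(alloc);
  if (!enc)
    return SZ_ERROR_MEM;
  RINOK(LzmaEnc_SetProps(enc, &props))  // may return SZ_ERROR_PARAM for bad lc/lp/pb
*/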
void LzmaEnc_SetDataSize(CLzmaEncHandle p, UInt64 expectedDataSiize)
{
// GET_CLzmaEnc_p
MFB.expectedDataSize = expectedDataSiize;
}
#define kState_Start 0
#define kState_LitAfterMatch 4
#define kState_LitAfterRep 5
#define kState_MatchAfterLit 7
#define kState_RepAfterLit 8
static const Byte kLiteralNextStates[kNumStates] = {0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 4, 5};
static const Byte kMatchNextStates[kNumStates] = {7, 7, 7, 7, 7, 7, 7, 10, 10, 10, 10, 10};
static const Byte kRepNextStates[kNumStates] = {8, 8, 8, 8, 8, 8, 8, 11, 11, 11, 11, 11};
static const Byte kShortRepNextStates[kNumStates]= {9, 9, 9, 9, 9, 9, 9, 11, 11, 11, 11, 11};
#define IsLitState(s) ((s) < 7)
#define GetLenToPosState2(len) (((len) < kNumLenToPosStates - 1) ? (len) : kNumLenToPosStates - 1)
#define GetLenToPosState(len) (((len) < kNumLenToPosStates + 1) ? (len) - 2 : kNumLenToPosStates - 1)
#define kInfinityPrice (1 << 30)
static void RangeEnc_Construct(CRangeEnc *p)
{
p->outStream = NULL;
p->bufBase = NULL;
}
#define RangeEnc_GetProcessed(p) ( (p)->processed + (size_t)((p)->buf - (p)->bufBase) + (p)->cacheSize)
#define RangeEnc_GetProcessed_sizet(p) ((size_t)(p)->processed + (size_t)((p)->buf - (p)->bufBase) + (size_t)(p)->cacheSize)
#define RC_BUF_SIZE (1 << 16)
static int RangeEnc_Alloc(CRangeEnc *p, ISzAllocPtr alloc)
{
if (!p->bufBase)
{
p->bufBase = (Byte *)ISzAlloc_Alloc(alloc, RC_BUF_SIZE);
if (!p->bufBase)
return 0;
p->bufLim = p->bufBase + RC_BUF_SIZE;
}
return 1;
}
static void RangeEnc_Free(CRangeEnc *p, ISzAllocPtr alloc)
{
ISzAlloc_Free(alloc, p->bufBase);
p->bufBase = NULL;
}
static void RangeEnc_Init(CRangeEnc *p)
{
p->range = 0xFFFFFFFF;
p->cache = 0;
p->low = 0;
p->cacheSize = 0;
p->buf = p->bufBase;
p->processed = 0;
p->res = SZ_OK;
}
Z7_NO_INLINE static void RangeEnc_FlushStream(CRangeEnc *p)
{
const size_t num = (size_t)(p->buf - p->bufBase);
if (p->res == SZ_OK)
{
if (num != ISeqOutStream_Write(p->outStream, p->bufBase, num))
p->res = SZ_ERROR_WRITE;
}
p->processed += num;
p->buf = p->bufBase;
}
Z7_NO_INLINE static void Z7_FASTCALL RangeEnc_ShiftLow(CRangeEnc *p)
{
UInt32 low = (UInt32)p->low;
unsigned high = (unsigned)(p->low >> 32);
p->low = (UInt32)(low << 8);
if (low < (UInt32)0xFF000000 || high != 0)
{
{
Byte *buf = p->buf;
*buf++ = (Byte)(p->cache + high);
p->cache = (unsigned)(low >> 24);
p->buf = buf;
if (buf == p->bufLim)
RangeEnc_FlushStream(p);
if (p->cacheSize == 0)
return;
}
high += 0xFF;
for (;;)
{
Byte *buf = p->buf;
*buf++ = (Byte)(high);
p->buf = buf;
if (buf == p->bufLim)
RangeEnc_FlushStream(p);
if (--p->cacheSize == 0)
return;
}
}
p->cacheSize++;
}
static void RangeEnc_FlushData(CRangeEnc *p)
{
int i;
for (i = 0; i < 5; i++)
RangeEnc_ShiftLow(p);
}
#define RC_NORM(p) if (range < kTopValue) { range <<= 8; RangeEnc_ShiftLow(p); }
#define RC_BIT_PRE(p, prob) \
ttt = *(prob); \
newBound = (range >> kNumBitModelTotalBits) * ttt;
// #define Z7_LZMA_ENC_USE_BRANCH
#ifdef Z7_LZMA_ENC_USE_BRANCH
#define RC_BIT(p, prob, bit) { \
RC_BIT_PRE(p, prob) \
if (bit == 0) { range = newBound; ttt += (kBitModelTotal - ttt) >> kNumMoveBits; } \
else { (p)->low += newBound; range -= newBound; ttt -= ttt >> kNumMoveBits; } \
*(prob) = (CLzmaProb)ttt; \
RC_NORM(p) \
}
#else
#define RC_BIT(p, prob, bit) { \
UInt32 mask; \
RC_BIT_PRE(p, prob) \
mask = 0 - (UInt32)bit; \
range &= mask; \
mask &= newBound; \
range -= mask; \
(p)->low += mask; \
mask = (UInt32)bit - 1; \
range += newBound & mask; \
mask &= (kBitModelTotal - ((1 << kNumMoveBits) - 1)); \
mask += ((1 << kNumMoveBits) - 1); \
ttt += (UInt32)((Int32)(mask - ttt) >> kNumMoveBits); \
*(prob) = (CLzmaProb)ttt; \
RC_NORM(p) \
}
#endif
#define RC_BIT_0_BASE(p, prob) \
range = newBound; *(prob) = (CLzmaProb)(ttt + ((kBitModelTotal - ttt) >> kNumMoveBits));
#define RC_BIT_1_BASE(p, prob) \
range -= newBound; (p)->low += newBound; *(prob) = (CLzmaProb)(ttt - (ttt >> kNumMoveBits)); \
#define RC_BIT_0(p, prob) \
RC_BIT_0_BASE(p, prob) \
RC_NORM(p)
#define RC_BIT_1(p, prob) \
RC_BIT_1_BASE(p, prob) \
RC_NORM(p)
static void RangeEnc_EncodeBit_0(CRangeEnc *p, CLzmaProb *prob)
{
UInt32 range, ttt, newBound;
range = p->range;
RC_BIT_PRE(p, prob)
RC_BIT_0(p, prob)
p->range = range;
}
static void LitEnc_Encode(CRangeEnc *p, CLzmaProb *probs, UInt32 sym)
{
UInt32 range = p->range;
sym |= 0x100;
do
{
UInt32 ttt, newBound;
// RangeEnc_EncodeBit(p, probs + (sym >> 8), (sym >> 7) & 1);
CLzmaProb *prob = probs + (sym >> 8);
UInt32 bit = (sym >> 7) & 1;
sym <<= 1;
RC_BIT(p, prob, bit)
}
while (sym < 0x10000);
p->range = range;
}
static void LitEnc_EncodeMatched(CRangeEnc *p, CLzmaProb *probs, UInt32 sym, UInt32 matchByte)
{
UInt32 range = p->range;
UInt32 offs = 0x100;
sym |= 0x100;
do
{
UInt32 ttt, newBound;
CLzmaProb *prob;
UInt32 bit;
matchByte <<= 1;
// RangeEnc_EncodeBit(p, probs + (offs + (matchByte & offs) + (sym >> 8)), (sym >> 7) & 1);
prob = probs + (offs + (matchByte & offs) + (sym >> 8));
bit = (sym >> 7) & 1;
sym <<= 1;
offs &= ~(matchByte ^ sym);
RC_BIT(p, prob, bit)
}
while (sym < 0x10000);
p->range = range;
}
static void LzmaEnc_InitPriceTables(CProbPrice *ProbPrices)
{
UInt32 i;
for (i = 0; i < (kBitModelTotal >> kNumMoveReducingBits); i++)
{
const unsigned kCyclesBits = kNumBitPriceShiftBits;
UInt32 w = (i << kNumMoveReducingBits) + (1 << (kNumMoveReducingBits - 1));
unsigned bitCount = 0;
unsigned j;
for (j = 0; j < kCyclesBits; j++)
{
w = w * w;
bitCount <<= 1;
while (w >= ((UInt32)1 << 16))
{
w >>= 1;
bitCount++;
}
}
ProbPrices[i] = (CProbPrice)(((unsigned)kNumBitModelTotalBits << kCyclesBits) - 15 - bitCount);
// printf("\n%3d: %5d", i, ProbPrices[i]);
}
}
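/*
In other words (a worked reading, not part of the original source): prices are kept in
1/16-bit units (kNumBitPriceShiftBits = 4), and ProbPrices[prob >> kNumMoveReducingBits]
approximates -16 * log2(prob / 2048), so a bit with probability 1/2 (prob = 1024)
costs 16, i.e. exactly one bit.
*/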
#define GET_PRICE(prob, bit) \
p->ProbPrices[((prob) ^ (unsigned)(((-(int)(bit))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits]
#define GET_PRICEa(prob, bit) \
ProbPrices[((prob) ^ (unsigned)((-((int)(bit))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits]
#define GET_PRICE_0(prob) p->ProbPrices[(prob) >> kNumMoveReducingBits]
#define GET_PRICE_1(prob) p->ProbPrices[((prob) ^ (kBitModelTotal - 1)) >> kNumMoveReducingBits]
#define GET_PRICEa_0(prob) ProbPrices[(prob) >> kNumMoveReducingBits]
#define GET_PRICEa_1(prob) ProbPrices[((prob) ^ (kBitModelTotal - 1)) >> kNumMoveReducingBits]
static UInt32 LitEnc_GetPrice(const CLzmaProb *probs, UInt32 sym, const CProbPrice *ProbPrices)
{
UInt32 price = 0;
sym |= 0x100;
do
{
unsigned bit = sym & 1;
sym >>= 1;
price += GET_PRICEa(probs[sym], bit);
}
while (sym >= 2);
return price;
}
static UInt32 LitEnc_Matched_GetPrice(const CLzmaProb *probs, UInt32 sym, UInt32 matchByte, const CProbPrice *ProbPrices)
{
UInt32 price = 0;
UInt32 offs = 0x100;
sym |= 0x100;
do
{
matchByte <<= 1;
price += GET_PRICEa(probs[offs + (matchByte & offs) + (sym >> 8)], (sym >> 7) & 1);
sym <<= 1;
offs &= ~(matchByte ^ sym);
}
while (sym < 0x10000);
return price;
}
static void RcTree_ReverseEncode(CRangeEnc *rc, CLzmaProb *probs, unsigned numBits, unsigned sym)
{
UInt32 range = rc->range;
unsigned m = 1;
do
{
UInt32 ttt, newBound;
unsigned bit = sym & 1;
// RangeEnc_EncodeBit(rc, probs + m, bit);
sym >>= 1;
RC_BIT(rc, probs + m, bit)
m = (m << 1) | bit;
}
while (--numBits);
rc->range = range;
}
static void LenEnc_Init(CLenEnc *p)
{
unsigned i;
for (i = 0; i < (LZMA_NUM_PB_STATES_MAX << (kLenNumLowBits + 1)); i++)
p->low[i] = kProbInitValue;
for (i = 0; i < kLenNumHighSymbols; i++)
p->high[i] = kProbInitValue;
}
static void LenEnc_Encode(CLenEnc *p, CRangeEnc *rc, unsigned sym, unsigned posState)
{
UInt32 range, ttt, newBound;
CLzmaProb *probs = p->low;
range = rc->range;
RC_BIT_PRE(rc, probs)
if (sym >= kLenNumLowSymbols)
{
RC_BIT_1(rc, probs)
probs += kLenNumLowSymbols;
RC_BIT_PRE(rc, probs)
if (sym >= kLenNumLowSymbols * 2)
{
RC_BIT_1(rc, probs)
rc->range = range;
// RcTree_Encode(rc, p->high, kLenNumHighBits, sym - kLenNumLowSymbols * 2);
LitEnc_Encode(rc, p->high, sym - kLenNumLowSymbols * 2);
return;
}
sym -= kLenNumLowSymbols;
}
// RcTree_Encode(rc, probs + (posState << kLenNumLowBits), kLenNumLowBits, sym);
{
unsigned m;
unsigned bit;
RC_BIT_0(rc, probs)
probs += (posState << (1 + kLenNumLowBits));
bit = (sym >> 2) ; RC_BIT(rc, probs + 1, bit) m = (1 << 1) + bit;
bit = (sym >> 1) & 1; RC_BIT(rc, probs + m, bit) m = (m << 1) + bit;
bit = sym & 1; RC_BIT(rc, probs + m, bit)
rc->range = range;
}
}
static void SetPrices_3(const CLzmaProb *probs, UInt32 startPrice, UInt32 *prices, const CProbPrice *ProbPrices)
{
unsigned i;
for (i = 0; i < 8; i += 2)
{
UInt32 price = startPrice;
UInt32 prob;
price += GET_PRICEa(probs[1 ], (i >> 2));
price += GET_PRICEa(probs[2 + (i >> 2)], (i >> 1) & 1);
prob = probs[4 + (i >> 1)];
prices[i ] = price + GET_PRICEa_0(prob);
prices[i + 1] = price + GET_PRICEa_1(prob);
}
}
Z7_NO_INLINE static void Z7_FASTCALL LenPriceEnc_UpdateTables(
CLenPriceEnc *p,
unsigned numPosStates,
const CLenEnc *enc,
const CProbPrice *ProbPrices)
{
UInt32 b;
{
unsigned prob = enc->low[0];
UInt32 a, c;
unsigned posState;
b = GET_PRICEa_1(prob);
a = GET_PRICEa_0(prob);
c = b + GET_PRICEa_0(enc->low[kLenNumLowSymbols]);
for (posState = 0; posState < numPosStates; posState++)
{
UInt32 *prices = p->prices[posState];
const CLzmaProb *probs = enc->low + (posState << (1 + kLenNumLowBits));
SetPrices_3(probs, a, prices, ProbPrices);
SetPrices_3(probs + kLenNumLowSymbols, c, prices + kLenNumLowSymbols, ProbPrices);
}
}
/*
{
unsigned i;
UInt32 b;
a = GET_PRICEa_0(enc->low[0]);
for (i = 0; i < kLenNumLowSymbols; i++)
p->prices2[i] = a;
a = GET_PRICEa_1(enc->low[0]);
b = a + GET_PRICEa_0(enc->low[kLenNumLowSymbols]);
for (i = kLenNumLowSymbols; i < kLenNumLowSymbols * 2; i++)
p->prices2[i] = b;
a += GET_PRICEa_1(enc->low[kLenNumLowSymbols]);
}
*/
// p->counter = numSymbols;
// p->counter = 64;
{
unsigned i = p->tableSize;
if (i > kLenNumLowSymbols * 2)
{
const CLzmaProb *probs = enc->high;
UInt32 *prices = p->prices[0] + kLenNumLowSymbols * 2;
i -= kLenNumLowSymbols * 2 - 1;
i >>= 1;
b += GET_PRICEa_1(enc->low[kLenNumLowSymbols]);
do
{
/*
p->prices2[i] = a +
// RcTree_GetPrice(enc->high, kLenNumHighBits, i - kLenNumLowSymbols * 2, ProbPrices);
LitEnc_GetPrice(probs, i - kLenNumLowSymbols * 2, ProbPrices);
*/
// UInt32 price = a + RcTree_GetPrice(probs, kLenNumHighBits - 1, sym, ProbPrices);
unsigned sym = --i + (1 << (kLenNumHighBits - 1));
UInt32 price = b;
do
{
unsigned bit = sym & 1;
sym >>= 1;
price += GET_PRICEa(probs[sym], bit);
}
while (sym >= 2);
{
unsigned prob = probs[(size_t)i + (1 << (kLenNumHighBits - 1))];
prices[(size_t)i * 2 ] = price + GET_PRICEa_0(prob);
prices[(size_t)i * 2 + 1] = price + GET_PRICEa_1(prob);
}
}
while (i);
{
unsigned posState;
size_t num = (p->tableSize - kLenNumLowSymbols * 2) * sizeof(p->prices[0][0]);
for (posState = 1; posState < numPosStates; posState++)
memcpy(p->prices[posState] + kLenNumLowSymbols * 2, p->prices[0] + kLenNumLowSymbols * 2, num);
}
}
}
}
/*
#ifdef SHOW_STAT
g_STAT_OFFSET += num;
printf("\n MovePos %u", num);
#endif
*/
#define MOVE_POS(p, num) { \
p->additionalOffset += (num); \
p->matchFinder.Skip(p->matchFinderObj, (UInt32)(num)); }
static unsigned ReadMatchDistances(CLzmaEnc *p, unsigned *numPairsRes)
{
unsigned numPairs;
p->additionalOffset++;
p->numAvail = p->matchFinder.GetNumAvailableBytes(p->matchFinderObj);
{
const UInt32 *d = p->matchFinder.GetMatches(p->matchFinderObj, p->matches);
// if (!d) { p->mf_Failure = True; *numPairsRes = 0; return 0; }
numPairs = (unsigned)(d - p->matches);
}
*numPairsRes = numPairs;
#ifdef SHOW_STAT
printf("\n i = %u numPairs = %u ", g_STAT_OFFSET, numPairs / 2);
g_STAT_OFFSET++;
{
unsigned i;
for (i = 0; i < numPairs; i += 2)
printf("%2u %6u | ", p->matches[i], p->matches[i + 1]);
}
#endif
if (numPairs == 0)
return 0;
{
const unsigned len = p->matches[(size_t)numPairs - 2];
if (len != p->numFastBytes)
return len;
{
UInt32 numAvail = p->numAvail;
if (numAvail > LZMA_MATCH_LEN_MAX)
numAvail = LZMA_MATCH_LEN_MAX;
{
const Byte *p1 = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
const Byte *p2 = p1 + len;
const ptrdiff_t dif = (ptrdiff_t)-1 - (ptrdiff_t)p->matches[(size_t)numPairs - 1];
const Byte *lim = p1 + numAvail;
for (; p2 != lim && *p2 == p2[dif]; p2++)
{}
return (unsigned)(p2 - p1);
}
}
}
}
#define MARK_LIT ((UInt32)(Int32)-1)
#define MakeAs_Lit(p) { (p)->dist = MARK_LIT; (p)->extra = 0; }
#define MakeAs_ShortRep(p) { (p)->dist = 0; (p)->extra = 0; }
#define IsShortRep(p) ((p)->dist == 0)
#define GetPrice_ShortRep(p, state, posState) \
( GET_PRICE_0(p->isRepG0[state]) + GET_PRICE_0(p->isRep0Long[state][posState]))
#define GetPrice_Rep_0(p, state, posState) ( \
GET_PRICE_1(p->isMatch[state][posState]) \
+ GET_PRICE_1(p->isRep0Long[state][posState])) \
+ GET_PRICE_1(p->isRep[state]) \
+ GET_PRICE_0(p->isRepG0[state])
Z7_FORCE_INLINE
static UInt32 GetPrice_PureRep(const CLzmaEnc *p, unsigned repIndex, size_t state, size_t posState)
{
UInt32 price;
UInt32 prob = p->isRepG0[state];
if (repIndex == 0)
{
price = GET_PRICE_0(prob);
price += GET_PRICE_1(p->isRep0Long[state][posState]);
}
else
{
price = GET_PRICE_1(prob);
prob = p->isRepG1[state];
if (repIndex == 1)
price += GET_PRICE_0(prob);
else
{
price += GET_PRICE_1(prob);
price += GET_PRICE(p->isRepG2[state], repIndex - 2);
}
}
return price;
}
static unsigned Backward(CLzmaEnc *p, unsigned cur)
{
unsigned wr = cur + 1;
p->optEnd = wr;
for (;;)
{
UInt32 dist = p->opt[cur].dist;
unsigned len = (unsigned)p->opt[cur].len;
unsigned extra = (unsigned)p->opt[cur].extra;
cur -= len;
if (extra)
{
wr--;
p->opt[wr].len = (UInt32)len;
cur -= extra;
len = extra;
if (extra == 1)
{
p->opt[wr].dist = dist;
dist = MARK_LIT;
}
else
{
p->opt[wr].dist = 0;
len--;
wr--;
p->opt[wr].dist = MARK_LIT;
p->opt[wr].len = 1;
}
}
if (cur == 0)
{
p->backRes = dist;
p->optCur = wr;
return len;
}
wr--;
p->opt[wr].dist = dist;
p->opt[wr].len = (UInt32)len;
}
}
#define LIT_PROBS(pos, prevByte) \
(p->litProbs + (UInt32)3 * (((((pos) << 8) + (prevByte)) & p->lpMask) << p->lc))
static unsigned GetOptimum(CLzmaEnc *p, UInt32 position)
{
unsigned last, cur;
UInt32 reps[LZMA_NUM_REPS];
unsigned repLens[LZMA_NUM_REPS];
UInt32 *matches;
{
UInt32 numAvail;
unsigned numPairs, mainLen, repMaxIndex, i, posState;
UInt32 matchPrice, repMatchPrice;
const Byte *data;
Byte curByte, matchByte;
p->optCur = p->optEnd = 0;
if (p->additionalOffset == 0)
mainLen = ReadMatchDistances(p, &numPairs);
else
{
mainLen = p->longestMatchLen;
numPairs = p->numPairs;
}
numAvail = p->numAvail;
if (numAvail < 2)
{
p->backRes = MARK_LIT;
return 1;
}
if (numAvail > LZMA_MATCH_LEN_MAX)
numAvail = LZMA_MATCH_LEN_MAX;
data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
repMaxIndex = 0;
for (i = 0; i < LZMA_NUM_REPS; i++)
{
unsigned len;
const Byte *data2;
reps[i] = p->reps[i];
data2 = data - reps[i];
if (data[0] != data2[0] || data[1] != data2[1])
{
repLens[i] = 0;
continue;
}
for (len = 2; len < numAvail && data[len] == data2[len]; len++)
{}
repLens[i] = len;
if (len > repLens[repMaxIndex])
repMaxIndex = i;
if (len == LZMA_MATCH_LEN_MAX) // 21.03 : optimization
break;
}
if (repLens[repMaxIndex] >= p->numFastBytes)
{
unsigned len;
p->backRes = (UInt32)repMaxIndex;
len = repLens[repMaxIndex];
MOVE_POS(p, len - 1)
return len;
}
matches = p->matches;
#define MATCHES matches
// #define MATCHES p->matches
if (mainLen >= p->numFastBytes)
{
p->backRes = MATCHES[(size_t)numPairs - 1] + LZMA_NUM_REPS;
MOVE_POS(p, mainLen - 1)
return mainLen;
}
curByte = *data;
matchByte = *(data - reps[0]);
last = repLens[repMaxIndex];
if (last <= mainLen)
last = mainLen;
if (last < 2 && curByte != matchByte)
{
p->backRes = MARK_LIT;
return 1;
}
p->opt[0].state = (CState)p->state;
posState = (position & p->pbMask);
{
const CLzmaProb *probs = LIT_PROBS(position, *(data - 1));
p->opt[1].price = GET_PRICE_0(p->isMatch[p->state][posState]) +
(!IsLitState(p->state) ?
LitEnc_Matched_GetPrice(probs, curByte, matchByte, p->ProbPrices) :
LitEnc_GetPrice(probs, curByte, p->ProbPrices));
}
MakeAs_Lit(&p->opt[1])
matchPrice = GET_PRICE_1(p->isMatch[p->state][posState]);
repMatchPrice = matchPrice + GET_PRICE_1(p->isRep[p->state]);
// 18.06
if (matchByte == curByte && repLens[0] == 0)
{
UInt32 shortRepPrice = repMatchPrice + GetPrice_ShortRep(p, p->state, posState);
if (shortRepPrice < p->opt[1].price)
{
p->opt[1].price = shortRepPrice;
MakeAs_ShortRep(&p->opt[1])
}
if (last < 2)
{
p->backRes = p->opt[1].dist;
return 1;
}
}
p->opt[1].len = 1;
p->opt[0].reps[0] = reps[0];
p->opt[0].reps[1] = reps[1];
p->opt[0].reps[2] = reps[2];
p->opt[0].reps[3] = reps[3];
// ---------- REP ----------
for (i = 0; i < LZMA_NUM_REPS; i++)
{
unsigned repLen = repLens[i];
UInt32 price;
if (repLen < 2)
continue;
price = repMatchPrice + GetPrice_PureRep(p, i, p->state, posState);
do
{
UInt32 price2 = price + GET_PRICE_LEN(&p->repLenEnc, posState, repLen);
COptimal *opt = &p->opt[repLen];
if (price2 < opt->price)
{
opt->price = price2;
opt->len = (UInt32)repLen;
opt->dist = (UInt32)i;
opt->extra = 0;
}
}
while (--repLen >= 2);
}
// ---------- MATCH ----------
{
unsigned len = repLens[0] + 1;
if (len <= mainLen)
{
unsigned offs = 0;
UInt32 normalMatchPrice = matchPrice + GET_PRICE_0(p->isRep[p->state]);
if (len < 2)
len = 2;
else
while (len > MATCHES[offs])
offs += 2;
for (; ; len++)
{
COptimal *opt;
UInt32 dist = MATCHES[(size_t)offs + 1];
UInt32 price = normalMatchPrice + GET_PRICE_LEN(&p->lenEnc, posState, len);
unsigned lenToPosState = GetLenToPosState(len);
if (dist < kNumFullDistances)
price += p->distancesPrices[lenToPosState][dist & (kNumFullDistances - 1)];
else
{
unsigned slot;
GetPosSlot2(dist, slot)
price += p->alignPrices[dist & kAlignMask];
price += p->posSlotPrices[lenToPosState][slot];
}
opt = &p->opt[len];
if (price < opt->price)
{
opt->price = price;
opt->len = (UInt32)len;
opt->dist = dist + LZMA_NUM_REPS;
opt->extra = 0;
}
if (len == MATCHES[offs])
{
offs += 2;
if (offs == numPairs)
break;
}
}
}
}
cur = 0;
#ifdef SHOW_STAT2
/* if (position >= 0) */
{
unsigned i;
printf("\n pos = %4X", position);
for (i = cur; i <= last; i++)
printf("\nprice[%4X] = %u", position - cur + i, p->opt[i].price);
}
#endif
}
// ---------- Optimal Parsing ----------
for (;;)
{
unsigned numAvail;
UInt32 numAvailFull;
unsigned newLen, numPairs, prev, state, posState, startLen;
UInt32 litPrice, matchPrice, repMatchPrice;
BoolInt nextIsLit;
Byte curByte, matchByte;
const Byte *data;
COptimal *curOpt, *nextOpt;
if (++cur == last)
break;
// 18.06
if (cur >= kNumOpts - 64)
{
unsigned j, best;
UInt32 price = p->opt[cur].price;
best = cur;
for (j = cur + 1; j <= last; j++)
{
UInt32 price2 = p->opt[j].price;
if (price >= price2)
{
price = price2;
best = j;
}
}
{
unsigned delta = best - cur;
if (delta != 0)
{
MOVE_POS(p, delta)
}
}
cur = best;
break;
}
newLen = ReadMatchDistances(p, &numPairs);
if (newLen >= p->numFastBytes)
{
p->numPairs = numPairs;
p->longestMatchLen = newLen;
break;
}
curOpt = &p->opt[cur];
position++;
// we need that check here, if skip_items in p->opt are possible
/*
if (curOpt->price >= kInfinityPrice)
continue;
*/
prev = cur - curOpt->len;
if (curOpt->len == 1)
{
state = (unsigned)p->opt[prev].state;
if (IsShortRep(curOpt))
state = kShortRepNextStates[state];
else
state = kLiteralNextStates[state];
}
else
{
const COptimal *prevOpt;
UInt32 b0;
UInt32 dist = curOpt->dist;
if (curOpt->extra)
{
prev -= (unsigned)curOpt->extra;
state = kState_RepAfterLit;
if (curOpt->extra == 1)
state = (dist < LZMA_NUM_REPS ? kState_RepAfterLit : kState_MatchAfterLit);
}
else
{
state = (unsigned)p->opt[prev].state;
if (dist < LZMA_NUM_REPS)
state = kRepNextStates[state];
else
state = kMatchNextStates[state];
}
prevOpt = &p->opt[prev];
b0 = prevOpt->reps[0];
if (dist < LZMA_NUM_REPS)
{
if (dist == 0)
{
reps[0] = b0;
reps[1] = prevOpt->reps[1];
reps[2] = prevOpt->reps[2];
reps[3] = prevOpt->reps[3];
}
else
{
reps[1] = b0;
b0 = prevOpt->reps[1];
if (dist == 1)
{
reps[0] = b0;
reps[2] = prevOpt->reps[2];
reps[3] = prevOpt->reps[3];
}
else
{
reps[2] = b0;
reps[0] = prevOpt->reps[dist];
reps[3] = prevOpt->reps[dist ^ 1];
}
}
}
else
{
reps[0] = (dist - LZMA_NUM_REPS + 1);
reps[1] = b0;
reps[2] = prevOpt->reps[1];
reps[3] = prevOpt->reps[2];
}
}
curOpt->state = (CState)state;
curOpt->reps[0] = reps[0];
curOpt->reps[1] = reps[1];
curOpt->reps[2] = reps[2];
curOpt->reps[3] = reps[3];
data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
curByte = *data;
matchByte = *(data - reps[0]);
posState = (position & p->pbMask);
/*
The order of Price checks:
< LIT
<= SHORT_REP
< LIT : REP_0
< REP [ : LIT : REP_0 ]
< MATCH [ : LIT : REP_0 ]
*/
{
UInt32 curPrice = curOpt->price;
unsigned prob = p->isMatch[state][posState];
matchPrice = curPrice + GET_PRICE_1(prob);
litPrice = curPrice + GET_PRICE_0(prob);
}
nextOpt = &p->opt[(size_t)cur + 1];
nextIsLit = False;
// here we can allow skip_items in p->opt, if we don't check (nextOpt->price < kInfinityPrice)
// 18.new.06
if ((nextOpt->price < kInfinityPrice
// && !IsLitState(state)
&& matchByte == curByte)
|| litPrice > nextOpt->price
)
litPrice = 0;
else
{
const CLzmaProb *probs = LIT_PROBS(position, *(data - 1));
litPrice += (!IsLitState(state) ?
LitEnc_Matched_GetPrice(probs, curByte, matchByte, p->ProbPrices) :
LitEnc_GetPrice(probs, curByte, p->ProbPrices));
if (litPrice < nextOpt->price)
{
nextOpt->price = litPrice;
nextOpt->len = 1;
MakeAs_Lit(nextOpt)
nextIsLit = True;
}
}
repMatchPrice = matchPrice + GET_PRICE_1(p->isRep[state]);
numAvailFull = p->numAvail;
{
unsigned temp = kNumOpts - 1 - cur;
if (numAvailFull > temp)
numAvailFull = (UInt32)temp;
}
// 18.06
// ---------- SHORT_REP ----------
if (IsLitState(state)) // 18.new
if (matchByte == curByte)
if (repMatchPrice < nextOpt->price) // 18.new
// if (numAvailFull < 2 || data[1] != *(data - reps[0] + 1))
if (
// nextOpt->price >= kInfinityPrice ||
nextOpt->len < 2 // we can check nextOpt->len, if skip items are not allowed in p->opt
|| (nextOpt->dist != 0
// && nextOpt->extra <= 1 // 17.old
)
)
{
UInt32 shortRepPrice = repMatchPrice + GetPrice_ShortRep(p, state, posState);
// if (shortRepPrice <= nextOpt->price) // 17.old
if (shortRepPrice < nextOpt->price) // 18.new
{
nextOpt->price = shortRepPrice;
nextOpt->len = 1;
MakeAs_ShortRep(nextOpt)
nextIsLit = False;
}
}
if (numAvailFull < 2)
continue;
numAvail = (numAvailFull <= p->numFastBytes ? numAvailFull : p->numFastBytes);
// numAvail <= p->numFastBytes
// ---------- LIT : REP_0 ----------
if (!nextIsLit
&& litPrice != 0 // 18.new
&& matchByte != curByte
&& numAvailFull > 2)
{
const Byte *data2 = data - reps[0];
if (data[1] == data2[1] && data[2] == data2[2])
{
unsigned len;
unsigned limit = p->numFastBytes + 1;
if (limit > numAvailFull)
limit = numAvailFull;
for (len = 3; len < limit && data[len] == data2[len]; len++)
{}
{
unsigned state2 = kLiteralNextStates[state];
unsigned posState2 = (position + 1) & p->pbMask;
UInt32 price = litPrice + GetPrice_Rep_0(p, state2, posState2);
{
unsigned offset = cur + len;
if (last < offset)
last = offset;
// do
{
UInt32 price2;
COptimal *opt;
len--;
// price2 = price + GetPrice_Len_Rep_0(p, len, state2, posState2);
price2 = price + GET_PRICE_LEN(&p->repLenEnc, posState2, len);
opt = &p->opt[offset];
// offset--;
if (price2 < opt->price)
{
opt->price = price2;
opt->len = (UInt32)len;
opt->dist = 0;
opt->extra = 1;
}
}
// while (len >= 3);
}
}
}
}
startLen = 2; /* speed optimization */
{
// ---------- REP ----------
unsigned repIndex = 0; // 17.old
// unsigned repIndex = IsLitState(state) ? 0 : 1; // 18.notused
for (; repIndex < LZMA_NUM_REPS; repIndex++)
{
unsigned len;
UInt32 price;
const Byte *data2 = data - reps[repIndex];
if (data[0] != data2[0] || data[1] != data2[1])
continue;
for (len = 2; len < numAvail && data[len] == data2[len]; len++)
{}
// if (len < startLen) continue; // 18.new: speed optimization
{
unsigned offset = cur + len;
if (last < offset)
last = offset;
}
{
unsigned len2 = len;
price = repMatchPrice + GetPrice_PureRep(p, repIndex, state, posState);
do
{
UInt32 price2 = price + GET_PRICE_LEN(&p->repLenEnc, posState, len2);
COptimal *opt = &p->opt[cur + len2];
if (price2 < opt->price)
{
opt->price = price2;
opt->len = (UInt32)len2;
opt->dist = (UInt32)repIndex;
opt->extra = 0;
}
}
while (--len2 >= 2);
}
if (repIndex == 0) startLen = len + 1; // 17.old
// startLen = len + 1; // 18.new
/* if (_maxMode) */
{
// ---------- REP : LIT : REP_0 ----------
// numFastBytes + 1 + numFastBytes
unsigned len2 = len + 1;
unsigned limit = len2 + p->numFastBytes;
if (limit > numAvailFull)
limit = numAvailFull;
len2 += 2;
if (len2 <= limit)
if (data[len2 - 2] == data2[len2 - 2])
if (data[len2 - 1] == data2[len2 - 1])
{
unsigned state2 = kRepNextStates[state];
unsigned posState2 = (position + len) & p->pbMask;
price += GET_PRICE_LEN(&p->repLenEnc, posState, len)
+ GET_PRICE_0(p->isMatch[state2][posState2])
+ LitEnc_Matched_GetPrice(LIT_PROBS(position + len, data[(size_t)len - 1]),
data[len], data2[len], p->ProbPrices);
// state2 = kLiteralNextStates[state2];
state2 = kState_LitAfterRep;
posState2 = (posState2 + 1) & p->pbMask;
price += GetPrice_Rep_0(p, state2, posState2);
for (; len2 < limit && data[len2] == data2[len2]; len2++)
{}
len2 -= len;
// if (len2 >= 3)
{
{
unsigned offset = cur + len + len2;
if (last < offset)
last = offset;
// do
{
UInt32 price2;
COptimal *opt;
len2--;
// price2 = price + GetPrice_Len_Rep_0(p, len2, state2, posState2);
price2 = price + GET_PRICE_LEN(&p->repLenEnc, posState2, len2);
opt = &p->opt[offset];
// offset--;
if (price2 < opt->price)
{
opt->price = price2;
opt->len = (UInt32)len2;
opt->extra = (CExtra)(len + 1);
opt->dist = (UInt32)repIndex;
}
}
// while (len2 >= 3);
}
}
}
}
}
}
// ---------- MATCH ----------
/* for (unsigned len = 2; len <= newLen; len++) */
if (newLen > numAvail)
{
newLen = numAvail;
for (numPairs = 0; newLen > MATCHES[numPairs]; numPairs += 2);
MATCHES[numPairs] = (UInt32)newLen;
numPairs += 2;
}
// startLen = 2; /* speed optimization */
if (newLen >= startLen)
{
UInt32 normalMatchPrice = matchPrice + GET_PRICE_0(p->isRep[state]);
UInt32 dist;
unsigned offs, posSlot, len;
{
unsigned offset = cur + newLen;
if (last < offset)
last = offset;
}
offs = 0;
while (startLen > MATCHES[offs])
offs += 2;
dist = MATCHES[(size_t)offs + 1];
// if (dist >= kNumFullDistances)
GetPosSlot2(dist, posSlot)
for (len = /*2*/ startLen; ; len++)
{
UInt32 price = normalMatchPrice + GET_PRICE_LEN(&p->lenEnc, posState, len);
{
COptimal *opt;
unsigned lenNorm = len - 2;
lenNorm = GetLenToPosState2(lenNorm);
if (dist < kNumFullDistances)
price += p->distancesPrices[lenNorm][dist & (kNumFullDistances - 1)];
else
price += p->posSlotPrices[lenNorm][posSlot] + p->alignPrices[dist & kAlignMask];
opt = &p->opt[cur + len];
if (price < opt->price)
{
opt->price = price;
opt->len = (UInt32)len;
opt->dist = dist + LZMA_NUM_REPS;
opt->extra = 0;
}
}
if (len == MATCHES[offs])
{
// if (p->_maxMode) {
// MATCH : LIT : REP_0
const Byte *data2 = data - dist - 1;
unsigned len2 = len + 1;
unsigned limit = len2 + p->numFastBytes;
if (limit > numAvailFull)
limit = numAvailFull;
len2 += 2;
if (len2 <= limit)
if (data[len2 - 2] == data2[len2 - 2])
if (data[len2 - 1] == data2[len2 - 1])
{
for (; len2 < limit && data[len2] == data2[len2]; len2++)
{}
len2 -= len;
// if (len2 >= 3)
{
unsigned state2 = kMatchNextStates[state];
unsigned posState2 = (position + len) & p->pbMask;
unsigned offset;
price += GET_PRICE_0(p->isMatch[state2][posState2]);
price += LitEnc_Matched_GetPrice(LIT_PROBS(position + len, data[(size_t)len - 1]),
data[len], data2[len], p->ProbPrices);
// state2 = kLiteralNextStates[state2];
state2 = kState_LitAfterMatch;
posState2 = (posState2 + 1) & p->pbMask;
price += GetPrice_Rep_0(p, state2, posState2);
offset = cur + len + len2;
if (last < offset)
last = offset;
// do
{
UInt32 price2;
COptimal *opt;
len2--;
// price2 = price + GetPrice_Len_Rep_0(p, len2, state2, posState2);
price2 = price + GET_PRICE_LEN(&p->repLenEnc, posState2, len2);
opt = &p->opt[offset];
// offset--;
if (price2 < opt->price)
{
opt->price = price2;
opt->len = (UInt32)len2;
opt->extra = (CExtra)(len + 1);
opt->dist = dist + LZMA_NUM_REPS;
}
}
// while (len2 >= 3);
}
}
offs += 2;
if (offs == numPairs)
break;
dist = MATCHES[(size_t)offs + 1];
// if (dist >= kNumFullDistances)
GetPosSlot2(dist, posSlot)
}
}
}
}
do
p->opt[last].price = kInfinityPrice;
while (--last);
return Backward(p, cur);
}
#define ChangePair(smallDist, bigDist) (((bigDist) >> 7) > (smallDist))
static unsigned GetOptimumFast(CLzmaEnc *p)
{
UInt32 numAvail, mainDist;
unsigned mainLen, numPairs, repIndex, repLen, i;
const Byte *data;
if (p->additionalOffset == 0)
mainLen = ReadMatchDistances(p, &numPairs);
else
{
mainLen = p->longestMatchLen;
numPairs = p->numPairs;
}
numAvail = p->numAvail;
p->backRes = MARK_LIT;
if (numAvail < 2)
return 1;
// if (mainLen < 2 && p->state == 0) return 1; // 18.06.notused
if (numAvail > LZMA_MATCH_LEN_MAX)
numAvail = LZMA_MATCH_LEN_MAX;
data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
repLen = repIndex = 0;
for (i = 0; i < LZMA_NUM_REPS; i++)
{
unsigned len;
const Byte *data2 = data - p->reps[i];
if (data[0] != data2[0] || data[1] != data2[1])
continue;
for (len = 2; len < numAvail && data[len] == data2[len]; len++)
{}
if (len >= p->numFastBytes)
{
p->backRes = (UInt32)i;
MOVE_POS(p, len - 1)
return len;
}
if (len > repLen)
{
repIndex = i;
repLen = len;
}
}
if (mainLen >= p->numFastBytes)
{
p->backRes = p->matches[(size_t)numPairs - 1] + LZMA_NUM_REPS;
MOVE_POS(p, mainLen - 1)
return mainLen;
}
mainDist = 0; /* for GCC */
if (mainLen >= 2)
{
mainDist = p->matches[(size_t)numPairs - 1];
while (numPairs > 2)
{
UInt32 dist2;
if (mainLen != p->matches[(size_t)numPairs - 4] + 1)
break;
dist2 = p->matches[(size_t)numPairs - 3];
if (!ChangePair(dist2, mainDist))
break;
numPairs -= 2;
mainLen--;
mainDist = dist2;
}
if (mainLen == 2 && mainDist >= 0x80)
mainLen = 1;
}
if (repLen >= 2)
if ( repLen + 1 >= mainLen
|| (repLen + 2 >= mainLen && mainDist >= (1 << 9))
|| (repLen + 3 >= mainLen && mainDist >= (1 << 15)))
{
p->backRes = (UInt32)repIndex;
MOVE_POS(p, repLen - 1)
return repLen;
}
if (mainLen < 2 || numAvail <= 2)
return 1;
{
unsigned len1 = ReadMatchDistances(p, &p->numPairs);
p->longestMatchLen = len1;
if (len1 >= 2)
{
UInt32 newDist = p->matches[(size_t)p->numPairs - 1];
if ( (len1 >= mainLen && newDist < mainDist)
|| (len1 == mainLen + 1 && !ChangePair(mainDist, newDist))
|| (len1 > mainLen + 1)
|| (len1 + 1 >= mainLen && mainLen >= 3 && ChangePair(newDist, mainDist)))
return 1;
}
}
data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
for (i = 0; i < LZMA_NUM_REPS; i++)
{
unsigned len, limit;
const Byte *data2 = data - p->reps[i];
if (data[0] != data2[0] || data[1] != data2[1])
continue;
limit = mainLen - 1;
for (len = 2;; len++)
{
if (len >= limit)
return 1;
if (data[len] != data2[len])
break;
}
}
p->backRes = mainDist + LZMA_NUM_REPS;
if (mainLen != 2)
{
MOVE_POS(p, mainLen - 2)
}
return mainLen;
}
static void WriteEndMarker(CLzmaEnc *p, unsigned posState)
{
UInt32 range;
range = p->rc.range;
{
UInt32 ttt, newBound;
CLzmaProb *prob = &p->isMatch[p->state][posState];
RC_BIT_PRE(&p->rc, prob)
RC_BIT_1(&p->rc, prob)
prob = &p->isRep[p->state];
RC_BIT_PRE(&p->rc, prob)
RC_BIT_0(&p->rc, prob)
}
p->state = kMatchNextStates[p->state];
p->rc.range = range;
LenEnc_Encode(&p->lenProbs, &p->rc, 0, posState);
range = p->rc.range;
{
// RcTree_Encode_PosSlot(&p->rc, p->posSlotEncoder[0], (1 << kNumPosSlotBits) - 1);
CLzmaProb *probs = p->posSlotEncoder[0];
unsigned m = 1;
do
{
UInt32 ttt, newBound;
RC_BIT_PRE(p, probs + m)
RC_BIT_1(&p->rc, probs + m)
m = (m << 1) + 1;
}
while (m < (1 << kNumPosSlotBits));
}
{
// RangeEnc_EncodeDirectBits(&p->rc, ((UInt32)1 << (30 - kNumAlignBits)) - 1, 30 - kNumAlignBits); UInt32 range = p->range;
unsigned numBits = 30 - kNumAlignBits;
do
{
range >>= 1;
p->rc.low += range;
RC_NORM(&p->rc)
}
while (--numBits);
}
{
// RcTree_ReverseEncode(&p->rc, p->posAlignEncoder, kNumAlignBits, kAlignMask);
CLzmaProb *probs = p->posAlignEncoder;
unsigned m = 1;
do
{
UInt32 ttt, newBound;
RC_BIT_PRE(p, probs + m)
RC_BIT_1(&p->rc, probs + m)
m = (m << 1) + 1;
}
while (m < kAlignTableSize);
}
p->rc.range = range;
}
static SRes CheckErrors(CLzmaEnc *p)
{
if (p->result != SZ_OK)
return p->result;
if (p->rc.res != SZ_OK)
p->result = SZ_ERROR_WRITE;
#ifndef Z7_ST
if (
// p->mf_Failure ||
(p->mtMode &&
( // p->matchFinderMt.failure_LZ_LZ ||
p->matchFinderMt.failure_LZ_BT))
)
{
p->result = MY_HRES_ERROR_INTERNAL_ERROR;
// printf("\nCheckErrors p->matchFinderMt.failureLZ\n");
}
#endif
if (MFB.result != SZ_OK)
p->result = SZ_ERROR_READ;
if (p->result != SZ_OK)
p->finished = True;
return p->result;
}
Z7_NO_INLINE static SRes Flush(CLzmaEnc *p, UInt32 nowPos)
{
/* ReleaseMFStream(); */
p->finished = True;
if (p->writeEndMark)
WriteEndMarker(p, nowPos & p->pbMask);
RangeEnc_FlushData(&p->rc);
RangeEnc_FlushStream(&p->rc);
return CheckErrors(p);
}
Z7_NO_INLINE static void FillAlignPrices(CLzmaEnc *p)
{
unsigned i;
const CProbPrice *ProbPrices = p->ProbPrices;
const CLzmaProb *probs = p->posAlignEncoder;
// p->alignPriceCount = 0;
for (i = 0; i < kAlignTableSize / 2; i++)
{
UInt32 price = 0;
unsigned sym = i;
unsigned m = 1;
unsigned bit;
UInt32 prob;
bit = sym & 1; sym >>= 1; price += GET_PRICEa(probs[m], bit); m = (m << 1) + bit;
bit = sym & 1; sym >>= 1; price += GET_PRICEa(probs[m], bit); m = (m << 1) + bit;
bit = sym & 1; sym >>= 1; price += GET_PRICEa(probs[m], bit); m = (m << 1) + bit;
prob = probs[m];
p->alignPrices[i ] = price + GET_PRICEa_0(prob);
p->alignPrices[i + 8] = price + GET_PRICEa_1(prob);
// p->alignPrices[i] = RcTree_ReverseGetPrice(p->posAlignEncoder, kNumAlignBits, i, p->ProbPrices);
}
}
Z7_NO_INLINE static void FillDistancesPrices(CLzmaEnc *p)
{
// int y; for (y = 0; y < 100; y++) {
UInt32 tempPrices[kNumFullDistances];
unsigned i, lps;
const CProbPrice *ProbPrices = p->ProbPrices;
p->matchPriceCount = 0;
for (i = kStartPosModelIndex / 2; i < kNumFullDistances / 2; i++)
{
unsigned posSlot = GetPosSlot1(i);
unsigned footerBits = (posSlot >> 1) - 1;
unsigned base = ((2 | (posSlot & 1)) << footerBits);
const CLzmaProb *probs = p->posEncoders + (size_t)base * 2;
// tempPrices[i] = RcTree_ReverseGetPrice(p->posEncoders + base, footerBits, i - base, p->ProbPrices);
UInt32 price = 0;
unsigned m = 1;
unsigned sym = i;
unsigned offset = (unsigned)1 << footerBits;
base += i;
if (footerBits)
do
{
unsigned bit = sym & 1;
sym >>= 1;
price += GET_PRICEa(probs[m], bit);
m = (m << 1) + bit;
}
while (--footerBits);
{
unsigned prob = probs[m];
tempPrices[base ] = price + GET_PRICEa_0(prob);
tempPrices[base + offset] = price + GET_PRICEa_1(prob);
}
}
for (lps = 0; lps < kNumLenToPosStates; lps++)
{
unsigned slot;
unsigned distTableSize2 = (p->distTableSize + 1) >> 1;
UInt32 *posSlotPrices = p->posSlotPrices[lps];
const CLzmaProb *probs = p->posSlotEncoder[lps];
for (slot = 0; slot < distTableSize2; slot++)
{
// posSlotPrices[slot] = RcTree_GetPrice(encoder, kNumPosSlotBits, slot, p->ProbPrices);
UInt32 price;
unsigned bit;
unsigned sym = slot + (1 << (kNumPosSlotBits - 1));
unsigned prob;
bit = sym & 1; sym >>= 1; price = GET_PRICEa(probs[sym], bit);
bit = sym & 1; sym >>= 1; price += GET_PRICEa(probs[sym], bit);
bit = sym & 1; sym >>= 1; price += GET_PRICEa(probs[sym], bit);
bit = sym & 1; sym >>= 1; price += GET_PRICEa(probs[sym], bit);
bit = sym & 1; sym >>= 1; price += GET_PRICEa(probs[sym], bit);
prob = probs[(size_t)slot + (1 << (kNumPosSlotBits - 1))];
posSlotPrices[(size_t)slot * 2 ] = price + GET_PRICEa_0(prob);
posSlotPrices[(size_t)slot * 2 + 1] = price + GET_PRICEa_1(prob);
}
{
UInt32 delta = ((UInt32)((kEndPosModelIndex / 2 - 1) - kNumAlignBits) << kNumBitPriceShiftBits);
for (slot = kEndPosModelIndex / 2; slot < distTableSize2; slot++)
{
posSlotPrices[(size_t)slot * 2 ] += delta;
posSlotPrices[(size_t)slot * 2 + 1] += delta;
delta += ((UInt32)1 << kNumBitPriceShiftBits);
}
}
{
UInt32 *dp = p->distancesPrices[lps];
dp[0] = posSlotPrices[0];
dp[1] = posSlotPrices[1];
dp[2] = posSlotPrices[2];
dp[3] = posSlotPrices[3];
for (i = 4; i < kNumFullDistances; i += 2)
{
UInt32 slotPrice = posSlotPrices[GetPosSlot1(i)];
dp[i ] = slotPrice + tempPrices[i];
dp[i + 1] = slotPrice + tempPrices[i + 1];
}
}
}
// }
}
static void LzmaEnc_Construct(CLzmaEnc *p)
{
RangeEnc_Construct(&p->rc);
MatchFinder_Construct(&MFB);
#ifndef Z7_ST
p->matchFinderMt.MatchFinder = &MFB;
MatchFinderMt_Construct(&p->matchFinderMt);
#endif
{
CLzmaEncProps props;
LzmaEncProps_Init(&props);
LzmaEnc_SetProps((CLzmaEncHandle)(void *)p, &props);
}
#ifndef LZMA_LOG_BSR
LzmaEnc_FastPosInit(p->g_FastPos);
#endif
LzmaEnc_InitPriceTables(p->ProbPrices);
p->litProbs = NULL;
p->saveState.litProbs = NULL;
}
CLzmaEncHandle LzmaEnc_Create(ISzAllocPtr alloc)
{
void *p;
p = ISzAlloc_Alloc(alloc, sizeof(CLzmaEnc));
if (p)
LzmaEnc_Construct((CLzmaEnc *)p);
return p;
}
static void LzmaEnc_FreeLits(CLzmaEnc *p, ISzAllocPtr alloc)
{
ISzAlloc_Free(alloc, p->litProbs);
ISzAlloc_Free(alloc, p->saveState.litProbs);
p->litProbs = NULL;
p->saveState.litProbs = NULL;
}
static void LzmaEnc_Destruct(CLzmaEnc *p, ISzAllocPtr alloc, ISzAllocPtr allocBig)
{
#ifndef Z7_ST
MatchFinderMt_Destruct(&p->matchFinderMt, allocBig);
#endif
MatchFinder_Free(&MFB, allocBig);
LzmaEnc_FreeLits(p, alloc);
RangeEnc_Free(&p->rc, alloc);
}
void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAllocPtr alloc, ISzAllocPtr allocBig)
{
// GET_CLzmaEnc_p
LzmaEnc_Destruct(p, alloc, allocBig);
ISzAlloc_Free(alloc, p);
}
Z7_NO_INLINE
static SRes LzmaEnc_CodeOneBlock(CLzmaEnc *p, UInt32 maxPackSize, UInt32 maxUnpackSize)
{
UInt32 nowPos32, startPos32;
if (p->needInit)
{
#ifndef Z7_ST
if (p->mtMode)
{
RINOK(MatchFinderMt_InitMt(&p->matchFinderMt))
}
#endif
p->matchFinder.Init(p->matchFinderObj);
p->needInit = 0;
}
if (p->finished)
return p->result;
RINOK(CheckErrors(p))
nowPos32 = (UInt32)p->nowPos64;
startPos32 = nowPos32;
if (p->nowPos64 == 0)
{
unsigned numPairs;
Byte curByte;
if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) == 0)
return Flush(p, nowPos32);
ReadMatchDistances(p, &numPairs);
RangeEnc_EncodeBit_0(&p->rc, &p->isMatch[kState_Start][0]);
// p->state = kLiteralNextStates[p->state];
curByte = *(p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset);
LitEnc_Encode(&p->rc, p->litProbs, curByte);
p->additionalOffset--;
nowPos32++;
}
if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) != 0)
for (;;)
{
UInt32 dist;
unsigned len, posState;
UInt32 range, ttt, newBound;
CLzmaProb *probs;
if (p->fastMode)
len = GetOptimumFast(p);
else
{
unsigned oci = p->optCur;
if (p->optEnd == oci)
len = GetOptimum(p, nowPos32);
else
{
const COptimal *opt = &p->opt[oci];
len = opt->len;
p->backRes = opt->dist;
p->optCur = oci + 1;
}
}
posState = (unsigned)nowPos32 & p->pbMask;
range = p->rc.range;
probs = &p->isMatch[p->state][posState];
RC_BIT_PRE(&p->rc, probs)
dist = p->backRes;
#ifdef SHOW_STAT2
printf("\n pos = %6X, len = %3u pos = %6u", nowPos32, len, dist);
#endif
if (dist == MARK_LIT)
{
Byte curByte;
const Byte *data;
unsigned state;
RC_BIT_0(&p->rc, probs)
p->rc.range = range;
data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset;
probs = LIT_PROBS(nowPos32, *(data - 1));
curByte = *data;
state = p->state;
p->state = kLiteralNextStates[state];
if (IsLitState(state))
LitEnc_Encode(&p->rc, probs, curByte);
else
LitEnc_EncodeMatched(&p->rc, probs, curByte, *(data - p->reps[0]));
}
else
{
RC_BIT_1(&p->rc, probs)
probs = &p->isRep[p->state];
RC_BIT_PRE(&p->rc, probs)
if (dist < LZMA_NUM_REPS)
{
RC_BIT_1(&p->rc, probs)
probs = &p->isRepG0[p->state];
RC_BIT_PRE(&p->rc, probs)
if (dist == 0)
{
RC_BIT_0(&p->rc, probs)
probs = &p->isRep0Long[p->state][posState];
RC_BIT_PRE(&p->rc, probs)
if (len != 1)
{
RC_BIT_1_BASE(&p->rc, probs)
}
else
{
RC_BIT_0_BASE(&p->rc, probs)
p->state = kShortRepNextStates[p->state];
}
}
else
{
RC_BIT_1(&p->rc, probs)
probs = &p->isRepG1[p->state];
RC_BIT_PRE(&p->rc, probs)
if (dist == 1)
{
RC_BIT_0_BASE(&p->rc, probs)
dist = p->reps[1];
}
else
{
RC_BIT_1(&p->rc, probs)
probs = &p->isRepG2[p->state];
RC_BIT_PRE(&p->rc, probs)
if (dist == 2)
{
RC_BIT_0_BASE(&p->rc, probs)
dist = p->reps[2];
}
else
{
RC_BIT_1_BASE(&p->rc, probs)
dist = p->reps[3];
p->reps[3] = p->reps[2];
}
p->reps[2] = p->reps[1];
}
p->reps[1] = p->reps[0];
p->reps[0] = dist;
}
RC_NORM(&p->rc)
p->rc.range = range;
if (len != 1)
{
LenEnc_Encode(&p->repLenProbs, &p->rc, len - LZMA_MATCH_LEN_MIN, posState);
--p->repLenEncCounter;
p->state = kRepNextStates[p->state];
}
}
else
{
unsigned posSlot;
RC_BIT_0(&p->rc, probs)
p->rc.range = range;
p->state = kMatchNextStates[p->state];
LenEnc_Encode(&p->lenProbs, &p->rc, len - LZMA_MATCH_LEN_MIN, posState);
// --p->lenEnc.counter;
dist -= LZMA_NUM_REPS;
p->reps[3] = p->reps[2];
p->reps[2] = p->reps[1];
p->reps[1] = p->reps[0];
p->reps[0] = dist + 1;
p->matchPriceCount++;
GetPosSlot(dist, posSlot)
// RcTree_Encode_PosSlot(&p->rc, p->posSlotEncoder[GetLenToPosState(len)], posSlot);
{
UInt32 sym = (UInt32)posSlot + (1 << kNumPosSlotBits);
range = p->rc.range;
probs = p->posSlotEncoder[GetLenToPosState(len)];
do
{
CLzmaProb *prob = probs + (sym >> kNumPosSlotBits);
UInt32 bit = (sym >> (kNumPosSlotBits - 1)) & 1;
sym <<= 1;
RC_BIT(&p->rc, prob, bit)
}
while (sym < (1 << kNumPosSlotBits * 2));
p->rc.range = range;
}
if (dist >= kStartPosModelIndex)
{
unsigned footerBits = ((posSlot >> 1) - 1);
if (dist < kNumFullDistances)
{
unsigned base = ((2 | (posSlot & 1)) << footerBits);
RcTree_ReverseEncode(&p->rc, p->posEncoders + base, footerBits, (unsigned)(dist /* - base */));
}
else
{
UInt32 pos2 = (dist | 0xF) << (32 - footerBits);
range = p->rc.range;
// RangeEnc_EncodeDirectBits(&p->rc, posReduced >> kNumAlignBits, footerBits - kNumAlignBits);
/*
do
{
range >>= 1;
p->rc.low += range & (0 - ((dist >> --footerBits) & 1));
RC_NORM(&p->rc)
}
while (footerBits > kNumAlignBits);
*/
do
{
range >>= 1;
p->rc.low += range & (0 - (pos2 >> 31));
pos2 += pos2;
RC_NORM(&p->rc)
}
while (pos2 != 0xF0000000);
// RcTree_ReverseEncode(&p->rc, p->posAlignEncoder, kNumAlignBits, posReduced & kAlignMask);
{
unsigned m = 1;
unsigned bit;
bit = dist & 1; dist >>= 1; RC_BIT(&p->rc, p->posAlignEncoder + m, bit) m = (m << 1) + bit;
bit = dist & 1; dist >>= 1; RC_BIT(&p->rc, p->posAlignEncoder + m, bit) m = (m << 1) + bit;
bit = dist & 1; dist >>= 1; RC_BIT(&p->rc, p->posAlignEncoder + m, bit) m = (m << 1) + bit;
bit = dist & 1; RC_BIT(&p->rc, p->posAlignEncoder + m, bit)
p->rc.range = range;
// p->alignPriceCount++;
}
}
}
}
}
nowPos32 += (UInt32)len;
p->additionalOffset -= len;
if (p->additionalOffset == 0)
{
UInt32 processed;
if (!p->fastMode)
{
/*
if (p->alignPriceCount >= 16) // kAlignTableSize
FillAlignPrices(p);
if (p->matchPriceCount >= 128)
FillDistancesPrices(p);
if (p->lenEnc.counter <= 0)
LenPriceEnc_UpdateTables(&p->lenEnc, 1 << p->pb, &p->lenProbs, p->ProbPrices);
*/
if (p->matchPriceCount >= 64)
{
FillAlignPrices(p);
// { int y; for (y = 0; y < 100; y++) {
FillDistancesPrices(p);
// }}
LenPriceEnc_UpdateTables(&p->lenEnc, (unsigned)1 << p->pb, &p->lenProbs, p->ProbPrices);
}
if (p->repLenEncCounter <= 0)
{
p->repLenEncCounter = REP_LEN_COUNT;
LenPriceEnc_UpdateTables(&p->repLenEnc, (unsigned)1 << p->pb, &p->repLenProbs, p->ProbPrices);
}
}
if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) == 0)
break;
processed = nowPos32 - startPos32;
if (maxPackSize)
{
if (processed + kNumOpts + 300 >= maxUnpackSize
|| RangeEnc_GetProcessed_sizet(&p->rc) + kPackReserve >= maxPackSize)
break;
}
else if (processed >= (1 << 17))
{
p->nowPos64 += nowPos32 - startPos32;
return CheckErrors(p);
}
}
}
p->nowPos64 += nowPos32 - startPos32;
return Flush(p, nowPos32);
}
#define kBigHashDicLimit ((UInt32)1 << 24)
static SRes LzmaEnc_Alloc(CLzmaEnc *p, UInt32 keepWindowSize, ISzAllocPtr alloc, ISzAllocPtr allocBig)
{
UInt32 beforeSize = kNumOpts;
UInt32 dictSize;
if (!RangeEnc_Alloc(&p->rc, alloc))
return SZ_ERROR_MEM;
#ifndef Z7_ST
p->mtMode = (p->multiThread && !p->fastMode && (MFB.btMode != 0));
#endif
{
unsigned lclp = p->lc + p->lp;
if (!p->litProbs || !p->saveState.litProbs || p->lclp != lclp)
{
LzmaEnc_FreeLits(p, alloc);
p->litProbs = (CLzmaProb *)ISzAlloc_Alloc(alloc, ((UInt32)0x300 << lclp) * sizeof(CLzmaProb));
p->saveState.litProbs = (CLzmaProb *)ISzAlloc_Alloc(alloc, ((UInt32)0x300 << lclp) * sizeof(CLzmaProb));
if (!p->litProbs || !p->saveState.litProbs)
{
LzmaEnc_FreeLits(p, alloc);
return SZ_ERROR_MEM;
}
p->lclp = lclp;
}
}
MFB.bigHash = (Byte)(p->dictSize > kBigHashDicLimit ? 1 : 0);
dictSize = p->dictSize;
if (dictSize == ((UInt32)2 << 30) ||
dictSize == ((UInt32)3 << 30))
{
/* 21.03 : here we reduce the dictionary for 2 reasons:
1) we don't want 32-bit back_distance matches in decoder for 2 GB dictionary.
2) we want to eliminate useless last MatchFinder_Normalize3() for corner cases,
where data size is aligned for 1 GB: 5/6/8 GB.
That reducing must be >= 1 for such corner cases. */
dictSize -= 1;
}
if (beforeSize + dictSize < keepWindowSize)
beforeSize = keepWindowSize - dictSize;
/* in worst case we can look ahead for
max(LZMA_MATCH_LEN_MAX, numFastBytes + 1 + numFastBytes) bytes.
we send larger value for (keepAfter) to MatchFinder_Create():
(numFastBytes + LZMA_MATCH_LEN_MAX + 1)
*/
#ifndef Z7_ST
if (p->mtMode)
{
RINOK(MatchFinderMt_Create(&p->matchFinderMt, dictSize, beforeSize,
p->numFastBytes, LZMA_MATCH_LEN_MAX + 1 /* 18.04 */
, allocBig))
p->matchFinderObj = &p->matchFinderMt;
MFB.bigHash = (Byte)(MFB.hashMask >= 0xFFFFFF ? 1 : 0);
MatchFinderMt_CreateVTable(&p->matchFinderMt, &p->matchFinder);
}
else
#endif
{
if (!MatchFinder_Create(&MFB, dictSize, beforeSize,
p->numFastBytes, LZMA_MATCH_LEN_MAX + 1 /* 21.03 */
, allocBig))
return SZ_ERROR_MEM;
p->matchFinderObj = &MFB;
MatchFinder_CreateVTable(&MFB, &p->matchFinder);
}
return SZ_OK;
}
static void LzmaEnc_Init(CLzmaEnc *p)
{
unsigned i;
p->state = 0;
p->reps[0] =
p->reps[1] =
p->reps[2] =
p->reps[3] = 1;
RangeEnc_Init(&p->rc);
for (i = 0; i < (1 << kNumAlignBits); i++)
p->posAlignEncoder[i] = kProbInitValue;
for (i = 0; i < kNumStates; i++)
{
unsigned j;
for (j = 0; j < LZMA_NUM_PB_STATES_MAX; j++)
{
p->isMatch[i][j] = kProbInitValue;
p->isRep0Long[i][j] = kProbInitValue;
}
p->isRep[i] = kProbInitValue;
p->isRepG0[i] = kProbInitValue;
p->isRepG1[i] = kProbInitValue;
p->isRepG2[i] = kProbInitValue;
}
{
for (i = 0; i < kNumLenToPosStates; i++)
{
CLzmaProb *probs = p->posSlotEncoder[i];
unsigned j;
for (j = 0; j < (1 << kNumPosSlotBits); j++)
probs[j] = kProbInitValue;
}
}
{
for (i = 0; i < kNumFullDistances; i++)
p->posEncoders[i] = kProbInitValue;
}
{
UInt32 num = (UInt32)0x300 << (p->lp + p->lc);
UInt32 k;
CLzmaProb *probs = p->litProbs;
for (k = 0; k < num; k++)
probs[k] = kProbInitValue;
}
LenEnc_Init(&p->lenProbs);
LenEnc_Init(&p->repLenProbs);
p->optEnd = 0;
p->optCur = 0;
{
for (i = 0; i < kNumOpts; i++)
p->opt[i].price = kInfinityPrice;
}
p->additionalOffset = 0;
p->pbMask = ((unsigned)1 << p->pb) - 1;
p->lpMask = ((UInt32)0x100 << p->lp) - ((unsigned)0x100 >> p->lc);
// p->mf_Failure = False;
}
static void LzmaEnc_InitPrices(CLzmaEnc *p)
{
if (!p->fastMode)
{
FillDistancesPrices(p);
FillAlignPrices(p);
}
p->lenEnc.tableSize =
p->repLenEnc.tableSize =
p->numFastBytes + 1 - LZMA_MATCH_LEN_MIN;
p->repLenEncCounter = REP_LEN_COUNT;
LenPriceEnc_UpdateTables(&p->lenEnc, (unsigned)1 << p->pb, &p->lenProbs, p->ProbPrices);
LenPriceEnc_UpdateTables(&p->repLenEnc, (unsigned)1 << p->pb, &p->repLenProbs, p->ProbPrices);
}
static SRes LzmaEnc_AllocAndInit(CLzmaEnc *p, UInt32 keepWindowSize, ISzAllocPtr alloc, ISzAllocPtr allocBig)
{
unsigned i;
for (i = kEndPosModelIndex / 2; i < kDicLogSizeMax; i++)
if (p->dictSize <= ((UInt32)1 << i))
break;
p->distTableSize = i * 2;
p->finished = False;
p->result = SZ_OK;
p->nowPos64 = 0;
p->needInit = 1;
RINOK(LzmaEnc_Alloc(p, keepWindowSize, alloc, allocBig))
LzmaEnc_Init(p);
LzmaEnc_InitPrices(p);
return SZ_OK;
}
static SRes LzmaEnc_Prepare(CLzmaEncHandle p,
ISeqOutStreamPtr outStream,
ISeqInStreamPtr inStream,
ISzAllocPtr alloc, ISzAllocPtr allocBig)
{
// GET_CLzmaEnc_p
MatchFinder_SET_STREAM(&MFB, inStream)
p->rc.outStream = outStream;
return LzmaEnc_AllocAndInit(p, 0, alloc, allocBig);
}
SRes LzmaEnc_PrepareForLzma2(CLzmaEncHandle p,
ISeqInStreamPtr inStream, UInt32 keepWindowSize,
ISzAllocPtr alloc, ISzAllocPtr allocBig)
{
// GET_CLzmaEnc_p
MatchFinder_SET_STREAM(&MFB, inStream)
return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig);
}
SRes LzmaEnc_MemPrepare(CLzmaEncHandle p,
const Byte *src, SizeT srcLen,
UInt32 keepWindowSize,
ISzAllocPtr alloc, ISzAllocPtr allocBig)
{
// GET_CLzmaEnc_p
MatchFinder_SET_DIRECT_INPUT_BUF(&MFB, src, srcLen)
LzmaEnc_SetDataSize(p, srcLen);
return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig);
}
void LzmaEnc_Finish(CLzmaEncHandle p)
{
#ifndef Z7_ST
// GET_CLzmaEnc_p
if (p->mtMode)
MatchFinderMt_ReleaseStream(&p->matchFinderMt);
#else
UNUSED_VAR(p)
#endif
}
typedef struct
{
ISeqOutStream vt;
Byte *data;
size_t rem;
BoolInt overflow;
} CLzmaEnc_SeqOutStreamBuf;
static size_t SeqOutStreamBuf_Write(ISeqOutStreamPtr pp, const void *data, size_t size)
{
Z7_CONTAINER_FROM_VTBL_TO_DECL_VAR_pp_vt_p(CLzmaEnc_SeqOutStreamBuf)
if (p->rem < size)
{
size = p->rem;
p->overflow = True;
}
if (size != 0)
{
memcpy(p->data, data, size);
p->rem -= size;
p->data += size;
}
return size;
}
/*
UInt32 LzmaEnc_GetNumAvailableBytes(CLzmaEncHandle p)
{
GET_const_CLzmaEnc_p
return p->matchFinder.GetNumAvailableBytes(p->matchFinderObj);
}
*/
const Byte *LzmaEnc_GetCurBuf(CLzmaEncHandle p)
{
// GET_const_CLzmaEnc_p
return p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset;
}
// (desiredPackSize == 0) is not allowed
SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle p, BoolInt reInit,
Byte *dest, size_t *destLen, UInt32 desiredPackSize, UInt32 *unpackSize)
{
// GET_CLzmaEnc_p
UInt64 nowPos64;
SRes res;
CLzmaEnc_SeqOutStreamBuf outStream;
outStream.vt.Write = SeqOutStreamBuf_Write;
outStream.data = dest;
outStream.rem = *destLen;
outStream.overflow = False;
p->writeEndMark = False;
p->finished = False;
p->result = SZ_OK;
if (reInit)
LzmaEnc_Init(p);
LzmaEnc_InitPrices(p);
RangeEnc_Init(&p->rc);
p->rc.outStream = &outStream.vt;
nowPos64 = p->nowPos64;
res = LzmaEnc_CodeOneBlock(p, desiredPackSize, *unpackSize);
*unpackSize = (UInt32)(p->nowPos64 - nowPos64);
*destLen -= outStream.rem;
if (outStream.overflow)
return SZ_ERROR_OUTPUT_EOF;
return res;
}
Z7_NO_INLINE
static SRes LzmaEnc_Encode2(CLzmaEnc *p, ICompressProgressPtr progress)
{
SRes res = SZ_OK;
#ifndef Z7_ST
Byte allocaDummy[0x300];
allocaDummy[0] = 0;
allocaDummy[1] = allocaDummy[0];
#endif
for (;;)
{
res = LzmaEnc_CodeOneBlock(p, 0, 0);
if (res != SZ_OK || p->finished)
break;
if (progress)
{
res = ICompressProgress_Progress(progress, p->nowPos64, RangeEnc_GetProcessed(&p->rc));
if (res != SZ_OK)
{
res = SZ_ERROR_PROGRESS;
break;
}
}
}
LzmaEnc_Finish((CLzmaEncHandle)(void *)p);
/*
if (res == SZ_OK && !Inline_MatchFinder_IsFinishedOK(&MFB))
res = SZ_ERROR_FAIL;
}
*/
return res;
}
SRes LzmaEnc_Encode(CLzmaEncHandle p, ISeqOutStreamPtr outStream, ISeqInStreamPtr inStream, ICompressProgressPtr progress,
ISzAllocPtr alloc, ISzAllocPtr allocBig)
{
// GET_CLzmaEnc_p
RINOK(LzmaEnc_Prepare(p, outStream, inStream, alloc, allocBig))
return LzmaEnc_Encode2(p, progress);
}
SRes LzmaEnc_WriteProperties(CLzmaEncHandle p, Byte *props, SizeT *size)
{
if (*size < LZMA_PROPS_SIZE)
return SZ_ERROR_PARAM;
*size = LZMA_PROPS_SIZE;
{
// GET_CLzmaEnc_p
const UInt32 dictSize = p->dictSize;
UInt32 v;
props[0] = (Byte)((p->pb * 5 + p->lp) * 9 + p->lc);
// we write aligned dictionary value to properties for lzma decoder
if (dictSize >= ((UInt32)1 << 21))
{
const UInt32 kDictMask = ((UInt32)1 << 20) - 1;
v = (dictSize + kDictMask) & ~kDictMask;
if (v < dictSize)
v = dictSize;
}
else
{
unsigned i = 11 * 2;
do
{
v = (UInt32)(2 + (i & 1)) << (i >> 1);
i++;
}
while (v < dictSize);
}
SetUi32(props + 1, v)
return SZ_OK;
}
}
unsigned LzmaEnc_IsWriteEndMark(CLzmaEncHandle p)
{
// GET_CLzmaEnc_p
return (unsigned)p->writeEndMark;
}
SRes LzmaEnc_MemEncode(CLzmaEncHandle p, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
int writeEndMark, ICompressProgressPtr progress, ISzAllocPtr alloc, ISzAllocPtr allocBig)
{
SRes res;
// GET_CLzmaEnc_p
CLzmaEnc_SeqOutStreamBuf outStream;
outStream.vt.Write = SeqOutStreamBuf_Write;
outStream.data = dest;
outStream.rem = *destLen;
outStream.overflow = False;
p->writeEndMark = writeEndMark;
p->rc.outStream = &outStream.vt;
res = LzmaEnc_MemPrepare(p, src, srcLen, 0, alloc, allocBig);
if (res == SZ_OK)
{
res = LzmaEnc_Encode2(p, progress);
if (res == SZ_OK && p->nowPos64 != srcLen)
res = SZ_ERROR_FAIL;
}
*destLen -= (SizeT)outStream.rem;
if (outStream.overflow)
return SZ_ERROR_OUTPUT_EOF;
return res;
}
SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark,
ICompressProgressPtr progress, ISzAllocPtr alloc, ISzAllocPtr allocBig)
{
CLzmaEncHandle p = LzmaEnc_Create(alloc);
SRes res;
if (!p)
return SZ_ERROR_MEM;
res = LzmaEnc_SetProps(p, props);
if (res == SZ_OK)
{
res = LzmaEnc_WriteProperties(p, propsEncoded, propsSize);
if (res == SZ_OK)
res = LzmaEnc_MemEncode(p, dest, destLen, src, srcLen,
writeEndMark, progress, alloc, allocBig);
}
LzmaEnc_Destroy(p, alloc, allocBig);
return res;
}
/*
#ifndef Z7_ST
void LzmaEnc_GetLzThreads(CLzmaEncHandle p, HANDLE lz_threads[2])
{
GET_const_CLzmaEnc_p
lz_threads[0] = p->matchFinderMt.hashSync.thread;
lz_threads[1] = p->matchFinderMt.btSync.thread;
}
#endif
*/

83
extern/lzma/LzmaEnc.h vendored
View File

@@ -1,83 +0,0 @@
/* LzmaEnc.h -- LZMA Encoder
2023-04-13 : Igor Pavlov : Public domain */
#ifndef ZIP7_INC_LZMA_ENC_H
#define ZIP7_INC_LZMA_ENC_H
#include "7zTypes.h"
EXTERN_C_BEGIN
#define LZMA_PROPS_SIZE 5
typedef struct
{
int level; /* 0 <= level <= 9 */
UInt32 dictSize; /* (1 << 12) <= dictSize <= (1 << 27) for 32-bit version
(1 << 12) <= dictSize <= (3 << 29) for 64-bit version
default = (1 << 24) */
int lc; /* 0 <= lc <= 8, default = 3 */
int lp; /* 0 <= lp <= 4, default = 0 */
int pb; /* 0 <= pb <= 4, default = 2 */
int algo; /* 0 - fast, 1 - normal, default = 1 */
int fb; /* 5 <= fb <= 273, default = 32 */
int btMode; /* 0 - hashChain Mode, 1 - binTree mode - normal, default = 1 */
int numHashBytes; /* 2, 3 or 4, default = 4 */
unsigned numHashOutBits; /* default = ? */
UInt32 mc; /* 1 <= mc <= (1 << 30), default = 32 */
unsigned writeEndMark; /* 0 - do not write EOPM, 1 - write EOPM, default = 0 */
int numThreads; /* 1 or 2, default = 2 */
// int _pad;
UInt64 reduceSize; /* estimated size of data that will be compressed. default = (UInt64)(Int64)-1.
Encoder uses this value to reduce dictionary size */
UInt64 affinity;
} CLzmaEncProps;
void LzmaEncProps_Init(CLzmaEncProps *p);
void LzmaEncProps_Normalize(CLzmaEncProps *p);
UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2);
/* ---------- CLzmaEncHandle Interface ---------- */
/* LzmaEnc* functions can return the following exit codes:
SRes:
SZ_OK - OK
SZ_ERROR_MEM - Memory allocation error
SZ_ERROR_PARAM - Incorrect parameter in props
SZ_ERROR_WRITE - ISeqOutStream write callback error
SZ_ERROR_OUTPUT_EOF - output buffer overflow - version with (Byte *) output
SZ_ERROR_PROGRESS - some break from progress callback
SZ_ERROR_THREAD - error in multithreading functions (only for Mt version)
*/
typedef struct CLzmaEnc CLzmaEnc;
typedef CLzmaEnc * CLzmaEncHandle;
// Z7_DECLARE_HANDLE(CLzmaEncHandle)
CLzmaEncHandle LzmaEnc_Create(ISzAllocPtr alloc);
void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAllocPtr alloc, ISzAllocPtr allocBig);
SRes LzmaEnc_SetProps(CLzmaEncHandle p, const CLzmaEncProps *props);
void LzmaEnc_SetDataSize(CLzmaEncHandle p, UInt64 expectedDataSize);
SRes LzmaEnc_WriteProperties(CLzmaEncHandle p, Byte *properties, SizeT *size);
unsigned LzmaEnc_IsWriteEndMark(CLzmaEncHandle p);
SRes LzmaEnc_Encode(CLzmaEncHandle p, ISeqOutStreamPtr outStream, ISeqInStreamPtr inStream,
ICompressProgressPtr progress, ISzAllocPtr alloc, ISzAllocPtr allocBig);
SRes LzmaEnc_MemEncode(CLzmaEncHandle p, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
int writeEndMark, ICompressProgressPtr progress, ISzAllocPtr alloc, ISzAllocPtr allocBig);
/* ---------- One Call Interface ---------- */
SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark,
ICompressProgressPtr progress, ISzAllocPtr alloc, ISzAllocPtr allocBig);
EXTERN_C_END
#endif
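/* Illustration only: a minimal sketch of the one-call interface declared above.
   The helper name, the level override and the use of g_Alloc (from Alloc.h, as in
   LzmaLib.c below) are assumptions of this example, not part of the removed header. */
#include "Alloc.h"
#include "LzmaEnc.h"
static SRes compress_block(Byte *dest, SizeT *destLen,
                           const Byte *src, SizeT srcLen,
                           Byte propsEncoded[LZMA_PROPS_SIZE])
{
  CLzmaEncProps props;
  SizeT propsSize = LZMA_PROPS_SIZE;
  LzmaEncProps_Init(&props);   /* defaults: level 5, 16 MB dictionary, lc=3, lp=0, pb=2 */
  props.level = 9;             /* illustrative override */
  return LzmaEncode(dest, destLen, src, srcLen,
                    &props, propsEncoded, &propsSize, 0 /* writeEndMark */,
                    NULL /* progress */, &g_Alloc, &g_Alloc);
}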

42
extern/lzma/LzmaLib.c vendored
View File

@@ -1,42 +0,0 @@
/* LzmaLib.c -- LZMA library wrapper
2023-04-02 : Igor Pavlov : Public domain */
#include "Precomp.h"
#include "Alloc.h"
#include "LzmaDec.h"
#include "LzmaEnc.h"
#include "LzmaLib.h"
Z7_STDAPI LzmaCompress(unsigned char *dest, size_t *destLen, const unsigned char *src, size_t srcLen,
unsigned char *outProps, size_t *outPropsSize,
int level, /* 0 <= level <= 9, default = 5 */
unsigned dictSize, /* use (1 << N) or (3 << N). 4 KB < dictSize <= 128 MB */
int lc, /* 0 <= lc <= 8, default = 3 */
int lp, /* 0 <= lp <= 4, default = 0 */
int pb, /* 0 <= pb <= 4, default = 2 */
int fb, /* 5 <= fb <= 273, default = 32 */
int numThreads /* 1 or 2, default = 2 */
)
{
CLzmaEncProps props;
LzmaEncProps_Init(&props);
props.level = level;
props.dictSize = dictSize;
props.lc = lc;
props.lp = lp;
props.pb = pb;
props.fb = fb;
props.numThreads = numThreads;
return LzmaEncode(dest, destLen, src, srcLen, &props, outProps, outPropsSize, 0,
NULL, &g_Alloc, &g_Alloc);
}
Z7_STDAPI LzmaUncompress(unsigned char *dest, size_t *destLen, const unsigned char *src, size_t *srcLen,
const unsigned char *props, size_t propsSize)
{
ELzmaStatus status;
return LzmaDecode(dest, destLen, src, srcLen, props, (unsigned)propsSize, LZMA_FINISH_ANY, &status, &g_Alloc);
}

138
extern/lzma/LzmaLib.h vendored
View File

@@ -1,138 +0,0 @@
/* LzmaLib.h -- LZMA library interface
2023-04-02 : Igor Pavlov : Public domain */
#ifndef ZIP7_INC_LZMA_LIB_H
#define ZIP7_INC_LZMA_LIB_H
#include "7zTypes.h"
EXTERN_C_BEGIN
#define Z7_STDAPI int Z7_STDCALL
#define LZMA_PROPS_SIZE 5
/*
RAM requirements for LZMA:
for compression: (dictSize * 11.5 + 6 MB) + state_size
for decompression: dictSize + state_size
state_size = (4 + (1.5 << (lc + lp))) KB
by default (lc=3, lp=0), state_size = 16 KB.
LZMA properties (5 bytes) format
Offset Size Description
0 1 lc, lp and pb in encoded form.
1 4 dictSize (little endian).
*/
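/* Illustration only (not part of the original header): one way a caller could
   unpack the 5-byte properties blob described above; the helper name is hypothetical. */
static void LzmaProps_Parse(const unsigned char props[LZMA_PROPS_SIZE],
                            unsigned *lc, unsigned *lp, unsigned *pb,
                            unsigned *dictSize)
{
  unsigned d = props[0];           /* d = (pb * 5 + lp) * 9 + lc */
  *lc = d % 9;  d /= 9;
  *lp = d % 5;
  *pb = d / 5;
  *dictSize = (unsigned)props[1]   /* little-endian 32-bit value */
            | ((unsigned)props[2] << 8)
            | ((unsigned)props[3] << 16)
            | ((unsigned)props[4] << 24);
}
/* With the defaults lc=3, lp=0 the state size works out to 4 + 1.5*2^3 = 16 KB,
   matching the note above. */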
/*
LzmaCompress
------------
outPropsSize -
In: the pointer to the size of outProps buffer; *outPropsSize = LZMA_PROPS_SIZE = 5.
Out: the pointer to the size of written properties in outProps buffer; *outPropsSize = LZMA_PROPS_SIZE = 5.
LZMA Encoder will use default values for any parameter, if it is
-1 for any from: level, lc, lp, pb, fb, numThreads
0 for dictSize
level - compression level: 0 <= level <= 9;
level dictSize algo fb
0: 64 KB 0 32
1: 256 KB 0 32
2: 1 MB 0 32
3: 4 MB 0 32
4: 16 MB 0 32
5: 16 MB 1 32
6: 32 MB 1 32
7: 32 MB 1 64
8: 64 MB 1 64
9: 64 MB 1 64
The default value for "level" is 5.
algo = 0 means fast method
algo = 1 means normal method
dictSize - The dictionary size in bytes. The maximum value is
128 MB = (1 << 27) bytes for 32-bit version
1 GB = (1 << 30) bytes for 64-bit version
The default value is 16 MB = (1 << 24) bytes.
It's recommended to use the dictionary that is larger than 4 KB and
that can be calculated as (1 << N) or (3 << N) sizes.
lc - The number of literal context bits (high bits of previous literal).
It can be in the range from 0 to 8. The default value is 3.
Sometimes lc=4 gives the gain for big files.
lp - The number of literal pos bits (low bits of current position for literals).
It can be in the range from 0 to 4. The default value is 0.
The lp switch is intended for periodical data when the period is equal to 2^lp.
For example, for 32-bit (4 bytes) periodical data you can use lp=2. Often it's
better to set lc=0, if you change lp switch.
pb - The number of pos bits (low bits of current position).
It can be in the range from 0 to 4. The default value is 2.
The pb switch is intended for periodical data when the period is equal 2^pb.
fb - Word size (the number of fast bytes).
It can be in the range from 5 to 273. The default value is 32.
Usually, a big number gives a little bit better compression ratio and
slower compression process.
numThreads - The number of threads. 1 or 2. The default value is 2.
Fast mode (algo = 0) can use only 1 thread.
In:
dest - output data buffer
destLen - output data buffer size
src - input data
srcLen - input data size
Out:
destLen - processed output size
Returns:
SZ_OK - OK
SZ_ERROR_MEM - Memory allocation error
SZ_ERROR_PARAM - Incorrect parameter
SZ_ERROR_OUTPUT_EOF - output buffer overflow
SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version)
*/
Z7_STDAPI LzmaCompress(unsigned char *dest, size_t *destLen, const unsigned char *src, size_t srcLen,
unsigned char *outProps, size_t *outPropsSize, /* *outPropsSize must be = 5 */
int level, /* 0 <= level <= 9, default = 5 */
unsigned dictSize, /* default = (1 << 24) */
int lc, /* 0 <= lc <= 8, default = 3 */
int lp, /* 0 <= lp <= 4, default = 0 */
int pb, /* 0 <= pb <= 4, default = 2 */
int fb, /* 5 <= fb <= 273, default = 32 */
int numThreads /* 1 or 2, default = 2 */
);
/*
LzmaUncompress
--------------
In:
dest - output data buffer
destLen - output data buffer size
src - input data
srcLen - input data size
Out:
destLen - processed output size
srcLen - processed input size
Returns:
SZ_OK - OK
SZ_ERROR_DATA - Data error
SZ_ERROR_MEM - Memory allocation error
SZ_ERROR_UNSUPPORTED - Unsupported properties
SZ_ERROR_INPUT_EOF - it needs more bytes in input buffer (src)
*/
Z7_STDAPI LzmaUncompress(unsigned char *dest, size_t *destLen, const unsigned char *src, SizeT *srcLen,
const unsigned char *props, size_t propsSize);
EXTERN_C_END
#endif
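/* Illustration only: a hypothetical in-memory round trip through the two calls
   declared above. The function name, the buffer sizing (srcLen + srcLen / 3 + 128
   as an assumed worst case) and the error handling are examples, not part of the
   removed header. */
#include <stdlib.h>
#include <string.h>
#include "LzmaLib.h"
static int lzma_roundtrip(const unsigned char *src, size_t srcLen)
{
  unsigned char props[LZMA_PROPS_SIZE];
  size_t propsSize = LZMA_PROPS_SIZE;
  size_t packedLen = srcLen + srcLen / 3 + 128;
  size_t unpackedLen = srcLen;
  unsigned char *packed = (unsigned char *)malloc(packedLen);
  unsigned char *unpacked = (unsigned char *)malloc(srcLen);
  int res;
  if (!packed || !unpacked)
  {
    free(packed); free(unpacked);
    return SZ_ERROR_MEM;
  }
  res = LzmaCompress(packed, &packedLen, src, srcLen,
                     props, &propsSize,
                     5,        /* level */
                     1 << 24,  /* dictSize = 16 MB (default) */
                     3, 0, 2,  /* lc, lp, pb */
                     32,       /* fb */
                     2);       /* numThreads */
  if (res == SZ_OK)
  {
    SizeT consumed = (SizeT)packedLen;
    res = LzmaUncompress(unpacked, &unpackedLen, packed, &consumed, props, propsSize);
    if (res == SZ_OK && (unpackedLen != srcLen || memcmp(unpacked, src, srcLen) != 0))
      res = SZ_ERROR_DATA;
  }
  free(packed);
  free(unpacked);
  return res;
}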

10
extern/lzma/Precomp.h vendored
View File

@@ -1,10 +0,0 @@
/* Precomp.h -- StdAfx
2023-04-02 : Igor Pavlov : Public domain */
#ifndef ZIP7_INC_PRECOMP_H
#define ZIP7_INC_PRECOMP_H
#include "Compiler.h"
/* #include "7zTypes.h" */
#endif

View File

@@ -1,9 +0,0 @@
Project: LZMA SDK
URL: https://www.7-zip.org/sdk.html
License: Public Domain
Upstream version: 23.01
Local modifications:
- Update LzFind.c to find the correct NEON header when using clang-cl + Windows ARM64
- Took only files needed for Blender: C source for raw LZMA1 encoder/decoder.
- CMakeLists.txt is made for Blender codebase

562
extern/lzma/Threads.c vendored
View File

@@ -1,562 +0,0 @@
/* Threads.c -- multithreading library
2023-03-04 : Igor Pavlov : Public domain */
#include "Precomp.h"
#ifdef _WIN32
#ifndef USE_THREADS_CreateThread
#include <process.h>
#endif
#include "Threads.h"
static WRes GetError(void)
{
const DWORD res = GetLastError();
return res ? (WRes)res : 1;
}
static WRes HandleToWRes(HANDLE h) { return (h != NULL) ? 0 : GetError(); }
static WRes BOOLToWRes(BOOL v) { return v ? 0 : GetError(); }
WRes HandlePtr_Close(HANDLE *p)
{
if (*p != NULL)
{
if (!CloseHandle(*p))
return GetError();
*p = NULL;
}
return 0;
}
WRes Handle_WaitObject(HANDLE h)
{
DWORD dw = WaitForSingleObject(h, INFINITE);
/*
(dw) result:
WAIT_OBJECT_0 // 0
WAIT_ABANDONED // 0x00000080 : is not compatible with Win32 Error space
WAIT_TIMEOUT // 0x00000102 : is compatible with Win32 Error space
WAIT_FAILED // 0xFFFFFFFF
*/
if (dw == WAIT_FAILED)
{
dw = GetLastError();
if (dw == 0)
return WAIT_FAILED;
}
return (WRes)dw;
}
#define Thread_Wait(p) Handle_WaitObject(*(p))
WRes Thread_Wait_Close(CThread *p)
{
WRes res = Thread_Wait(p);
WRes res2 = Thread_Close(p);
return (res != 0 ? res : res2);
}
WRes Thread_Create(CThread *p, THREAD_FUNC_TYPE func, LPVOID param)
{
/* Windows Me/98/95: threadId parameter may not be NULL in _beginthreadex/CreateThread functions */
#ifdef USE_THREADS_CreateThread
DWORD threadId;
*p = CreateThread(NULL, 0, func, param, 0, &threadId);
#else
unsigned threadId;
*p = (HANDLE)(_beginthreadex(NULL, 0, func, param, 0, &threadId));
#endif
/* maybe we must use errno here, but probably GetLastError() is also OK. */
return HandleToWRes(*p);
}
WRes Thread_Create_With_Affinity(CThread *p, THREAD_FUNC_TYPE func, LPVOID param, CAffinityMask affinity)
{
#ifdef USE_THREADS_CreateThread
UNUSED_VAR(affinity)
return Thread_Create(p, func, param);
#else
/* Windows Me/98/95: threadId parameter may not be NULL in _beginthreadex/CreateThread functions */
HANDLE h;
WRes wres;
unsigned threadId;
h = (HANDLE)(_beginthreadex(NULL, 0, func, param, CREATE_SUSPENDED, &threadId));
*p = h;
wres = HandleToWRes(h);
if (h)
{
{
// DWORD_PTR prevMask =
SetThreadAffinityMask(h, (DWORD_PTR)affinity);
/*
if (prevMask == 0)
{
// affinity change is non-critical error, so we can ignore it
// wres = GetError();
}
*/
}
{
DWORD prevSuspendCount = ResumeThread(h);
/* ResumeThread() returns:
0 : was_not_suspended
1 : was_resumed
-1 : error
*/
if (prevSuspendCount == (DWORD)-1)
wres = GetError();
}
}
/* maybe we must use errno here, but probably GetLastError() is also OK. */
return wres;
#endif
}
static WRes Event_Create(CEvent *p, BOOL manualReset, int signaled)
{
*p = CreateEvent(NULL, manualReset, (signaled ? TRUE : FALSE), NULL);
return HandleToWRes(*p);
}
WRes Event_Set(CEvent *p) { return BOOLToWRes(SetEvent(*p)); }
WRes Event_Reset(CEvent *p) { return BOOLToWRes(ResetEvent(*p)); }
WRes ManualResetEvent_Create(CManualResetEvent *p, int signaled) { return Event_Create(p, TRUE, signaled); }
WRes AutoResetEvent_Create(CAutoResetEvent *p, int signaled) { return Event_Create(p, FALSE, signaled); }
WRes ManualResetEvent_CreateNotSignaled(CManualResetEvent *p) { return ManualResetEvent_Create(p, 0); }
WRes AutoResetEvent_CreateNotSignaled(CAutoResetEvent *p) { return AutoResetEvent_Create(p, 0); }
WRes Semaphore_Create(CSemaphore *p, UInt32 initCount, UInt32 maxCount)
{
// negative ((LONG)maxCount) is not supported in WIN32::CreateSemaphore()
*p = CreateSemaphore(NULL, (LONG)initCount, (LONG)maxCount, NULL);
return HandleToWRes(*p);
}
WRes Semaphore_OptCreateInit(CSemaphore *p, UInt32 initCount, UInt32 maxCount)
{
// if (Semaphore_IsCreated(p))
{
WRes wres = Semaphore_Close(p);
if (wres != 0)
return wres;
}
return Semaphore_Create(p, initCount, maxCount);
}
static WRes Semaphore_Release(CSemaphore *p, LONG releaseCount, LONG *previousCount)
{ return BOOLToWRes(ReleaseSemaphore(*p, releaseCount, previousCount)); }
WRes Semaphore_ReleaseN(CSemaphore *p, UInt32 num)
{ return Semaphore_Release(p, (LONG)num, NULL); }
WRes Semaphore_Release1(CSemaphore *p) { return Semaphore_ReleaseN(p, 1); }
WRes CriticalSection_Init(CCriticalSection *p)
{
/* InitializeCriticalSection() can raise exception:
Windows XP, 2003 : can raise a STATUS_NO_MEMORY exception
Windows Vista+ : no exceptions */
#ifdef _MSC_VER
#ifdef __clang__
#pragma GCC diagnostic ignored "-Wlanguage-extension-token"
#endif
__try
#endif
{
InitializeCriticalSection(p);
/* InitializeCriticalSectionAndSpinCount(p, 0); */
}
#ifdef _MSC_VER
__except (EXCEPTION_EXECUTE_HANDLER) { return ERROR_NOT_ENOUGH_MEMORY; }
#endif
return 0;
}
#else // _WIN32
// ---------- POSIX ----------
#ifndef __APPLE__
#ifndef Z7_AFFINITY_DISABLE
// _GNU_SOURCE can be required for pthread_setaffinity_np() / CPU_ZERO / CPU_SET
// clang < 3.6 : unknown warning group '-Wreserved-id-macro'
// clang 3.6 - 12.01 : gives warning "macro name is a reserved identifier"
// clang >= 13 : do not give warning
#if !defined(_GNU_SOURCE)
#if defined(__clang__) && (__clang_major__ >= 4) && (__clang_major__ <= 12)
#pragma GCC diagnostic ignored "-Wreserved-id-macro"
#endif
#define _GNU_SOURCE
#endif // !defined(_GNU_SOURCE)
#endif // Z7_AFFINITY_DISABLE
#endif // __APPLE__
#include "Threads.h"
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#ifdef Z7_AFFINITY_SUPPORTED
// #include <sched.h>
#endif
// #include <stdio.h>
// #define PRF(p) p
#define PRF(p)
#define Print(s) PRF(printf("\n%s\n", s);)
WRes Thread_Create_With_CpuSet(CThread *p, THREAD_FUNC_TYPE func, LPVOID param, const CCpuSet *cpuSet)
{
// new thread in Posix probably inherits affinity from parent thread
Print("Thread_Create_With_CpuSet")
pthread_attr_t attr;
int ret;
// int ret2;
p->_created = 0;
RINOK(pthread_attr_init(&attr))
ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
if (!ret)
{
if (cpuSet)
{
#ifdef Z7_AFFINITY_SUPPORTED
/*
printf("\n affinity :");
unsigned i;
for (i = 0; i < sizeof(*cpuSet) && i < 8; i++)
{
Byte b = *((const Byte *)cpuSet + i);
char temp[32];
#define GET_HEX_CHAR(t) ((char)(((t < 10) ? ('0' + t) : ('A' + (t - 10)))))
temp[0] = GET_HEX_CHAR((b & 0xF));
temp[1] = GET_HEX_CHAR((b >> 4));
// temp[0] = GET_HEX_CHAR((b >> 4)); // big-endian
// temp[1] = GET_HEX_CHAR((b & 0xF)); // big-endian
temp[2] = 0;
printf("%s", temp);
}
printf("\n");
*/
// ret2 =
pthread_attr_setaffinity_np(&attr, sizeof(*cpuSet), cpuSet);
// if (ret2) ret = ret2;
#endif
}
ret = pthread_create(&p->_tid, &attr, func, param);
if (!ret)
{
p->_created = 1;
/*
if (cpuSet)
{
// ret2 =
pthread_setaffinity_np(p->_tid, sizeof(*cpuSet), cpuSet);
// if (ret2) ret = ret2;
}
*/
}
}
// ret2 =
pthread_attr_destroy(&attr);
// if (ret2 != 0) ret = ret2;
return ret;
}
WRes Thread_Create(CThread *p, THREAD_FUNC_TYPE func, LPVOID param)
{
return Thread_Create_With_CpuSet(p, func, param, NULL);
}
WRes Thread_Create_With_Affinity(CThread *p, THREAD_FUNC_TYPE func, LPVOID param, CAffinityMask affinity)
{
Print("Thread_Create_WithAffinity")
CCpuSet cs;
unsigned i;
CpuSet_Zero(&cs);
for (i = 0; i < sizeof(affinity) * 8; i++)
{
if (affinity == 0)
break;
if (affinity & 1)
{
CpuSet_Set(&cs, i);
}
affinity >>= 1;
}
return Thread_Create_With_CpuSet(p, func, param, &cs);
}
WRes Thread_Close(CThread *p)
{
// Print("Thread_Close")
int ret;
if (!p->_created)
return 0;
ret = pthread_detach(p->_tid);
p->_tid = 0;
p->_created = 0;
return ret;
}
WRes Thread_Wait_Close(CThread *p)
{
// Print("Thread_Wait_Close")
void *thread_return;
int ret;
if (!p->_created)
return EINVAL;
ret = pthread_join(p->_tid, &thread_return);
// probably we can't use that (_tid) after pthread_join(), so we close thread here
p->_created = 0;
p->_tid = 0;
return ret;
}
static WRes Event_Create(CEvent *p, int manualReset, int signaled)
{
RINOK(pthread_mutex_init(&p->_mutex, NULL))
RINOK(pthread_cond_init(&p->_cond, NULL))
p->_manual_reset = manualReset;
p->_state = (signaled ? True : False);
p->_created = 1;
return 0;
}
WRes ManualResetEvent_Create(CManualResetEvent *p, int signaled)
{ return Event_Create(p, True, signaled); }
WRes ManualResetEvent_CreateNotSignaled(CManualResetEvent *p)
{ return ManualResetEvent_Create(p, 0); }
WRes AutoResetEvent_Create(CAutoResetEvent *p, int signaled)
{ return Event_Create(p, False, signaled); }
WRes AutoResetEvent_CreateNotSignaled(CAutoResetEvent *p)
{ return AutoResetEvent_Create(p, 0); }
WRes Event_Set(CEvent *p)
{
RINOK(pthread_mutex_lock(&p->_mutex))
p->_state = True;
int res1 = pthread_cond_broadcast(&p->_cond);
int res2 = pthread_mutex_unlock(&p->_mutex);
return (res2 ? res2 : res1);
}
WRes Event_Reset(CEvent *p)
{
RINOK(pthread_mutex_lock(&p->_mutex))
p->_state = False;
return pthread_mutex_unlock(&p->_mutex);
}
WRes Event_Wait(CEvent *p)
{
RINOK(pthread_mutex_lock(&p->_mutex))
while (p->_state == False)
{
// ETIMEDOUT
// ret =
pthread_cond_wait(&p->_cond, &p->_mutex);
// if (ret != 0) break;
}
if (p->_manual_reset == False)
{
p->_state = False;
}
return pthread_mutex_unlock(&p->_mutex);
}
WRes Event_Close(CEvent *p)
{
if (!p->_created)
return 0;
p->_created = 0;
{
int res1 = pthread_mutex_destroy(&p->_mutex);
int res2 = pthread_cond_destroy(&p->_cond);
return (res1 ? res1 : res2);
}
}
WRes Semaphore_Create(CSemaphore *p, UInt32 initCount, UInt32 maxCount)
{
if (initCount > maxCount || maxCount < 1)
return EINVAL;
RINOK(pthread_mutex_init(&p->_mutex, NULL))
RINOK(pthread_cond_init(&p->_cond, NULL))
p->_count = initCount;
p->_maxCount = maxCount;
p->_created = 1;
return 0;
}
WRes Semaphore_OptCreateInit(CSemaphore *p, UInt32 initCount, UInt32 maxCount)
{
if (Semaphore_IsCreated(p))
{
/*
WRes wres = Semaphore_Close(p);
if (wres != 0)
return wres;
*/
if (initCount > maxCount || maxCount < 1)
return EINVAL;
// return EINVAL; // for debug
p->_count = initCount;
p->_maxCount = maxCount;
return 0;
}
return Semaphore_Create(p, initCount, maxCount);
}
WRes Semaphore_ReleaseN(CSemaphore *p, UInt32 releaseCount)
{
UInt32 newCount;
int ret;
if (releaseCount < 1)
return EINVAL;
RINOK(pthread_mutex_lock(&p->_mutex))
newCount = p->_count + releaseCount;
if (newCount > p->_maxCount)
ret = ERROR_TOO_MANY_POSTS; // EINVAL;
else
{
p->_count = newCount;
ret = pthread_cond_broadcast(&p->_cond);
}
RINOK(pthread_mutex_unlock(&p->_mutex))
return ret;
}
WRes Semaphore_Wait(CSemaphore *p)
{
RINOK(pthread_mutex_lock(&p->_mutex))
while (p->_count < 1)
{
pthread_cond_wait(&p->_cond, &p->_mutex);
}
p->_count--;
return pthread_mutex_unlock(&p->_mutex);
}
WRes Semaphore_Close(CSemaphore *p)
{
if (!p->_created)
return 0;
p->_created = 0;
{
int res1 = pthread_mutex_destroy(&p->_mutex);
int res2 = pthread_cond_destroy(&p->_cond);
return (res1 ? res1 : res2);
}
}
WRes CriticalSection_Init(CCriticalSection *p)
{
// Print("CriticalSection_Init")
if (!p)
return EINTR;
return pthread_mutex_init(&p->_mutex, NULL);
}
void CriticalSection_Enter(CCriticalSection *p)
{
// Print("CriticalSection_Enter")
if (p)
{
// int ret =
pthread_mutex_lock(&p->_mutex);
}
}
void CriticalSection_Leave(CCriticalSection *p)
{
// Print("CriticalSection_Leave")
if (p)
{
// int ret =
pthread_mutex_unlock(&p->_mutex);
}
}
void CriticalSection_Delete(CCriticalSection *p)
{
// Print("CriticalSection_Delete")
if (p)
{
// int ret =
pthread_mutex_destroy(&p->_mutex);
}
}
LONG InterlockedIncrement(LONG volatile *addend)
{
// Print("InterlockedIncrement")
#ifdef USE_HACK_UNSAFE_ATOMIC
LONG val = *addend + 1;
*addend = val;
return val;
#else
#if defined(__clang__) && (__clang_major__ >= 8)
#pragma GCC diagnostic ignored "-Watomic-implicit-seq-cst"
#endif
return __sync_add_and_fetch(addend, 1);
#endif
}
#endif // _WIN32
WRes AutoResetEvent_OptCreate_And_Reset(CAutoResetEvent *p)
{
if (Event_IsCreated(p))
return Event_Reset(p);
return AutoResetEvent_CreateNotSignaled(p);
}
#undef PRF
#undef Print
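/* Illustration only: a hypothetical caller of the portable wrappers implemented above
   (declarations are in Threads.h, shown next). The worker/context names are examples,
   not part of the removed files. */
#include <stdio.h>
#include "Threads.h"
typedef struct { CAutoResetEvent go; } WorkerCtx;
static THREAD_FUNC_DECL worker(void *param)
{
  WorkerCtx *ctx = (WorkerCtx *)param;
  Event_Wait(&ctx->go);            /* blocks until Event_Set() below */
  printf("worker released\n");
  return THREAD_FUNC_RET_ZERO;
}
static WRes run_worker(void)
{
  CThread t;
  WorkerCtx ctx;
  WRes wres;
  Thread_CONSTRUCT(&t)
  Event_Construct(&ctx.go);
  wres = AutoResetEvent_CreateNotSignaled(&ctx.go);
  if (wres != 0)
    return wres;
  wres = Thread_Create(&t, worker, &ctx);
  if (wres == 0)
  {
    Event_Set(&ctx.go);            /* release the worker */
    wres = Thread_Wait_Close(&t);  /* join and close */
  }
  Event_Close(&ctx.go);
  return wres;
}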

240
extern/lzma/Threads.h vendored
View File

@@ -1,240 +0,0 @@
/* Threads.h -- multithreading library
2023-04-02 : Igor Pavlov : Public domain */
#ifndef ZIP7_INC_THREADS_H
#define ZIP7_INC_THREADS_H
#ifdef _WIN32
#include "7zWindows.h"
#else
#if defined(__linux__)
#if !defined(__APPLE__) && !defined(_AIX) && !defined(__ANDROID__)
#ifndef Z7_AFFINITY_DISABLE
#define Z7_AFFINITY_SUPPORTED
// #pragma message(" ==== Z7_AFFINITY_SUPPORTED")
// #define _GNU_SOURCE
#endif
#endif
#endif
#include <pthread.h>
#endif
#include "7zTypes.h"
EXTERN_C_BEGIN
#ifdef _WIN32
WRes HandlePtr_Close(HANDLE *h);
WRes Handle_WaitObject(HANDLE h);
typedef HANDLE CThread;
#define Thread_CONSTRUCT(p) { *(p) = NULL; }
#define Thread_WasCreated(p) (*(p) != NULL)
#define Thread_Close(p) HandlePtr_Close(p)
// #define Thread_Wait(p) Handle_WaitObject(*(p))
#ifdef UNDER_CE
// if (USE_THREADS_CreateThread is defined), we use CreateThread()
// if (USE_THREADS_CreateThread is not defined), we use _beginthreadex()
#define USE_THREADS_CreateThread
#endif
typedef
#ifdef USE_THREADS_CreateThread
DWORD
#else
unsigned
#endif
THREAD_FUNC_RET_TYPE;
#define THREAD_FUNC_RET_ZERO 0
typedef DWORD_PTR CAffinityMask;
typedef DWORD_PTR CCpuSet;
#define CpuSet_Zero(p) *(p) = (0)
#define CpuSet_Set(p, cpu) *(p) |= ((DWORD_PTR)1 << (cpu))
#else // _WIN32
typedef struct
{
pthread_t _tid;
int _created;
} CThread;
#define Thread_CONSTRUCT(p) { (p)->_tid = 0; (p)->_created = 0; }
#define Thread_WasCreated(p) ((p)->_created != 0)
WRes Thread_Close(CThread *p);
// #define Thread_Wait Thread_Wait_Close
typedef void * THREAD_FUNC_RET_TYPE;
#define THREAD_FUNC_RET_ZERO NULL
typedef UInt64 CAffinityMask;
#ifdef Z7_AFFINITY_SUPPORTED
typedef cpu_set_t CCpuSet;
#define CpuSet_Zero(p) CPU_ZERO(p)
#define CpuSet_Set(p, cpu) CPU_SET(cpu, p)
#define CpuSet_IsSet(p, cpu) CPU_ISSET(cpu, p)
#else
typedef UInt64 CCpuSet;
#define CpuSet_Zero(p) *(p) = (0)
#define CpuSet_Set(p, cpu) *(p) |= ((UInt64)1 << (cpu))
#define CpuSet_IsSet(p, cpu) ((*(p) & ((UInt64)1 << (cpu))) != 0)
#endif
#endif // _WIN32
#define THREAD_FUNC_CALL_TYPE Z7_STDCALL
#if defined(_WIN32) && defined(__GNUC__)
/* GCC compiler for x86 32-bit uses the rule:
the stack is 16-byte aligned before CALL instruction for function calling.
But only root function main() contains instructions that
set 16-byte alignment for stack pointer. And another functions
just keep alignment, if it was set in some parent function.
The problem:
if we create new thread in MinGW (GCC) 32-bit x86 via _beginthreadex() or CreateThread(),
the root function of thread doesn't set 16-byte alignment.
And stack frames in all child functions also will be unaligned in that case.
Here we set (force_align_arg_pointer) attribute for root function of new thread.
Do we need (force_align_arg_pointer) for other systems as well? */
#define THREAD_FUNC_ATTRIB_ALIGN_ARG __attribute__((force_align_arg_pointer))
// #define THREAD_FUNC_ATTRIB_ALIGN_ARG // for debug : bad alignment in SSE functions
#else
#define THREAD_FUNC_ATTRIB_ALIGN_ARG
#endif
#define THREAD_FUNC_DECL THREAD_FUNC_ATTRIB_ALIGN_ARG THREAD_FUNC_RET_TYPE THREAD_FUNC_CALL_TYPE
typedef THREAD_FUNC_RET_TYPE (THREAD_FUNC_CALL_TYPE * THREAD_FUNC_TYPE)(void *);
WRes Thread_Create(CThread *p, THREAD_FUNC_TYPE func, LPVOID param);
WRes Thread_Create_With_Affinity(CThread *p, THREAD_FUNC_TYPE func, LPVOID param, CAffinityMask affinity);
WRes Thread_Wait_Close(CThread *p);
#ifdef _WIN32
#define Thread_Create_With_CpuSet(p, func, param, cs) \
Thread_Create_With_Affinity(p, func, param, *cs)
#else
WRes Thread_Create_With_CpuSet(CThread *p, THREAD_FUNC_TYPE func, LPVOID param, const CCpuSet *cpuSet);
#endif
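/* Illustrative sketch (not part of the original header), assuming only the API
 * declared above: a thread entry point is declared with THREAD_FUNC_DECL and
 * returns THREAD_FUNC_RET_ZERO; a WRes of 0 means success. "WorkerFunc" is a
 * hypothetical name.
 *
 *   static THREAD_FUNC_DECL WorkerFunc(void *param)
 *   {
 *     (void)param;               // do the actual work here
 *     return THREAD_FUNC_RET_ZERO;
 *   }
 *
 *   CThread t;
 *   Thread_CONSTRUCT(&t)
 *   if (Thread_Create(&t, WorkerFunc, NULL) == 0)
 *     Thread_Wait_Close(&t);     // wait for the thread and release its handle
 */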
#ifdef _WIN32
typedef HANDLE CEvent;
typedef CEvent CAutoResetEvent;
typedef CEvent CManualResetEvent;
#define Event_Construct(p) *(p) = NULL
#define Event_IsCreated(p) (*(p) != NULL)
#define Event_Close(p) HandlePtr_Close(p)
#define Event_Wait(p) Handle_WaitObject(*(p))
WRes Event_Set(CEvent *p);
WRes Event_Reset(CEvent *p);
WRes ManualResetEvent_Create(CManualResetEvent *p, int signaled);
WRes ManualResetEvent_CreateNotSignaled(CManualResetEvent *p);
WRes AutoResetEvent_Create(CAutoResetEvent *p, int signaled);
WRes AutoResetEvent_CreateNotSignaled(CAutoResetEvent *p);
typedef HANDLE CSemaphore;
#define Semaphore_Construct(p) *(p) = NULL
#define Semaphore_IsCreated(p) (*(p) != NULL)
#define Semaphore_Close(p) HandlePtr_Close(p)
#define Semaphore_Wait(p) Handle_WaitObject(*(p))
WRes Semaphore_Create(CSemaphore *p, UInt32 initCount, UInt32 maxCount);
WRes Semaphore_OptCreateInit(CSemaphore *p, UInt32 initCount, UInt32 maxCount);
WRes Semaphore_ReleaseN(CSemaphore *p, UInt32 num);
WRes Semaphore_Release1(CSemaphore *p);
typedef CRITICAL_SECTION CCriticalSection;
WRes CriticalSection_Init(CCriticalSection *p);
#define CriticalSection_Delete(p) DeleteCriticalSection(p)
#define CriticalSection_Enter(p) EnterCriticalSection(p)
#define CriticalSection_Leave(p) LeaveCriticalSection(p)
#else // _WIN32
typedef struct _CEvent
{
int _created;
int _manual_reset;
int _state;
pthread_mutex_t _mutex;
pthread_cond_t _cond;
} CEvent;
typedef CEvent CAutoResetEvent;
typedef CEvent CManualResetEvent;
#define Event_Construct(p) (p)->_created = 0
#define Event_IsCreated(p) ((p)->_created)
WRes ManualResetEvent_Create(CManualResetEvent *p, int signaled);
WRes ManualResetEvent_CreateNotSignaled(CManualResetEvent *p);
WRes AutoResetEvent_Create(CAutoResetEvent *p, int signaled);
WRes AutoResetEvent_CreateNotSignaled(CAutoResetEvent *p);
WRes Event_Set(CEvent *p);
WRes Event_Reset(CEvent *p);
WRes Event_Wait(CEvent *p);
WRes Event_Close(CEvent *p);
typedef struct _CSemaphore
{
int _created;
UInt32 _count;
UInt32 _maxCount;
pthread_mutex_t _mutex;
pthread_cond_t _cond;
} CSemaphore;
#define Semaphore_Construct(p) (p)->_created = 0
#define Semaphore_IsCreated(p) ((p)->_created)
WRes Semaphore_Create(CSemaphore *p, UInt32 initCount, UInt32 maxCount);
WRes Semaphore_OptCreateInit(CSemaphore *p, UInt32 initCount, UInt32 maxCount);
WRes Semaphore_ReleaseN(CSemaphore *p, UInt32 num);
#define Semaphore_Release1(p) Semaphore_ReleaseN(p, 1)
WRes Semaphore_Wait(CSemaphore *p);
WRes Semaphore_Close(CSemaphore *p);
typedef struct _CCriticalSection
{
pthread_mutex_t _mutex;
} CCriticalSection;
WRes CriticalSection_Init(CCriticalSection *p);
void CriticalSection_Delete(CCriticalSection *cs);
void CriticalSection_Enter(CCriticalSection *cs);
void CriticalSection_Leave(CCriticalSection *cs);
LONG InterlockedIncrement(LONG volatile *addend);
#endif // _WIN32
WRes AutoResetEvent_OptCreate_And_Reset(CAutoResetEvent *p);
EXTERN_C_END
#endif


@@ -1,13 +0,0 @@
diff --git a/extern/lzma/LzFind.c b/extern/lzma/LzFind.c
index 0fbd5aae563..94b4879cfdc 100644
--- a/extern/lzma/LzFind.c
+++ b/extern/lzma/LzFind.c
@@ -625,7 +625,7 @@ void MatchFinder_Init(CMatchFinder *p)
#endif
#endif
- #if defined(_MSC_VER) && defined(MY_CPU_ARM64)
+ #if defined(_MSC_VER) && defined(MY_CPU_ARM64) && !defined(__clang__)
#include <arm64_neon.h>
#else
#include <arm_neon.h>


@@ -1,26 +0,0 @@
# SPDX-FileCopyrightText: 2006 Blender Foundation
#
# SPDX-License-Identifier: GPL-2.0-or-later
remove_strict_flags()
set(INC
)
set(INC_SYS
)
set(SRC
minilzo/minilzo.c
minilzo/lzoconf.h
minilzo/lzodefs.h
minilzo/minilzo.h
)
set(LIB
)
blender_add_lib(extern_minilzo "${SRC}" "${INC}" "${INC_SYS}" "${LIB}")


@@ -1,7 +0,0 @@
Project: miniLZO
#mini subset of the LZO real-time data compression library
URL: http://www.oberhumer.com/opensource/lzo/
License: SPDX:GPL-2.0-or-later
Upstream version: 2.08
Local modifications: Add #ifdef for Windows ARM64 (MSVC) platforms
Copyright: Copyright (C) 1996-2014 Markus Franz Xaver Oberhumer All Rights Reserved.


@@ -1,339 +0,0 @@
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Lesser General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.
We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.
1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.
c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.
If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.
5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
Also add information on how to contact you by electronic and paper mail.
If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:
Gnomovision version 69, Copyright (C) year name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
`Gnomovision' (which makes passes at compilers) written by James Hacker.
<signature of Ty Coon>, 1 April 1989
Ty Coon, President of Vice
This General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License.


@@ -1,123 +0,0 @@
============================================================================
miniLZO -- mini subset of the LZO real-time data compression library
============================================================================
Author : Markus Franz Xaver Johannes Oberhumer
<markus@oberhumer.com>
http://www.oberhumer.com/opensource/lzo/
Version : 2.08
Date : 29 Jun 2014
I've created miniLZO for projects where it is inconvenient to
include (or require) the full LZO source code just because you
want to add a little bit of data compression to your application.
miniLZO implements the LZO1X-1 compressor and both the standard and
safe LZO1X decompressor. Apart from fast compression it is also useful
for situations where you want to use pre-compressed data files (which
must have been compressed with LZO1X-999).
miniLZO consists of one C source file and three header files:
minilzo.c
minilzo.h, lzoconf.h, lzodefs.h
To use miniLZO just copy these files into your source directory, add
minilzo.c to your Makefile and #include minilzo.h from your program.
Note: you also must distribute this file ('README.LZO') with your project.
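As a rough, untested illustration of that workflow (not part of the original
README), the sketch below round-trips one buffer through the functions listed
in Appendix B; the buffer names are ours, and the worst-case output bound
(len + len/16 + 64 + 3) follows the LZO documentation.

    #include <stdio.h>
    #include <string.h>
    #include "minilzo.h"

    #define IN_LEN 4096
    static unsigned char in_buf[IN_LEN];
    static unsigned char out_buf[IN_LEN + IN_LEN / 16 + 64 + 3]; /* worst case */
    static unsigned char new_buf[IN_LEN];
    /* work memory for lzo1x_1_compress(), aligned via lzo_align_t */
    static lzo_align_t wrkmem[(LZO1X_1_MEM_COMPRESS + sizeof(lzo_align_t) - 1)
                              / sizeof(lzo_align_t)];

    int main(void)
    {
        lzo_uint out_len, new_len = IN_LEN;
        if (lzo_init() != LZO_E_OK)       /* always check the return code */
            return 1;
        memset(in_buf, 0, IN_LEN);        /* trivially compressible input */
        if (lzo1x_1_compress(in_buf, IN_LEN, out_buf, &out_len, wrkmem) != LZO_E_OK)
            return 1;
        if (lzo1x_decompress_safe(out_buf, out_len, new_buf, &new_len, NULL) != LZO_E_OK
            || new_len != IN_LEN)
            return 1;
        printf("compressed %lu -> %lu bytes\n",
               (unsigned long)IN_LEN, (unsigned long)out_len);
        return 0;
    }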
minilzo.o compiles to about 6 KiB (using gcc or Visual C on an i386), and
the sources are about 30 KiB when packed with zip - so there's no more
excuse that your application doesn't support data compression :-)
For more information, documentation, example programs and other support
files (like Makefiles and build scripts) please download the full LZO
package from
http://www.oberhumer.com/opensource/lzo/
Have fun,
Markus
P.S. minilzo.c is generated automatically from the LZO sources and
therefore functionality is completely identical
Appendix A: building miniLZO
----------------------------
miniLZO is written in such a way that it should compile and run
out-of-the-box on most machines.
If you are running on a very unusual architecture and lzo_init() fails then
you should first recompile with '-DLZO_DEBUG' to see what causes the failure.
The most probable case is something like 'sizeof(void *) != sizeof(size_t)'.
After identifying the problem you can compile by adding some defines
like '-DSIZEOF_VOID_P=8' to your Makefile.
The best solution is (of course) using Autoconf - if your project uses
Autoconf anyway just add '-DMINILZO_HAVE_CONFIG_H' to your compiler
flags when compiling minilzo.c. See the LZO distribution for an example
how to set up configure.ac.
Appendix B: list of public functions available in miniLZO
---------------------------------------------------------
Library initialization
lzo_init()
Compression
lzo1x_1_compress()
Decompression
lzo1x_decompress()
lzo1x_decompress_safe()
Checksum functions
lzo_adler32()
Version functions
lzo_version()
lzo_version_string()
lzo_version_date()
Portable (but slow) string functions
lzo_memcmp()
lzo_memcpy()
lzo_memmove()
lzo_memset()
Appendix C: suggested macros for 'configure.ac' when using Autoconf
-------------------------------------------------------------------
Checks for typedefs and structures
AC_CHECK_TYPE(ptrdiff_t,long)
AC_TYPE_SIZE_T
AC_CHECK_SIZEOF(short)
AC_CHECK_SIZEOF(int)
AC_CHECK_SIZEOF(long)
AC_CHECK_SIZEOF(long long)
AC_CHECK_SIZEOF(__int64)
AC_CHECK_SIZEOF(void *)
AC_CHECK_SIZEOF(size_t)
AC_CHECK_SIZEOF(ptrdiff_t)
Checks for compiler characteristics
AC_C_CONST
Checks for library functions
AC_CHECK_FUNCS(memcmp memcpy memmove memset)
Appendix D: Copyright
---------------------
LZO and miniLZO are Copyright (C) 1996-2014 Markus Franz Xaver Oberhumer
All Rights Reserved.
LZO and miniLZO are distributed under the terms of the GNU General
Public License (GPL). See the file COPYING.
Special licenses for commercial and other applications which
are not willing to accept the GNU General Public License
are available by contacting the author.


@@ -1,444 +0,0 @@
/* lzoconf.h -- configuration of the LZO data compression library
This file is part of the LZO real-time data compression library.
Copyright (C) 1996-2014 Markus Franz Xaver Johannes Oberhumer
All Rights Reserved.
The LZO library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of
the License, or (at your option) any later version.
The LZO library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with the LZO library; see the file COPYING.
If not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
Markus F.X.J. Oberhumer
<markus@oberhumer.com>
http://www.oberhumer.com/opensource/lzo/
*/
#ifndef __LZOCONF_H_INCLUDED
#define __LZOCONF_H_INCLUDED 1
#define LZO_VERSION 0x2080
#define LZO_VERSION_STRING "2.08"
#define LZO_VERSION_DATE "Jun 29 2014"
/* internal Autoconf configuration file - only used when building LZO */
#if defined(LZO_HAVE_CONFIG_H)
# include <config.h>
#endif
#include <limits.h>
#include <stddef.h>
/***********************************************************************
// LZO requires a conforming <limits.h>
************************************************************************/
#if !defined(CHAR_BIT) || (CHAR_BIT != 8)
# error "invalid CHAR_BIT"
#endif
#if !defined(UCHAR_MAX) || !defined(USHRT_MAX) || !defined(UINT_MAX) || !defined(ULONG_MAX)
# error "check your compiler installation"
#endif
#if (USHRT_MAX < 1) || (UINT_MAX < 1) || (ULONG_MAX < 1)
# error "your limits.h macros are broken"
#endif
/* get OS and architecture defines */
#ifndef __LZODEFS_H_INCLUDED
#include "lzodefs.h"
#endif
#ifdef __cplusplus
extern "C" {
#endif
/***********************************************************************
// some core defines
************************************************************************/
/* memory checkers */
#if !defined(__LZO_CHECKER)
# if defined(__BOUNDS_CHECKING_ON)
# define __LZO_CHECKER 1
# elif defined(__CHECKER__)
# define __LZO_CHECKER 1
# elif defined(__INSURE__)
# define __LZO_CHECKER 1
# elif defined(__PURIFY__)
# define __LZO_CHECKER 1
# endif
#endif
/***********************************************************************
// integral and pointer types
************************************************************************/
/* lzo_uint must match size_t */
#if !defined(LZO_UINT_MAX)
# if (LZO_ABI_LLP64)
# if (LZO_OS_WIN64)
typedef unsigned __int64 lzo_uint;
typedef __int64 lzo_int;
# else
typedef lzo_ullong_t lzo_uint;
typedef lzo_llong_t lzo_int;
# endif
# define LZO_SIZEOF_LZO_UINT 8
# define LZO_UINT_MAX 0xffffffffffffffffull
# define LZO_INT_MAX 9223372036854775807LL
# define LZO_INT_MIN (-1LL - LZO_INT_MAX)
# elif (LZO_ABI_IP32L64) /* MIPS R5900 */
typedef unsigned int lzo_uint;
typedef int lzo_int;
# define LZO_SIZEOF_LZO_UINT LZO_SIZEOF_INT
# define LZO_UINT_MAX UINT_MAX
# define LZO_INT_MAX INT_MAX
# define LZO_INT_MIN INT_MIN
# elif (ULONG_MAX >= LZO_0xffffffffL)
typedef unsigned long lzo_uint;
typedef long lzo_int;
# define LZO_SIZEOF_LZO_UINT LZO_SIZEOF_LONG
# define LZO_UINT_MAX ULONG_MAX
# define LZO_INT_MAX LONG_MAX
# define LZO_INT_MIN LONG_MIN
# else
# error "lzo_uint"
# endif
#endif
/* The larger type of lzo_uint and lzo_uint32_t. */
#if (LZO_SIZEOF_LZO_UINT >= 4)
# define lzo_xint lzo_uint
#else
# define lzo_xint lzo_uint32_t
#endif
typedef int lzo_bool;
/* sanity checks */
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_uint) == LZO_SIZEOF_LZO_UINT)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_xint) >= sizeof(lzo_uint))
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_xint) >= sizeof(lzo_uint32_t))
#ifndef __LZO_MMODEL
#define __LZO_MMODEL /*empty*/
#endif
/* no typedef here because of const-pointer issues */
#define lzo_bytep unsigned char __LZO_MMODEL *
#define lzo_charp char __LZO_MMODEL *
#define lzo_voidp void __LZO_MMODEL *
#define lzo_shortp short __LZO_MMODEL *
#define lzo_ushortp unsigned short __LZO_MMODEL *
#define lzo_intp lzo_int __LZO_MMODEL *
#define lzo_uintp lzo_uint __LZO_MMODEL *
#define lzo_xintp lzo_xint __LZO_MMODEL *
#define lzo_voidpp lzo_voidp __LZO_MMODEL *
#define lzo_bytepp lzo_bytep __LZO_MMODEL *
#define lzo_int8_tp lzo_int8_t __LZO_MMODEL *
#define lzo_uint8_tp lzo_uint8_t __LZO_MMODEL *
#define lzo_int16_tp lzo_int16_t __LZO_MMODEL *
#define lzo_uint16_tp lzo_uint16_t __LZO_MMODEL *
#define lzo_int32_tp lzo_int32_t __LZO_MMODEL *
#define lzo_uint32_tp lzo_uint32_t __LZO_MMODEL *
#if defined(lzo_int64_t)
#define lzo_int64_tp lzo_int64_t __LZO_MMODEL *
#define lzo_uint64_tp lzo_uint64_t __LZO_MMODEL *
#endif
/* Older LZO versions used to support ancient systems and memory models
* like 16-bit MSDOS with __huge pointers and Cray PVP, but these
* obsolete configurations are not supported any longer.
*/
#if defined(__LZO_MMODEL_HUGE)
#error "__LZO_MMODEL_HUGE is unsupported"
#endif
#if (LZO_MM_PVP)
#error "LZO_MM_PVP is unsupported"
#endif
#if (LZO_SIZEOF_INT < 4)
#error "LZO_SIZEOF_INT < 4 is unsupported"
#endif
#if (__LZO_UINTPTR_T_IS_POINTER)
#error "__LZO_UINTPTR_T_IS_POINTER is unsupported"
#endif
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(int) >= 4)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_uint) >= 4)
/* Strange configurations where sizeof(lzo_uint) != sizeof(size_t) should
* work but have not received much testing lately, so be strict here.
*/
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_uint) == sizeof(size_t))
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_uint) == sizeof(ptrdiff_t))
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_uint) == sizeof(lzo_uintptr_t))
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(void *) == sizeof(lzo_uintptr_t))
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(char *) == sizeof(lzo_uintptr_t))
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(long *) == sizeof(lzo_uintptr_t))
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(void *) == sizeof(lzo_voidp))
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(char *) == sizeof(lzo_bytep))
/***********************************************************************
// function types
************************************************************************/
/* name mangling */
#if !defined(__LZO_EXTERN_C)
# ifdef __cplusplus
# define __LZO_EXTERN_C extern "C"
# else
# define __LZO_EXTERN_C extern
# endif
#endif
/* calling convention */
#if !defined(__LZO_CDECL)
# define __LZO_CDECL __lzo_cdecl
#endif
/* DLL export information */
#if !defined(__LZO_EXPORT1)
# define __LZO_EXPORT1 /*empty*/
#endif
#if !defined(__LZO_EXPORT2)
# define __LZO_EXPORT2 /*empty*/
#endif
/* __cdecl calling convention for public C and assembly functions */
#if !defined(LZO_PUBLIC)
# define LZO_PUBLIC(_rettype) __LZO_EXPORT1 _rettype __LZO_EXPORT2 __LZO_CDECL
#endif
#if !defined(LZO_EXTERN)
# define LZO_EXTERN(_rettype) __LZO_EXTERN_C LZO_PUBLIC(_rettype)
#endif
#if !defined(LZO_PRIVATE)
# define LZO_PRIVATE(_rettype) static _rettype __LZO_CDECL
#endif
/* function types */
typedef int
(__LZO_CDECL *lzo_compress_t) ( const lzo_bytep src, lzo_uint src_len,
lzo_bytep dst, lzo_uintp dst_len,
lzo_voidp wrkmem );
typedef int
(__LZO_CDECL *lzo_decompress_t) ( const lzo_bytep src, lzo_uint src_len,
lzo_bytep dst, lzo_uintp dst_len,
lzo_voidp wrkmem );
typedef int
(__LZO_CDECL *lzo_optimize_t) ( lzo_bytep src, lzo_uint src_len,
lzo_bytep dst, lzo_uintp dst_len,
lzo_voidp wrkmem );
typedef int
(__LZO_CDECL *lzo_compress_dict_t)(const lzo_bytep src, lzo_uint src_len,
lzo_bytep dst, lzo_uintp dst_len,
lzo_voidp wrkmem,
const lzo_bytep dict, lzo_uint dict_len );
typedef int
(__LZO_CDECL *lzo_decompress_dict_t)(const lzo_bytep src, lzo_uint src_len,
lzo_bytep dst, lzo_uintp dst_len,
lzo_voidp wrkmem,
const lzo_bytep dict, lzo_uint dict_len );
/* Callback interface. Currently only the progress indicator ("nprogress")
* is used, but this may change in a future release. */
struct lzo_callback_t;
typedef struct lzo_callback_t lzo_callback_t;
#define lzo_callback_p lzo_callback_t __LZO_MMODEL *
/* malloc & free function types */
typedef lzo_voidp (__LZO_CDECL *lzo_alloc_func_t)
(lzo_callback_p self, lzo_uint items, lzo_uint size);
typedef void (__LZO_CDECL *lzo_free_func_t)
(lzo_callback_p self, lzo_voidp ptr);
/* a progress indicator callback function */
typedef void (__LZO_CDECL *lzo_progress_func_t)
(lzo_callback_p, lzo_uint, lzo_uint, int);
struct lzo_callback_t
{
/* custom allocators (set to 0 to disable) */
lzo_alloc_func_t nalloc; /* [not used right now] */
lzo_free_func_t nfree; /* [not used right now] */
/* a progress indicator callback function (set to 0 to disable) */
lzo_progress_func_t nprogress;
/* INFO: the first parameter "self" of the nalloc/nfree/nprogress
* callbacks points back to this struct, so you are free to store
* some extra info in the following variables. */
lzo_voidp user1;
lzo_xint user2;
lzo_xint user3;
};
/***********************************************************************
// error codes and prototypes
************************************************************************/
/* Error codes for the compression/decompression functions. Negative
* values are errors, positive values will be used for special but
* normal events.
*/
#define LZO_E_OK 0
#define LZO_E_ERROR (-1)
#define LZO_E_OUT_OF_MEMORY (-2) /* [lzo_alloc_func_t failure] */
#define LZO_E_NOT_COMPRESSIBLE (-3) /* [not used right now] */
#define LZO_E_INPUT_OVERRUN (-4)
#define LZO_E_OUTPUT_OVERRUN (-5)
#define LZO_E_LOOKBEHIND_OVERRUN (-6)
#define LZO_E_EOF_NOT_FOUND (-7)
#define LZO_E_INPUT_NOT_CONSUMED (-8)
#define LZO_E_NOT_YET_IMPLEMENTED (-9) /* [not used right now] */
#define LZO_E_INVALID_ARGUMENT (-10)
#define LZO_E_INVALID_ALIGNMENT (-11) /* pointer argument is not properly aligned */
#define LZO_E_OUTPUT_NOT_CONSUMED (-12)
#define LZO_E_INTERNAL_ERROR (-99)
#ifndef lzo_sizeof_dict_t
# define lzo_sizeof_dict_t ((unsigned)sizeof(lzo_bytep))
#endif
/* lzo_init() should be the first function you call.
* Check the return code !
*
* lzo_init() is a macro to allow checking that the library and the
* compiler's view of various types are consistent.
*/
#define lzo_init() __lzo_init_v2(LZO_VERSION,(int)sizeof(short),(int)sizeof(int),\
(int)sizeof(long),(int)sizeof(lzo_uint32_t),(int)sizeof(lzo_uint),\
(int)lzo_sizeof_dict_t,(int)sizeof(char *),(int)sizeof(lzo_voidp),\
(int)sizeof(lzo_callback_t))
LZO_EXTERN(int) __lzo_init_v2(unsigned,int,int,int,int,int,int,int,int,int);
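/* Illustrative example (not from the original header): the typical first call
 * simply checks the return value before any other LZO function is used:
 *
 *   if (lzo_init() != LZO_E_OK) {
 *     // the library and the compiler disagree about basic type sizes
 *     return -1;
 *   }
 */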
/* version functions (useful for shared libraries) */
LZO_EXTERN(unsigned) lzo_version(void);
LZO_EXTERN(const char *) lzo_version_string(void);
LZO_EXTERN(const char *) lzo_version_date(void);
LZO_EXTERN(const lzo_charp) _lzo_version_string(void);
LZO_EXTERN(const lzo_charp) _lzo_version_date(void);
/* string functions */
LZO_EXTERN(int)
lzo_memcmp(const lzo_voidp a, const lzo_voidp b, lzo_uint len);
LZO_EXTERN(lzo_voidp)
lzo_memcpy(lzo_voidp dst, const lzo_voidp src, lzo_uint len);
LZO_EXTERN(lzo_voidp)
lzo_memmove(lzo_voidp dst, const lzo_voidp src, lzo_uint len);
LZO_EXTERN(lzo_voidp)
lzo_memset(lzo_voidp buf, int c, lzo_uint len);
/* checksum functions */
LZO_EXTERN(lzo_uint32_t)
lzo_adler32(lzo_uint32_t c, const lzo_bytep buf, lzo_uint len);
LZO_EXTERN(lzo_uint32_t)
lzo_crc32(lzo_uint32_t c, const lzo_bytep buf, lzo_uint len);
LZO_EXTERN(const lzo_uint32_tp)
lzo_get_crc32_table(void);
/* misc. */
LZO_EXTERN(int) _lzo_config_check(void);
typedef union {
lzo_voidp a00; lzo_bytep a01; lzo_uint a02; lzo_xint a03; lzo_uintptr_t a04;
void *a05; unsigned char *a06; unsigned long a07; size_t a08; ptrdiff_t a09;
#if defined(lzo_int64_t)
lzo_uint64_t a10;
#endif
} lzo_align_t;
/* align a char pointer on a boundary that is a multiple of 'size' */
LZO_EXTERN(unsigned) __lzo_align_gap(const lzo_voidp p, lzo_uint size);
#define LZO_PTR_ALIGN_UP(p,size) \
((p) + (lzo_uint) __lzo_align_gap((const lzo_voidp)(p),(lzo_uint)(size)))
/***********************************************************************
// deprecated macros - only for backward compatibility
************************************************************************/
/* deprecated - use 'lzo_bytep' instead of 'lzo_byte *' */
#define lzo_byte unsigned char
/* deprecated type names */
#define lzo_int32 lzo_int32_t
#define lzo_uint32 lzo_uint32_t
#define lzo_int32p lzo_int32_t __LZO_MMODEL *
#define lzo_uint32p lzo_uint32_t __LZO_MMODEL *
#define LZO_INT32_MAX LZO_INT32_C(2147483647)
#define LZO_UINT32_MAX LZO_UINT32_C(4294967295)
#if defined(lzo_int64_t)
#define lzo_int64 lzo_int64_t
#define lzo_uint64 lzo_uint64_t
#define lzo_int64p lzo_int64_t __LZO_MMODEL *
#define lzo_uint64p lzo_uint64_t __LZO_MMODEL *
#define LZO_INT64_MAX LZO_INT64_C(9223372036854775807)
#define LZO_UINT64_MAX LZO_UINT64_C(18446744073709551615)
#endif
/* deprecated types */
typedef union { lzo_bytep a; lzo_uint b; } __lzo_pu_u;
typedef union { lzo_bytep a; lzo_uint32_t b; } __lzo_pu32_u;
#if defined(LZO_CFG_COMPAT)
#define __LZOCONF_H 1
#if defined(LZO_ARCH_I086)
# define __LZO_i386 1
#elif defined(LZO_ARCH_I386)
# define __LZO_i386 1
#endif
#if defined(LZO_OS_DOS16)
# define __LZO_DOS 1
# define __LZO_DOS16 1
#elif defined(LZO_OS_DOS32)
# define __LZO_DOS 1
#elif defined(LZO_OS_WIN16)
# define __LZO_WIN 1
# define __LZO_WIN16 1
#elif defined(LZO_OS_WIN32)
# define __LZO_WIN 1
#endif
#define __LZO_CMODEL /*empty*/
#define __LZO_DMODEL /*empty*/
#define __LZO_ENTRY __LZO_CDECL
#define LZO_EXTERN_CDECL LZO_EXTERN
#define LZO_ALIGN LZO_PTR_ALIGN_UP
#define lzo_compress_asm_t lzo_compress_t
#define lzo_decompress_asm_t lzo_decompress_t
#endif /* LZO_CFG_COMPAT */
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* already included */
/* vim:set ts=4 sw=4 et: */


@@ -1,2998 +0,0 @@
/* lzodefs.h -- architecture, OS and compiler specific defines
This file is part of the LZO real-time data compression library.
Copyright (C) 1996-2014 Markus Franz Xaver Johannes Oberhumer
All Rights Reserved.
The LZO library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of
the License, or (at your option) any later version.
The LZO library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with the LZO library; see the file COPYING.
If not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
Markus F.X.J. Oberhumer
<markus@oberhumer.com>
http://www.oberhumer.com/opensource/lzo/
*/
#ifndef __LZODEFS_H_INCLUDED
#define __LZODEFS_H_INCLUDED 1
#if defined(__CYGWIN32__) && !defined(__CYGWIN__)
# define __CYGWIN__ __CYGWIN32__
#endif
#if 1 && defined(__INTERIX) && defined(__GNUC__) && !defined(_ALL_SOURCE)
# define _ALL_SOURCE 1
#endif
#if defined(__mips__) && defined(__R5900__)
# if !defined(__LONG_MAX__)
# define __LONG_MAX__ 9223372036854775807L
# endif
#endif
#if !defined(LZO_CFG_NO_DISABLE_WUNDEF)
#if defined(__ARMCC_VERSION)
# pragma diag_suppress 193
#elif defined(__clang__) && defined(__clang_minor__)
# pragma clang diagnostic ignored "-Wundef"
#elif defined(__INTEL_COMPILER)
# pragma warning(disable: 193)
#elif defined(__KEIL__) && defined(__C166__)
# pragma warning disable = 322
#elif defined(__GNUC__) && defined(__GNUC_MINOR__) && !defined(__PATHSCALE__)
# if ((__GNUC__-0) >= 5 || ((__GNUC__-0) == 4 && (__GNUC_MINOR__-0) >= 2))
# pragma GCC diagnostic ignored "-Wundef"
# endif
#elif defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER) && !defined(__MWERKS__)
# if ((_MSC_VER-0) >= 1300)
# pragma warning(disable: 4668)
# endif
#endif
#endif
#if 0 && defined(__POCC__) && defined(_WIN32)
# if (__POCC__ >= 400)
# pragma warn(disable: 2216)
# endif
#endif
#if 0 && defined(__WATCOMC__)
# if (__WATCOMC__ >= 1050) && (__WATCOMC__ < 1060)
# pragma warning 203 9
# endif
#endif
#if defined(__BORLANDC__) && defined(__MSDOS__) && !defined(__FLAT__)
# pragma option -h
#endif
#if !(LZO_CFG_NO_DISABLE_WCRTNONSTDC)
#ifndef _CRT_NONSTDC_NO_DEPRECATE
#define _CRT_NONSTDC_NO_DEPRECATE 1
#endif
#ifndef _CRT_NONSTDC_NO_WARNINGS
#define _CRT_NONSTDC_NO_WARNINGS 1
#endif
#ifndef _CRT_SECURE_NO_DEPRECATE
#define _CRT_SECURE_NO_DEPRECATE 1
#endif
#ifndef _CRT_SECURE_NO_WARNINGS
#define _CRT_SECURE_NO_WARNINGS 1
#endif
#endif
#if 0
#define LZO_0xffffUL 0xfffful
#define LZO_0xffffffffUL 0xfffffffful
#else
#define LZO_0xffffUL 65535ul
#define LZO_0xffffffffUL 4294967295ul
#endif
#define LZO_0xffffL LZO_0xffffUL
#define LZO_0xffffffffL LZO_0xffffffffUL
#if (LZO_0xffffL == LZO_0xffffffffL)
# error "your preprocessor is broken 1"
#endif
#if (16ul * 16384ul != 262144ul)
# error "your preprocessor is broken 2"
#endif
#if 0
#if (32767 >= 4294967295ul)
# error "your preprocessor is broken 3"
#endif
#if (65535u >= 4294967295ul)
# error "your preprocessor is broken 4"
#endif
#endif
#if defined(__COUNTER__)
# ifndef LZO_CFG_USE_COUNTER
# define LZO_CFG_USE_COUNTER 1
# endif
#else
# undef LZO_CFG_USE_COUNTER
#endif
#if (UINT_MAX == LZO_0xffffL)
#if defined(__ZTC__) && defined(__I86__) && !defined(__OS2__)
# if !defined(MSDOS)
# define MSDOS 1
# endif
# if !defined(_MSDOS)
# define _MSDOS 1
# endif
#elif 0 && defined(__VERSION) && defined(MB_LEN_MAX)
# if (__VERSION == 520) && (MB_LEN_MAX == 1)
# if !defined(__AZTEC_C__)
# define __AZTEC_C__ __VERSION
# endif
# if !defined(__DOS__)
# define __DOS__ 1
# endif
# endif
#endif
#endif
#if defined(_MSC_VER) && defined(M_I86HM) && (UINT_MAX == LZO_0xffffL)
# define ptrdiff_t long
# define _PTRDIFF_T_DEFINED 1
#endif
#if (UINT_MAX == LZO_0xffffL)
# undef __LZO_RENAME_A
# undef __LZO_RENAME_B
# if defined(__AZTEC_C__) && defined(__DOS__)
# define __LZO_RENAME_A 1
# elif defined(_MSC_VER) && defined(MSDOS)
# if (_MSC_VER < 600)
# define __LZO_RENAME_A 1
# elif (_MSC_VER < 700)
# define __LZO_RENAME_B 1
# endif
# elif defined(__TSC__) && defined(__OS2__)
# define __LZO_RENAME_A 1
# elif defined(__MSDOS__) && defined(__TURBOC__) && (__TURBOC__ < 0x0410)
# define __LZO_RENAME_A 1
# elif defined(__PACIFIC__) && defined(DOS)
# if !defined(__far)
# define __far far
# endif
# if !defined(__near)
# define __near near
# endif
# endif
# if defined(__LZO_RENAME_A)
# if !defined(__cdecl)
# define __cdecl cdecl
# endif
# if !defined(__far)
# define __far far
# endif
# if !defined(__huge)
# define __huge huge
# endif
# if !defined(__near)
# define __near near
# endif
# if !defined(__pascal)
# define __pascal pascal
# endif
# if !defined(__huge)
# define __huge huge
# endif
# elif defined(__LZO_RENAME_B)
# if !defined(__cdecl)
# define __cdecl _cdecl
# endif
# if !defined(__far)
# define __far _far
# endif
# if !defined(__huge)
# define __huge _huge
# endif
# if !defined(__near)
# define __near _near
# endif
# if !defined(__pascal)
# define __pascal _pascal
# endif
# elif (defined(__PUREC__) || defined(__TURBOC__)) && defined(__TOS__)
# if !defined(__cdecl)
# define __cdecl cdecl
# endif
# if !defined(__pascal)
# define __pascal pascal
# endif
# endif
# undef __LZO_RENAME_A
# undef __LZO_RENAME_B
#endif
#if (UINT_MAX == LZO_0xffffL)
#if defined(__AZTEC_C__) && defined(__DOS__)
# define LZO_BROKEN_CDECL_ALT_SYNTAX 1
#elif defined(_MSC_VER) && defined(MSDOS)
# if (_MSC_VER < 600)
# define LZO_BROKEN_INTEGRAL_CONSTANTS 1
# endif
# if (_MSC_VER < 700)
# define LZO_BROKEN_INTEGRAL_PROMOTION 1
# define LZO_BROKEN_SIZEOF 1
# endif
#elif defined(__PACIFIC__) && defined(DOS)
# define LZO_BROKEN_INTEGRAL_CONSTANTS 1
#elif defined(__TURBOC__) && defined(__MSDOS__)
# if (__TURBOC__ < 0x0150)
# define LZO_BROKEN_CDECL_ALT_SYNTAX 1
# define LZO_BROKEN_INTEGRAL_CONSTANTS 1
# define LZO_BROKEN_INTEGRAL_PROMOTION 1
# endif
# if (__TURBOC__ < 0x0200)
# define LZO_BROKEN_SIZEOF 1
# endif
# if (__TURBOC__ < 0x0400) && defined(__cplusplus)
# define LZO_BROKEN_CDECL_ALT_SYNTAX 1
# endif
#elif (defined(__PUREC__) || defined(__TURBOC__)) && defined(__TOS__)
# define LZO_BROKEN_CDECL_ALT_SYNTAX 1
# define LZO_BROKEN_SIZEOF 1
#endif
#endif
#if defined(__WATCOMC__) && (__WATCOMC__ < 900)
# define LZO_BROKEN_INTEGRAL_CONSTANTS 1
#endif
#if defined(_CRAY) && defined(_CRAY1)
# define LZO_BROKEN_SIGNED_RIGHT_SHIFT 1
#endif
#define LZO_PP_STRINGIZE(x) #x
#define LZO_PP_MACRO_EXPAND(x) LZO_PP_STRINGIZE(x)
#define LZO_PP_CONCAT0() /*empty*/
#define LZO_PP_CONCAT1(a) a
#define LZO_PP_CONCAT2(a,b) a ## b
#define LZO_PP_CONCAT3(a,b,c) a ## b ## c
#define LZO_PP_CONCAT4(a,b,c,d) a ## b ## c ## d
#define LZO_PP_CONCAT5(a,b,c,d,e) a ## b ## c ## d ## e
#define LZO_PP_CONCAT6(a,b,c,d,e,f) a ## b ## c ## d ## e ## f
#define LZO_PP_CONCAT7(a,b,c,d,e,f,g) a ## b ## c ## d ## e ## f ## g
#define LZO_PP_ECONCAT0() LZO_PP_CONCAT0()
#define LZO_PP_ECONCAT1(a) LZO_PP_CONCAT1(a)
#define LZO_PP_ECONCAT2(a,b) LZO_PP_CONCAT2(a,b)
#define LZO_PP_ECONCAT3(a,b,c) LZO_PP_CONCAT3(a,b,c)
#define LZO_PP_ECONCAT4(a,b,c,d) LZO_PP_CONCAT4(a,b,c,d)
#define LZO_PP_ECONCAT5(a,b,c,d,e) LZO_PP_CONCAT5(a,b,c,d,e)
#define LZO_PP_ECONCAT6(a,b,c,d,e,f) LZO_PP_CONCAT6(a,b,c,d,e,f)
#define LZO_PP_ECONCAT7(a,b,c,d,e,f,g) LZO_PP_CONCAT7(a,b,c,d,e,f,g)
#define LZO_PP_EMPTY /*empty*/
#define LZO_PP_EMPTY0() /*empty*/
#define LZO_PP_EMPTY1(a) /*empty*/
#define LZO_PP_EMPTY2(a,b) /*empty*/
#define LZO_PP_EMPTY3(a,b,c) /*empty*/
#define LZO_PP_EMPTY4(a,b,c,d) /*empty*/
#define LZO_PP_EMPTY5(a,b,c,d,e) /*empty*/
#define LZO_PP_EMPTY6(a,b,c,d,e,f) /*empty*/
#define LZO_PP_EMPTY7(a,b,c,d,e,f,g) /*empty*/
#if 1
#define LZO_CPP_STRINGIZE(x) #x
#define LZO_CPP_MACRO_EXPAND(x) LZO_CPP_STRINGIZE(x)
#define LZO_CPP_CONCAT2(a,b) a ## b
#define LZO_CPP_CONCAT3(a,b,c) a ## b ## c
#define LZO_CPP_CONCAT4(a,b,c,d) a ## b ## c ## d
#define LZO_CPP_CONCAT5(a,b,c,d,e) a ## b ## c ## d ## e
#define LZO_CPP_CONCAT6(a,b,c,d,e,f) a ## b ## c ## d ## e ## f
#define LZO_CPP_CONCAT7(a,b,c,d,e,f,g) a ## b ## c ## d ## e ## f ## g
#define LZO_CPP_ECONCAT2(a,b) LZO_CPP_CONCAT2(a,b)
#define LZO_CPP_ECONCAT3(a,b,c) LZO_CPP_CONCAT3(a,b,c)
#define LZO_CPP_ECONCAT4(a,b,c,d) LZO_CPP_CONCAT4(a,b,c,d)
#define LZO_CPP_ECONCAT5(a,b,c,d,e) LZO_CPP_CONCAT5(a,b,c,d,e)
#define LZO_CPP_ECONCAT6(a,b,c,d,e,f) LZO_CPP_CONCAT6(a,b,c,d,e,f)
#define LZO_CPP_ECONCAT7(a,b,c,d,e,f,g) LZO_CPP_CONCAT7(a,b,c,d,e,f,g)
#endif
#define __LZO_MASK_GEN(o,b) (((((o) << ((b)-!!(b))) - (o)) << 1) + (o)*!!(b))
#if 1 && defined(__cplusplus)
# if !defined(__STDC_CONSTANT_MACROS)
# define __STDC_CONSTANT_MACROS 1
# endif
# if !defined(__STDC_LIMIT_MACROS)
# define __STDC_LIMIT_MACROS 1
# endif
#endif
#if defined(__cplusplus)
# define LZO_EXTERN_C extern "C"
# define LZO_EXTERN_C_BEGIN extern "C" {
# define LZO_EXTERN_C_END }
#else
# define LZO_EXTERN_C extern
# define LZO_EXTERN_C_BEGIN /*empty*/
# define LZO_EXTERN_C_END /*empty*/
#endif
#if !defined(__LZO_OS_OVERRIDE)
#if (LZO_OS_FREESTANDING)
# define LZO_INFO_OS "freestanding"
#elif (LZO_OS_EMBEDDED)
# define LZO_INFO_OS "embedded"
#elif 1 && defined(__IAR_SYSTEMS_ICC__)
# define LZO_OS_EMBEDDED 1
# define LZO_INFO_OS "embedded"
#elif defined(__CYGWIN__) && defined(__GNUC__)
# define LZO_OS_CYGWIN 1
# define LZO_INFO_OS "cygwin"
#elif defined(__EMX__) && defined(__GNUC__)
# define LZO_OS_EMX 1
# define LZO_INFO_OS "emx"
#elif defined(__BEOS__)
# define LZO_OS_BEOS 1
# define LZO_INFO_OS "beos"
#elif defined(__Lynx__)
# define LZO_OS_LYNXOS 1
# define LZO_INFO_OS "lynxos"
#elif defined(__OS400__)
# define LZO_OS_OS400 1
# define LZO_INFO_OS "os400"
#elif defined(__QNX__)
# define LZO_OS_QNX 1
# define LZO_INFO_OS "qnx"
#elif defined(__BORLANDC__) && defined(__DPMI32__) && (__BORLANDC__ >= 0x0460)
# define LZO_OS_DOS32 1
# define LZO_INFO_OS "dos32"
#elif defined(__BORLANDC__) && defined(__DPMI16__)
# define LZO_OS_DOS16 1
# define LZO_INFO_OS "dos16"
#elif defined(__ZTC__) && defined(DOS386)
# define LZO_OS_DOS32 1
# define LZO_INFO_OS "dos32"
#elif defined(__OS2__) || defined(__OS2V2__)
# if (UINT_MAX == LZO_0xffffL)
# define LZO_OS_OS216 1
# define LZO_INFO_OS "os216"
# elif (UINT_MAX == LZO_0xffffffffL)
# define LZO_OS_OS2 1
# define LZO_INFO_OS "os2"
# else
# error "check your limits.h header"
# endif
#elif defined(__WIN64__) || defined(_WIN64) || defined(WIN64)
# define LZO_OS_WIN64 1
# define LZO_INFO_OS "win64"
#elif defined(__WIN32__) || defined(_WIN32) || defined(WIN32) || defined(__WINDOWS_386__)
# define LZO_OS_WIN32 1
# define LZO_INFO_OS "win32"
#elif defined(__MWERKS__) && defined(__INTEL__)
# define LZO_OS_WIN32 1
# define LZO_INFO_OS "win32"
#elif defined(__WINDOWS__) || defined(_WINDOWS) || defined(_Windows)
# if (UINT_MAX == LZO_0xffffL)
# define LZO_OS_WIN16 1
# define LZO_INFO_OS "win16"
# elif (UINT_MAX == LZO_0xffffffffL)
# define LZO_OS_WIN32 1
# define LZO_INFO_OS "win32"
# else
# error "check your limits.h header"
# endif
#elif defined(__DOS__) || defined(__MSDOS__) || defined(_MSDOS) || defined(MSDOS) || (defined(__PACIFIC__) && defined(DOS))
# if (UINT_MAX == LZO_0xffffL)
# define LZO_OS_DOS16 1
# define LZO_INFO_OS "dos16"
# elif (UINT_MAX == LZO_0xffffffffL)
# define LZO_OS_DOS32 1
# define LZO_INFO_OS "dos32"
# else
# error "check your limits.h header"
# endif
#elif defined(__WATCOMC__)
# if defined(__NT__) && (UINT_MAX == LZO_0xffffL)
# define LZO_OS_DOS16 1
# define LZO_INFO_OS "dos16"
# elif defined(__NT__) && (__WATCOMC__ < 1100)
# define LZO_OS_WIN32 1
# define LZO_INFO_OS "win32"
# elif defined(__linux__) || defined(__LINUX__)
# define LZO_OS_POSIX 1
# define LZO_INFO_OS "posix"
# else
# error "please specify a target using the -bt compiler option"
# endif
#elif defined(__palmos__)
# define LZO_OS_PALMOS 1
# define LZO_INFO_OS "palmos"
#elif defined(__TOS__) || defined(__atarist__)
# define LZO_OS_TOS 1
# define LZO_INFO_OS "tos"
#elif defined(macintosh) && !defined(__ppc__)
# define LZO_OS_MACCLASSIC 1
# define LZO_INFO_OS "macclassic"
#elif defined(__VMS)
# define LZO_OS_VMS 1
# define LZO_INFO_OS "vms"
#elif (defined(__mips__) && defined(__R5900__)) || defined(__MIPS_PSX2__)
# define LZO_OS_CONSOLE 1
# define LZO_OS_CONSOLE_PS2 1
# define LZO_INFO_OS "console"
# define LZO_INFO_OS_CONSOLE "ps2"
#elif defined(__mips__) && defined(__psp__)
# define LZO_OS_CONSOLE 1
# define LZO_OS_CONSOLE_PSP 1
# define LZO_INFO_OS "console"
# define LZO_INFO_OS_CONSOLE "psp"
#else
# define LZO_OS_POSIX 1
# define LZO_INFO_OS "posix"
#endif
#if (LZO_OS_POSIX)
# if defined(_AIX) || defined(__AIX__) || defined(__aix__)
# define LZO_OS_POSIX_AIX 1
# define LZO_INFO_OS_POSIX "aix"
# elif defined(__FreeBSD__)
# define LZO_OS_POSIX_FREEBSD 1
# define LZO_INFO_OS_POSIX "freebsd"
# elif defined(__hpux__) || defined(__hpux)
# define LZO_OS_POSIX_HPUX 1
# define LZO_INFO_OS_POSIX "hpux"
# elif defined(__INTERIX)
# define LZO_OS_POSIX_INTERIX 1
# define LZO_INFO_OS_POSIX "interix"
# elif defined(__IRIX__) || defined(__irix__)
# define LZO_OS_POSIX_IRIX 1
# define LZO_INFO_OS_POSIX "irix"
# elif defined(__linux__) || defined(__linux) || defined(__LINUX__)
# define LZO_OS_POSIX_LINUX 1
# define LZO_INFO_OS_POSIX "linux"
# elif defined(__APPLE__) && defined(__MACH__)
# if ((__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__-0) >= 20000)
# define LZO_OS_POSIX_DARWIN 1040
# define LZO_INFO_OS_POSIX "darwin_iphone"
# elif ((__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__-0) >= 1040)
# define LZO_OS_POSIX_DARWIN __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__
# define LZO_INFO_OS_POSIX "darwin"
# else
# define LZO_OS_POSIX_DARWIN 1
# define LZO_INFO_OS_POSIX "darwin"
# endif
# define LZO_OS_POSIX_MACOSX LZO_OS_POSIX_DARWIN
# elif defined(__minix__) || defined(__minix)
# define LZO_OS_POSIX_MINIX 1
# define LZO_INFO_OS_POSIX "minix"
# elif defined(__NetBSD__)
# define LZO_OS_POSIX_NETBSD 1
# define LZO_INFO_OS_POSIX "netbsd"
# elif defined(__OpenBSD__)
# define LZO_OS_POSIX_OPENBSD 1
# define LZO_INFO_OS_POSIX "openbsd"
# elif defined(__osf__)
# define LZO_OS_POSIX_OSF 1
# define LZO_INFO_OS_POSIX "osf"
# elif defined(__solaris__) || defined(__sun)
# if defined(__SVR4) || defined(__svr4__)
# define LZO_OS_POSIX_SOLARIS 1
# define LZO_INFO_OS_POSIX "solaris"
# else
# define LZO_OS_POSIX_SUNOS 1
# define LZO_INFO_OS_POSIX "sunos"
# endif
# elif defined(__ultrix__) || defined(__ultrix)
# define LZO_OS_POSIX_ULTRIX 1
# define LZO_INFO_OS_POSIX "ultrix"
# elif defined(_UNICOS)
# define LZO_OS_POSIX_UNICOS 1
# define LZO_INFO_OS_POSIX "unicos"
# else
# define LZO_OS_POSIX_UNKNOWN 1
# define LZO_INFO_OS_POSIX "unknown"
# endif
#endif
#endif
#if (LZO_OS_DOS16 || LZO_OS_OS216 || LZO_OS_WIN16)
# if (UINT_MAX != LZO_0xffffL)
# error "unexpected configuration - check your compiler defines"
# endif
# if (ULONG_MAX != LZO_0xffffffffL)
# error "unexpected configuration - check your compiler defines"
# endif
#endif
#if (LZO_OS_DOS32 || LZO_OS_OS2 || LZO_OS_WIN32 || LZO_OS_WIN64)
# if (UINT_MAX != LZO_0xffffffffL)
# error "unexpected configuration - check your compiler defines"
# endif
# if (ULONG_MAX != LZO_0xffffffffL)
# error "unexpected configuration - check your compiler defines"
# endif
#endif
#if defined(CIL) && defined(_GNUCC) && defined(__GNUC__)
# define LZO_CC_CILLY 1
# define LZO_INFO_CC "Cilly"
# if defined(__CILLY__)
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__CILLY__)
# else
# define LZO_INFO_CCVER "unknown"
# endif
#elif 0 && defined(SDCC) && defined(__VERSION__) && !defined(__GNUC__)
# define LZO_CC_SDCC 1
# define LZO_INFO_CC "sdcc"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(SDCC)
#elif defined(__PATHSCALE__) && defined(__PATHCC_PATCHLEVEL__)
# define LZO_CC_PATHSCALE (__PATHCC__ * 0x10000L + (__PATHCC_MINOR__-0) * 0x100 + (__PATHCC_PATCHLEVEL__-0))
# define LZO_INFO_CC "Pathscale C"
# define LZO_INFO_CCVER __PATHSCALE__
# if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__VERSION__)
# define LZO_CC_PATHSCALE_GNUC (__GNUC__ * 0x10000L + (__GNUC_MINOR__-0) * 0x100 + (__GNUC_PATCHLEVEL__-0))
# endif
#elif defined(__INTEL_COMPILER) && ((__INTEL_COMPILER-0) > 0)
# define LZO_CC_INTELC __INTEL_COMPILER
# define LZO_INFO_CC "Intel C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__INTEL_COMPILER)
# if defined(_MSC_VER) && ((_MSC_VER-0) > 0)
# define LZO_CC_INTELC_MSC _MSC_VER
# elif defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__VERSION__)
# define LZO_CC_INTELC_GNUC (__GNUC__ * 0x10000L + (__GNUC_MINOR__-0) * 0x100 + (__GNUC_PATCHLEVEL__-0))
# endif
#elif defined(__POCC__) && defined(_WIN32)
# define LZO_CC_PELLESC 1
# define LZO_INFO_CC "Pelles C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__POCC__)
#elif defined(__ARMCC_VERSION) && defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__VERSION__)
# if defined(__GNUC_PATCHLEVEL__)
# define LZO_CC_ARMCC_GNUC (__GNUC__ * 0x10000L + (__GNUC_MINOR__-0) * 0x100 + (__GNUC_PATCHLEVEL__-0))
# else
# define LZO_CC_ARMCC_GNUC (__GNUC__ * 0x10000L + (__GNUC_MINOR__-0) * 0x100)
# endif
# define LZO_CC_ARMCC __ARMCC_VERSION
# define LZO_INFO_CC "ARM C Compiler"
# define LZO_INFO_CCVER __VERSION__
#elif defined(__clang__) && defined(__llvm__) && defined(__VERSION__)
# if defined(__clang_major__) && defined(__clang_minor__) && defined(__clang_patchlevel__)
# define LZO_CC_CLANG (__clang_major__ * 0x10000L + (__clang_minor__-0) * 0x100 + (__clang_patchlevel__-0))
# else
# define LZO_CC_CLANG 0x010000L
# endif
# if defined(_MSC_VER) && ((_MSC_VER-0) > 0)
# define LZO_CC_CLANG_MSC _MSC_VER
# elif defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__VERSION__)
# define LZO_CC_CLANG_GNUC (__GNUC__ * 0x10000L + (__GNUC_MINOR__-0) * 0x100 + (__GNUC_PATCHLEVEL__-0))
# endif
# define LZO_INFO_CC "clang"
# define LZO_INFO_CCVER __VERSION__
#elif defined(__llvm__) && defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__VERSION__)
# if defined(__GNUC_PATCHLEVEL__)
# define LZO_CC_LLVM_GNUC (__GNUC__ * 0x10000L + (__GNUC_MINOR__-0) * 0x100 + (__GNUC_PATCHLEVEL__-0))
# else
# define LZO_CC_LLVM_GNUC (__GNUC__ * 0x10000L + (__GNUC_MINOR__-0) * 0x100)
# endif
# define LZO_CC_LLVM LZO_CC_LLVM_GNUC
# define LZO_INFO_CC "llvm-gcc"
# define LZO_INFO_CCVER __VERSION__
#elif defined(__ACK__) && defined(_ACK)
# define LZO_CC_ACK 1
# define LZO_INFO_CC "Amsterdam Compiler Kit C"
# define LZO_INFO_CCVER "unknown"
#elif defined(__ARMCC_VERSION) && !defined(__GNUC__)
# define LZO_CC_ARMCC __ARMCC_VERSION
# define LZO_CC_ARMCC_ARMCC __ARMCC_VERSION
# define LZO_INFO_CC "ARM C Compiler"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__ARMCC_VERSION)
#elif defined(__AZTEC_C__)
# define LZO_CC_AZTECC 1
# define LZO_INFO_CC "Aztec C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__AZTEC_C__)
#elif defined(__CODEGEARC__)
# define LZO_CC_CODEGEARC 1
# define LZO_INFO_CC "CodeGear C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__CODEGEARC__)
#elif defined(__BORLANDC__)
# define LZO_CC_BORLANDC 1
# define LZO_INFO_CC "Borland C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__BORLANDC__)
#elif defined(_CRAYC) && defined(_RELEASE)
# define LZO_CC_CRAYC 1
# define LZO_INFO_CC "Cray C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(_RELEASE)
#elif defined(__DMC__) && defined(__SC__)
# define LZO_CC_DMC 1
# define LZO_INFO_CC "Digital Mars C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__DMC__)
#elif defined(__DECC)
# define LZO_CC_DECC 1
# define LZO_INFO_CC "DEC C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__DECC)
#elif (defined(__ghs) || defined(__ghs__)) && defined(__GHS_VERSION_NUMBER) && ((__GHS_VERSION_NUMBER-0) > 0)
# define LZO_CC_GHS 1
# define LZO_INFO_CC "Green Hills C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__GHS_VERSION_NUMBER)
# if defined(_MSC_VER) && ((_MSC_VER-0) > 0)
# define LZO_CC_GHS_MSC _MSC_VER
# elif defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__VERSION__)
# define LZO_CC_GHS_GNUC (__GNUC__ * 0x10000L + (__GNUC_MINOR__-0) * 0x100 + (__GNUC_PATCHLEVEL__-0))
# endif
#elif defined(__HIGHC__)
# define LZO_CC_HIGHC 1
# define LZO_INFO_CC "MetaWare High C"
# define LZO_INFO_CCVER "unknown"
#elif defined(__HP_aCC) && ((__HP_aCC-0) > 0)
# define LZO_CC_HPACC __HP_aCC
# define LZO_INFO_CC "HP aCC"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__HP_aCC)
#elif defined(__IAR_SYSTEMS_ICC__)
# define LZO_CC_IARC 1
# define LZO_INFO_CC "IAR C"
# if defined(__VER__)
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__VER__)
# else
# define LZO_INFO_CCVER "unknown"
# endif
#elif defined(__IBMC__) && ((__IBMC__-0) > 0)
# define LZO_CC_IBMC __IBMC__
# define LZO_INFO_CC "IBM C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__IBMC__)
#elif defined(__IBMCPP__) && ((__IBMCPP__-0) > 0)
# define LZO_CC_IBMC __IBMCPP__
# define LZO_INFO_CC "IBM C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__IBMCPP__)
#elif defined(__KEIL__) && defined(__C166__)
# define LZO_CC_KEILC 1
# define LZO_INFO_CC "Keil C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__C166__)
#elif defined(__LCC__) && defined(_WIN32) && defined(__LCCOPTIMLEVEL)
# define LZO_CC_LCCWIN32 1
# define LZO_INFO_CC "lcc-win32"
# define LZO_INFO_CCVER "unknown"
#elif defined(__LCC__)
# define LZO_CC_LCC 1
# define LZO_INFO_CC "lcc"
# if defined(__LCC_VERSION__)
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__LCC_VERSION__)
# else
# define LZO_INFO_CCVER "unknown"
# endif
#elif defined(__MWERKS__) && ((__MWERKS__-0) > 0)
# define LZO_CC_MWERKS __MWERKS__
# define LZO_INFO_CC "Metrowerks C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__MWERKS__)
#elif (defined(__NDPC__) || defined(__NDPX__)) && defined(__i386)
# define LZO_CC_NDPC 1
# define LZO_INFO_CC "Microway NDP C"
# define LZO_INFO_CCVER "unknown"
#elif defined(__PACIFIC__)
# define LZO_CC_PACIFICC 1
# define LZO_INFO_CC "Pacific C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__PACIFIC__)
#elif defined(__PGI) && defined(__PGIC__) && defined(__PGIC_MINOR__)
# if defined(__PGIC_PATCHLEVEL__)
# define LZO_CC_PGI (__PGIC__ * 0x10000L + (__PGIC_MINOR__-0) * 0x100 + (__PGIC_PATCHLEVEL__-0))
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__PGIC__) "." LZO_PP_MACRO_EXPAND(__PGIC_MINOR__) "." LZO_PP_MACRO_EXPAND(__PGIC_PATCHLEVEL__)
# else
# define LZO_CC_PGI (__PGIC__ * 0x10000L + (__PGIC_MINOR__-0) * 0x100)
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__PGIC__) "." LZO_PP_MACRO_EXPAND(__PGIC_MINOR__) ".0"
# endif
# define LZO_INFO_CC "Portland Group PGI C"
#elif defined(__PGI) && (defined(__linux__) || defined(__WIN32__))
# define LZO_CC_PGI 1
# define LZO_INFO_CC "Portland Group PGI C"
# define LZO_INFO_CCVER "unknown"
#elif defined(__PUREC__) && defined(__TOS__)
# define LZO_CC_PUREC 1
# define LZO_INFO_CC "Pure C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__PUREC__)
#elif defined(__SC__) && defined(__ZTC__)
# define LZO_CC_SYMANTECC 1
# define LZO_INFO_CC "Symantec C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__SC__)
#elif defined(__SUNPRO_C)
# define LZO_INFO_CC "SunPro C"
# if ((__SUNPRO_C-0) > 0)
# define LZO_CC_SUNPROC __SUNPRO_C
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__SUNPRO_C)
# else
# define LZO_CC_SUNPROC 1
# define LZO_INFO_CCVER "unknown"
# endif
#elif defined(__SUNPRO_CC)
# define LZO_INFO_CC "SunPro C"
# if ((__SUNPRO_CC-0) > 0)
# define LZO_CC_SUNPROC __SUNPRO_CC
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__SUNPRO_CC)
# else
# define LZO_CC_SUNPROC 1
# define LZO_INFO_CCVER "unknown"
# endif
#elif defined(__TINYC__)
# define LZO_CC_TINYC 1
# define LZO_INFO_CC "Tiny C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__TINYC__)
#elif defined(__TSC__)
# define LZO_CC_TOPSPEEDC 1
# define LZO_INFO_CC "TopSpeed C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__TSC__)
#elif defined(__WATCOMC__)
# define LZO_CC_WATCOMC 1
# define LZO_INFO_CC "Watcom C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__WATCOMC__)
#elif defined(__TURBOC__)
# define LZO_CC_TURBOC 1
# define LZO_INFO_CC "Turbo C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__TURBOC__)
#elif defined(__ZTC__)
# define LZO_CC_ZORTECHC 1
# define LZO_INFO_CC "Zortech C"
# if ((__ZTC__-0) == 0x310)
# define LZO_INFO_CCVER "0x310"
# else
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__ZTC__)
# endif
#elif defined(__GNUC__) && defined(__VERSION__)
# if defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
# define LZO_CC_GNUC (__GNUC__ * 0x10000L + (__GNUC_MINOR__-0) * 0x100 + (__GNUC_PATCHLEVEL__-0))
# elif defined(__GNUC_MINOR__)
# define LZO_CC_GNUC (__GNUC__ * 0x10000L + (__GNUC_MINOR__-0) * 0x100)
# else
# define LZO_CC_GNUC (__GNUC__ * 0x10000L)
# endif
# define LZO_INFO_CC "gcc"
# define LZO_INFO_CCVER __VERSION__
#elif defined(_MSC_VER) && ((_MSC_VER-0) > 0)
# define LZO_CC_MSC _MSC_VER
# define LZO_INFO_CC "Microsoft C"
# if defined(_MSC_FULL_VER)
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(_MSC_VER) "." LZO_PP_MACRO_EXPAND(_MSC_FULL_VER)
# else
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(_MSC_VER)
# endif
#else
# define LZO_CC_UNKNOWN 1
# define LZO_INFO_CC "unknown"
# define LZO_INFO_CCVER "unknown"
#endif
#if (LZO_CC_GNUC) && defined(__OPEN64__)
# if defined(__OPENCC__) && defined(__OPENCC_MINOR__) && defined(__OPENCC_PATCHLEVEL__)
# define LZO_CC_OPEN64 (__OPENCC__ * 0x10000L + (__OPENCC_MINOR__-0) * 0x100 + (__OPENCC_PATCHLEVEL__-0))
# define LZO_CC_OPEN64_GNUC LZO_CC_GNUC
# endif
#endif
#if (LZO_CC_GNUC) && defined(__PCC__)
# if defined(__PCC__) && defined(__PCC_MINOR__) && defined(__PCC_MINORMINOR__)
# define LZO_CC_PCC (__PCC__ * 0x10000L + (__PCC_MINOR__-0) * 0x100 + (__PCC_MINORMINOR__-0))
# define LZO_CC_PCC_GNUC LZO_CC_GNUC
# endif
#endif
#if 0 && (LZO_CC_MSC && (_MSC_VER >= 1200)) && !defined(_MSC_FULL_VER)
# error "LZO_CC_MSC: _MSC_FULL_VER is not defined"
#endif
#if !defined(__LZO_ARCH_OVERRIDE) && !(LZO_ARCH_GENERIC) && defined(_CRAY)
# if (UINT_MAX > LZO_0xffffffffL) && defined(_CRAY)
# if defined(_CRAYMPP) || defined(_CRAYT3D) || defined(_CRAYT3E)
# define LZO_ARCH_CRAY_MPP 1
# elif defined(_CRAY1)
# define LZO_ARCH_CRAY_PVP 1
# endif
# endif
#endif
#if !defined(__LZO_ARCH_OVERRIDE)
#if (LZO_ARCH_GENERIC)
# define LZO_INFO_ARCH "generic"
#elif (LZO_OS_DOS16 || LZO_OS_OS216 || LZO_OS_WIN16)
# define LZO_ARCH_I086 1
# define LZO_INFO_ARCH "i086"
#elif defined(__aarch64__) || defined(_M_ARM64)
# define LZO_ARCH_ARM64 1
# define LZO_INFO_ARCH "arm64"
#elif defined(__alpha__) || defined(__alpha) || defined(_M_ALPHA)
# define LZO_ARCH_ALPHA 1
# define LZO_INFO_ARCH "alpha"
#elif (LZO_ARCH_CRAY_MPP) && (defined(_CRAYT3D) || defined(_CRAYT3E))
# define LZO_ARCH_ALPHA 1
# define LZO_INFO_ARCH "alpha"
#elif defined(__amd64__) || defined(__x86_64__) || defined(_M_AMD64)
# define LZO_ARCH_AMD64 1
# define LZO_INFO_ARCH "amd64"
#elif defined(__thumb__) || (defined(_M_ARM) && defined(_M_THUMB))
# define LZO_ARCH_ARM 1
# define LZO_ARCH_ARM_THUMB 1
# define LZO_INFO_ARCH "arm_thumb"
#elif defined(__IAR_SYSTEMS_ICC__) && defined(__ICCARM__)
# define LZO_ARCH_ARM 1
# if defined(__CPU_MODE__) && ((__CPU_MODE__-0) == 1)
# define LZO_ARCH_ARM_THUMB 1
# define LZO_INFO_ARCH "arm_thumb"
# elif defined(__CPU_MODE__) && ((__CPU_MODE__-0) == 2)
# define LZO_INFO_ARCH "arm"
# else
# define LZO_INFO_ARCH "arm"
# endif
#elif defined(__arm__) || defined(_M_ARM)
# define LZO_ARCH_ARM 1
# define LZO_INFO_ARCH "arm"
#elif (UINT_MAX <= LZO_0xffffL) && defined(__AVR__)
# define LZO_ARCH_AVR 1
# define LZO_INFO_ARCH "avr"
#elif defined(__avr32__) || defined(__AVR32__)
# define LZO_ARCH_AVR32 1
# define LZO_INFO_ARCH "avr32"
#elif defined(__bfin__)
# define LZO_ARCH_BLACKFIN 1
# define LZO_INFO_ARCH "blackfin"
#elif (UINT_MAX == LZO_0xffffL) && defined(__C166__)
# define LZO_ARCH_C166 1
# define LZO_INFO_ARCH "c166"
#elif defined(__cris__)
# define LZO_ARCH_CRIS 1
# define LZO_INFO_ARCH "cris"
#elif defined(__IAR_SYSTEMS_ICC__) && defined(__ICCEZ80__)
# define LZO_ARCH_EZ80 1
# define LZO_INFO_ARCH "ez80"
#elif defined(__H8300__) || defined(__H8300H__) || defined(__H8300S__) || defined(__H8300SX__)
# define LZO_ARCH_H8300 1
# define LZO_INFO_ARCH "h8300"
#elif defined(__hppa__) || defined(__hppa)
# define LZO_ARCH_HPPA 1
# define LZO_INFO_ARCH "hppa"
#elif defined(__386__) || defined(__i386__) || defined(__i386) || defined(_M_IX86) || defined(_M_I386)
# define LZO_ARCH_I386 1
# define LZO_ARCH_IA32 1
# define LZO_INFO_ARCH "i386"
#elif (LZO_CC_ZORTECHC && defined(__I86__))
# define LZO_ARCH_I386 1
# define LZO_ARCH_IA32 1
# define LZO_INFO_ARCH "i386"
#elif (LZO_OS_DOS32 && LZO_CC_HIGHC) && defined(_I386)
# define LZO_ARCH_I386 1
# define LZO_ARCH_IA32 1
# define LZO_INFO_ARCH "i386"
#elif defined(__ia64__) || defined(__ia64) || defined(_M_IA64)
# define LZO_ARCH_IA64 1
# define LZO_INFO_ARCH "ia64"
#elif (UINT_MAX == LZO_0xffffL) && defined(__m32c__)
# define LZO_ARCH_M16C 1
# define LZO_INFO_ARCH "m16c"
#elif defined(__IAR_SYSTEMS_ICC__) && defined(__ICCM16C__)
# define LZO_ARCH_M16C 1
# define LZO_INFO_ARCH "m16c"
#elif defined(__m32r__)
# define LZO_ARCH_M32R 1
# define LZO_INFO_ARCH "m32r"
#elif (LZO_OS_TOS) || defined(__m68k__) || defined(__m68000__) || defined(__mc68000__) || defined(__mc68020__) || defined(_M_M68K)
# define LZO_ARCH_M68K 1
# define LZO_INFO_ARCH "m68k"
#elif (UINT_MAX == LZO_0xffffL) && defined(__C251__)
# define LZO_ARCH_MCS251 1
# define LZO_INFO_ARCH "mcs251"
#elif (UINT_MAX == LZO_0xffffL) && defined(__C51__)
# define LZO_ARCH_MCS51 1
# define LZO_INFO_ARCH "mcs51"
#elif defined(__IAR_SYSTEMS_ICC__) && defined(__ICC8051__)
# define LZO_ARCH_MCS51 1
# define LZO_INFO_ARCH "mcs51"
#elif defined(__mips__) || defined(__mips) || defined(_MIPS_ARCH) || defined(_M_MRX000)
# define LZO_ARCH_MIPS 1
# define LZO_INFO_ARCH "mips"
#elif (UINT_MAX == LZO_0xffffL) && defined(__MSP430__)
# define LZO_ARCH_MSP430 1
# define LZO_INFO_ARCH "msp430"
#elif defined(__IAR_SYSTEMS_ICC__) && defined(__ICC430__)
# define LZO_ARCH_MSP430 1
# define LZO_INFO_ARCH "msp430"
#elif defined(__powerpc__) || defined(__powerpc) || defined(__ppc__) || defined(__PPC__) || defined(_M_PPC) || defined(_ARCH_PPC) || defined(_ARCH_PWR)
# define LZO_ARCH_POWERPC 1
# define LZO_INFO_ARCH "powerpc"
#elif defined(__s390__) || defined(__s390) || defined(__s390x__) || defined(__s390x)
# define LZO_ARCH_S390 1
# define LZO_INFO_ARCH "s390"
#elif defined(__sh__) || defined(_M_SH)
# define LZO_ARCH_SH 1
# define LZO_INFO_ARCH "sh"
#elif defined(__sparc__) || defined(__sparc) || defined(__sparcv8)
# define LZO_ARCH_SPARC 1
# define LZO_INFO_ARCH "sparc"
#elif defined(__SPU__)
# define LZO_ARCH_SPU 1
# define LZO_INFO_ARCH "spu"
#elif (UINT_MAX == LZO_0xffffL) && defined(__z80)
# define LZO_ARCH_Z80 1
# define LZO_INFO_ARCH "z80"
#elif (LZO_ARCH_CRAY_PVP)
# if defined(_CRAYSV1)
# define LZO_ARCH_CRAY_SV1 1
# define LZO_INFO_ARCH "cray_sv1"
# elif (_ADDR64)
# define LZO_ARCH_CRAY_T90 1
# define LZO_INFO_ARCH "cray_t90"
# elif (_ADDR32)
# define LZO_ARCH_CRAY_YMP 1
# define LZO_INFO_ARCH "cray_ymp"
# else
# define LZO_ARCH_CRAY_XMP 1
# define LZO_INFO_ARCH "cray_xmp"
# endif
#else
# define LZO_ARCH_UNKNOWN 1
# define LZO_INFO_ARCH "unknown"
#endif
#endif
#if 1 && (LZO_ARCH_UNKNOWN) && (LZO_OS_DOS32 || LZO_OS_OS2)
# error "FIXME - missing define for CPU architecture"
#endif
#if 1 && (LZO_ARCH_UNKNOWN) && (LZO_OS_WIN32)
# error "FIXME - missing LZO_OS_WIN32 define for CPU architecture"
#endif
#if 1 && (LZO_ARCH_UNKNOWN) && (LZO_OS_WIN64)
# error "FIXME - missing LZO_OS_WIN64 define for CPU architecture"
#endif
#if (LZO_OS_OS216 || LZO_OS_WIN16)
# define LZO_ARCH_I086PM 1
#elif 1 && (LZO_OS_DOS16 && defined(BLX286))
# define LZO_ARCH_I086PM 1
#elif 1 && (LZO_OS_DOS16 && defined(DOSX286))
# define LZO_ARCH_I086PM 1
#elif 1 && (LZO_OS_DOS16 && LZO_CC_BORLANDC && defined(__DPMI16__))
# define LZO_ARCH_I086PM 1
#endif
#if (LZO_ARCH_AMD64 && !LZO_ARCH_X64)
# define LZO_ARCH_X64 1
#elif (!LZO_ARCH_AMD64 && LZO_ARCH_X64) && defined(__LZO_ARCH_OVERRIDE)
# define LZO_ARCH_AMD64 1
#endif
#if (LZO_ARCH_ARM64 && !LZO_ARCH_AARCH64)
# define LZO_ARCH_AARCH64 1
#elif (!LZO_ARCH_ARM64 && LZO_ARCH_AARCH64) && defined(__LZO_ARCH_OVERRIDE)
# define LZO_ARCH_ARM64 1
#endif
#if (LZO_ARCH_I386 && !LZO_ARCH_X86)
# define LZO_ARCH_X86 1
#elif (!LZO_ARCH_I386 && LZO_ARCH_X86) && defined(__LZO_ARCH_OVERRIDE)
# define LZO_ARCH_I386 1
#endif
#if (LZO_ARCH_AMD64 && !LZO_ARCH_X64) || (!LZO_ARCH_AMD64 && LZO_ARCH_X64)
# error "unexpected configuration - check your compiler defines"
#endif
#if (LZO_ARCH_ARM64 && !LZO_ARCH_AARCH64) || (!LZO_ARCH_ARM64 && LZO_ARCH_AARCH64)
# error "unexpected configuration - check your compiler defines"
#endif
#if (LZO_ARCH_I386 && !LZO_ARCH_X86) || (!LZO_ARCH_I386 && LZO_ARCH_X86)
# error "unexpected configuration - check your compiler defines"
#endif
#if (LZO_ARCH_ARM_THUMB && !LZO_ARCH_ARM)
# error "unexpected configuration - check your compiler defines"
#endif
#if (LZO_ARCH_ARM_THUMB1 && !LZO_ARCH_ARM_THUMB)
# error "unexpected configuration - check your compiler defines"
#endif
#if (LZO_ARCH_ARM_THUMB2 && !LZO_ARCH_ARM_THUMB)
# error "unexpected configuration - check your compiler defines"
#endif
#if (LZO_ARCH_ARM_THUMB1 && LZO_ARCH_ARM_THUMB2)
# error "unexpected configuration - check your compiler defines"
#endif
#if (LZO_ARCH_I086PM && !LZO_ARCH_I086)
# error "unexpected configuration - check your compiler defines"
#endif
#if (LZO_ARCH_I086)
# if (UINT_MAX != LZO_0xffffL)
# error "unexpected configuration - check your compiler defines"
# endif
# if (ULONG_MAX != LZO_0xffffffffL)
# error "unexpected configuration - check your compiler defines"
# endif
#endif
#if (LZO_ARCH_I386)
# if (UINT_MAX != LZO_0xffffL) && defined(__i386_int16__)
# error "unexpected configuration - check your compiler defines"
# endif
# if (UINT_MAX != LZO_0xffffffffL) && !defined(__i386_int16__)
# error "unexpected configuration - check your compiler defines"
# endif
# if (ULONG_MAX != LZO_0xffffffffL)
# error "unexpected configuration - check your compiler defines"
# endif
#endif
#if (LZO_ARCH_AMD64 || LZO_ARCH_I386)
# if !defined(LZO_TARGET_FEATURE_SSE2)
# if defined(__SSE2__)
# define LZO_TARGET_FEATURE_SSE2 1
# elif defined(_MSC_VER) && ((defined(_M_IX86_FP) && ((_M_IX86_FP)+0 >= 2)) || defined(_M_AMD64))
# define LZO_TARGET_FEATURE_SSE2 1
# endif
# endif
# if !defined(LZO_TARGET_FEATURE_SSSE3)
# if (LZO_TARGET_FEATURE_SSE2)
# if defined(__SSSE3__)
# define LZO_TARGET_FEATURE_SSSE3 1
# elif defined(_MSC_VER) && defined(__AVX__)
# define LZO_TARGET_FEATURE_SSSE3 1
# endif
# endif
# endif
# if !defined(LZO_TARGET_FEATURE_SSE4_2)
# if (LZO_TARGET_FEATURE_SSSE3)
# if defined(__SSE4_2__)
# define LZO_TARGET_FEATURE_SSE4_2 1
# endif
# endif
# endif
# if !defined(LZO_TARGET_FEATURE_AVX)
# if (LZO_TARGET_FEATURE_SSSE3)
# if defined(__AVX__)
# define LZO_TARGET_FEATURE_AVX 1
# endif
# endif
# endif
# if !defined(LZO_TARGET_FEATURE_AVX2)
# if (LZO_TARGET_FEATURE_AVX)
# if defined(__AVX2__)
# define LZO_TARGET_FEATURE_AVX2 1
# endif
# endif
# endif
#endif
#if (LZO_TARGET_FEATURE_SSSE3 && !(LZO_TARGET_FEATURE_SSE2))
# error "unexpected configuration - check your compiler defines"
#endif
#if (LZO_TARGET_FEATURE_SSE4_2 && !(LZO_TARGET_FEATURE_SSSE3))
# error "unexpected configuration - check your compiler defines"
#endif
#if (LZO_TARGET_FEATURE_AVX && !(LZO_TARGET_FEATURE_SSSE3))
# error "unexpected configuration - check your compiler defines"
#endif
#if (LZO_TARGET_FEATURE_AVX2 && !(LZO_TARGET_FEATURE_AVX))
# error "unexpected configuration - check your compiler defines"
#endif
#if (LZO_ARCH_ARM)
# if !defined(LZO_TARGET_FEATURE_NEON)
# if defined(__ARM_NEON__)
# define LZO_TARGET_FEATURE_NEON 1
# endif
# endif
#elif (LZO_ARCH_ARM64)
# if !defined(LZO_TARGET_FEATURE_NEON)
# if 1
# define LZO_TARGET_FEATURE_NEON 1
# endif
# endif
#endif
#if 0
#elif !defined(__LZO_MM_OVERRIDE)
#if (LZO_ARCH_I086)
#if (UINT_MAX != LZO_0xffffL)
# error "unexpected configuration - check your compiler defines"
#endif
#if defined(__TINY__) || defined(M_I86TM) || defined(_M_I86TM)
# define LZO_MM_TINY 1
#elif defined(__HUGE__) || defined(_HUGE_) || defined(M_I86HM) || defined(_M_I86HM)
# define LZO_MM_HUGE 1
#elif defined(__SMALL__) || defined(M_I86SM) || defined(_M_I86SM) || defined(SMALL_MODEL)
# define LZO_MM_SMALL 1
#elif defined(__MEDIUM__) || defined(M_I86MM) || defined(_M_I86MM)
# define LZO_MM_MEDIUM 1
#elif defined(__COMPACT__) || defined(M_I86CM) || defined(_M_I86CM)
# define LZO_MM_COMPACT 1
#elif defined(__LARGE__) || defined(M_I86LM) || defined(_M_I86LM) || defined(LARGE_MODEL)
# define LZO_MM_LARGE 1
#elif (LZO_CC_AZTECC)
# if defined(_LARGE_CODE) && defined(_LARGE_DATA)
# define LZO_MM_LARGE 1
# elif defined(_LARGE_CODE)
# define LZO_MM_MEDIUM 1
# elif defined(_LARGE_DATA)
# define LZO_MM_COMPACT 1
# else
# define LZO_MM_SMALL 1
# endif
#elif (LZO_CC_ZORTECHC && defined(__VCM__))
# define LZO_MM_LARGE 1
#else
# error "unknown LZO_ARCH_I086 memory model"
#endif
#if (LZO_OS_DOS16 || LZO_OS_OS216 || LZO_OS_WIN16)
#define LZO_HAVE_MM_HUGE_PTR 1
#define LZO_HAVE_MM_HUGE_ARRAY 1
#if (LZO_MM_TINY)
# undef LZO_HAVE_MM_HUGE_ARRAY
#endif
#if (LZO_CC_AZTECC || LZO_CC_PACIFICC || LZO_CC_ZORTECHC)
# undef LZO_HAVE_MM_HUGE_PTR
# undef LZO_HAVE_MM_HUGE_ARRAY
#elif (LZO_CC_DMC || LZO_CC_SYMANTECC)
# undef LZO_HAVE_MM_HUGE_ARRAY
#elif (LZO_CC_MSC && defined(_QC))
# undef LZO_HAVE_MM_HUGE_ARRAY
# if (_MSC_VER < 600)
# undef LZO_HAVE_MM_HUGE_PTR
# endif
#elif (LZO_CC_TURBOC && (__TURBOC__ < 0x0295))
# undef LZO_HAVE_MM_HUGE_ARRAY
#endif
#if (LZO_ARCH_I086PM) && !(LZO_HAVE_MM_HUGE_PTR)
# if (LZO_OS_DOS16)
# error "unexpected configuration - check your compiler defines"
# elif (LZO_CC_ZORTECHC)
# else
# error "unexpected configuration - check your compiler defines"
# endif
#endif
#ifdef __cplusplus
extern "C" {
#endif
#if (LZO_CC_BORLANDC && (__BORLANDC__ >= 0x0200))
extern void __near __cdecl _AHSHIFT(void);
# define LZO_MM_AHSHIFT ((unsigned) _AHSHIFT)
#elif (LZO_CC_DMC || LZO_CC_SYMANTECC || LZO_CC_ZORTECHC)
extern void __near __cdecl _AHSHIFT(void);
# define LZO_MM_AHSHIFT ((unsigned) _AHSHIFT)
#elif (LZO_CC_MSC || LZO_CC_TOPSPEEDC)
extern void __near __cdecl _AHSHIFT(void);
# define LZO_MM_AHSHIFT ((unsigned) _AHSHIFT)
#elif (LZO_CC_TURBOC && (__TURBOC__ >= 0x0295))
extern void __near __cdecl _AHSHIFT(void);
# define LZO_MM_AHSHIFT ((unsigned) _AHSHIFT)
#elif ((LZO_CC_AZTECC || LZO_CC_PACIFICC || LZO_CC_TURBOC) && LZO_OS_DOS16)
# define LZO_MM_AHSHIFT 12
#elif (LZO_CC_WATCOMC)
extern unsigned char _HShift;
# define LZO_MM_AHSHIFT ((unsigned) _HShift)
#else
# error "FIXME - implement LZO_MM_AHSHIFT"
#endif
#ifdef __cplusplus
}
#endif
#endif
#elif (LZO_ARCH_C166)
#if !defined(__MODEL__)
# error "FIXME - LZO_ARCH_C166 __MODEL__"
#elif ((__MODEL__) == 0)
# define LZO_MM_SMALL 1
#elif ((__MODEL__) == 1)
# define LZO_MM_SMALL 1
#elif ((__MODEL__) == 2)
# define LZO_MM_LARGE 1
#elif ((__MODEL__) == 3)
# define LZO_MM_TINY 1
#elif ((__MODEL__) == 4)
# define LZO_MM_XTINY 1
#elif ((__MODEL__) == 5)
# define LZO_MM_XSMALL 1
#else
# error "FIXME - LZO_ARCH_C166 __MODEL__"
#endif
#elif (LZO_ARCH_MCS251)
#if !defined(__MODEL__)
# error "FIXME - LZO_ARCH_MCS251 __MODEL__"
#elif ((__MODEL__) == 0)
# define LZO_MM_SMALL 1
#elif ((__MODEL__) == 2)
# define LZO_MM_LARGE 1
#elif ((__MODEL__) == 3)
# define LZO_MM_TINY 1
#elif ((__MODEL__) == 4)
# define LZO_MM_XTINY 1
#elif ((__MODEL__) == 5)
# define LZO_MM_XSMALL 1
#else
# error "FIXME - LZO_ARCH_MCS251 __MODEL__"
#endif
#elif (LZO_ARCH_MCS51)
#if !defined(__MODEL__)
# error "FIXME - LZO_ARCH_MCS51 __MODEL__"
#elif ((__MODEL__) == 1)
# define LZO_MM_SMALL 1
#elif ((__MODEL__) == 2)
# define LZO_MM_LARGE 1
#elif ((__MODEL__) == 3)
# define LZO_MM_TINY 1
#elif ((__MODEL__) == 4)
# define LZO_MM_XTINY 1
#elif ((__MODEL__) == 5)
# define LZO_MM_XSMALL 1
#else
# error "FIXME - LZO_ARCH_MCS51 __MODEL__"
#endif
#elif (LZO_ARCH_CRAY_PVP)
# define LZO_MM_PVP 1
#else
# define LZO_MM_FLAT 1
#endif
#if (LZO_MM_COMPACT)
# define LZO_INFO_MM "compact"
#elif (LZO_MM_FLAT)
# define LZO_INFO_MM "flat"
#elif (LZO_MM_HUGE)
# define LZO_INFO_MM "huge"
#elif (LZO_MM_LARGE)
# define LZO_INFO_MM "large"
#elif (LZO_MM_MEDIUM)
# define LZO_INFO_MM "medium"
#elif (LZO_MM_PVP)
# define LZO_INFO_MM "pvp"
#elif (LZO_MM_SMALL)
# define LZO_INFO_MM "small"
#elif (LZO_MM_TINY)
# define LZO_INFO_MM "tiny"
#else
# error "unknown memory model"
#endif
#endif
#if !defined(__lzo_gnuc_extension__)
#if (LZO_CC_GNUC >= 0x020800ul)
# define __lzo_gnuc_extension__ __extension__
#elif (LZO_CC_ARMCC_GNUC || LZO_CC_CLANG || LZO_CC_LLVM || LZO_CC_PATHSCALE)
# define __lzo_gnuc_extension__ __extension__
#elif (LZO_CC_IBMC >= 600)
# define __lzo_gnuc_extension__ __extension__
#else
#endif
#endif
#if !defined(__lzo_gnuc_extension__)
# define __lzo_gnuc_extension__ /*empty*/
#endif
#if !defined(LZO_CFG_USE_NEW_STYLE_CASTS) && defined(__cplusplus) && 0
# if (LZO_CC_GNUC && (LZO_CC_GNUC < 0x020800ul))
# define LZO_CFG_USE_NEW_STYLE_CASTS 0
# elif (LZO_CC_INTELC && (__INTEL_COMPILER < 1200))
# define LZO_CFG_USE_NEW_STYLE_CASTS 0
# else
# define LZO_CFG_USE_NEW_STYLE_CASTS 1
# endif
#endif
#if !defined(LZO_CFG_USE_NEW_STYLE_CASTS)
# define LZO_CFG_USE_NEW_STYLE_CASTS 0
#endif
#if !defined(__cplusplus)
# if defined(LZO_CFG_USE_NEW_STYLE_CASTS)
# undef LZO_CFG_USE_NEW_STYLE_CASTS
# endif
# define LZO_CFG_USE_NEW_STYLE_CASTS 0
#endif
#if !defined(LZO_REINTERPRET_CAST)
# if (LZO_CFG_USE_NEW_STYLE_CASTS)
# define LZO_REINTERPRET_CAST(t,e) (reinterpret_cast<t> (e))
# endif
#endif
#if !defined(LZO_REINTERPRET_CAST)
# define LZO_REINTERPRET_CAST(t,e) ((t) (e))
#endif
#if !defined(LZO_STATIC_CAST)
# if (LZO_CFG_USE_NEW_STYLE_CASTS)
# define LZO_STATIC_CAST(t,e) (static_cast<t> (e))
# endif
#endif
#if !defined(LZO_STATIC_CAST)
# define LZO_STATIC_CAST(t,e) ((t) (e))
#endif
#if !defined(LZO_STATIC_CAST2)
# define LZO_STATIC_CAST2(t1,t2,e) LZO_STATIC_CAST(t1, LZO_STATIC_CAST(t2, e))
#endif
#if !defined(LZO_UNCONST_CAST)
# if (LZO_CFG_USE_NEW_STYLE_CASTS)
# define LZO_UNCONST_CAST(t,e) (const_cast<t> (e))
# elif (LZO_HAVE_MM_HUGE_PTR)
# define LZO_UNCONST_CAST(t,e) ((t) (e))
# elif (LZO_CC_ARMCC_GNUC || LZO_CC_CLANG || LZO_CC_GNUC || LZO_CC_LLVM || LZO_CC_PATHSCALE)
# define LZO_UNCONST_CAST(t,e) ((t) ((void *) ((lzo_uintptr_t) ((const void *) (e)))))
# endif
#endif
#if !defined(LZO_UNCONST_CAST)
# define LZO_UNCONST_CAST(t,e) ((t) ((void *) ((const void *) (e))))
#endif
#if !defined(LZO_UNCONST_VOLATILE_CAST)
# if (LZO_CFG_USE_NEW_STYLE_CASTS)
# define LZO_UNCONST_VOLATILE_CAST(t,e) (const_cast<t> (e))
# elif (LZO_HAVE_MM_HUGE_PTR)
# define LZO_UNCONST_VOLATILE_CAST(t,e) ((t) (e))
# elif (LZO_CC_ARMCC_GNUC || LZO_CC_CLANG || LZO_CC_GNUC || LZO_CC_LLVM || LZO_CC_PATHSCALE)
# define LZO_UNCONST_VOLATILE_CAST(t,e) ((t) ((volatile void *) ((lzo_uintptr_t) ((volatile const void *) (e)))))
# endif
#endif
#if !defined(LZO_UNCONST_VOLATILE_CAST)
# define LZO_UNCONST_VOLATILE_CAST(t,e) ((t) ((volatile void *) ((volatile const void *) (e))))
#endif
#if !defined(LZO_UNVOLATILE_CAST)
# if (LZO_CFG_USE_NEW_STYLE_CASTS)
# define LZO_UNVOLATILE_CAST(t,e) (const_cast<t> (e))
# elif (LZO_HAVE_MM_HUGE_PTR)
# define LZO_UNVOLATILE_CAST(t,e) ((t) (e))
# elif (LZO_CC_ARMCC_GNUC || LZO_CC_CLANG || LZO_CC_GNUC || LZO_CC_LLVM || LZO_CC_PATHSCALE)
# define LZO_UNVOLATILE_CAST(t,e) ((t) ((void *) ((lzo_uintptr_t) ((volatile void *) (e)))))
# endif
#endif
#if !defined(LZO_UNVOLATILE_CAST)
# define LZO_UNVOLATILE_CAST(t,e) ((t) ((void *) ((volatile void *) (e))))
#endif
#if !defined(LZO_UNVOLATILE_CONST_CAST)
# if (LZO_CFG_USE_NEW_STYLE_CASTS)
# define LZO_UNVOLATILE_CONST_CAST(t,e) (const_cast<t> (e))
# elif (LZO_HAVE_MM_HUGE_PTR)
# define LZO_UNVOLATILE_CONST_CAST(t,e) ((t) (e))
# elif (LZO_CC_ARMCC_GNUC || LZO_CC_CLANG || LZO_CC_GNUC || LZO_CC_LLVM || LZO_CC_PATHSCALE)
# define LZO_UNVOLATILE_CONST_CAST(t,e) ((t) ((const void *) ((lzo_uintptr_t) ((volatile const void *) (e)))))
# endif
#endif
#if !defined(LZO_UNVOLATILE_CONST_CAST)
# define LZO_UNVOLATILE_CONST_CAST(t,e) ((t) ((const void *) ((volatile const void *) (e))))
#endif
#if !defined(LZO_PCAST)
# if (LZO_HAVE_MM_HUGE_PTR)
# define LZO_PCAST(t,e) ((t) (e))
# endif
#endif
#if !defined(LZO_PCAST)
# define LZO_PCAST(t,e) LZO_STATIC_CAST(t, LZO_STATIC_CAST(void *, e))
#endif
#if !defined(LZO_CCAST)
# if (LZO_HAVE_MM_HUGE_PTR)
# define LZO_CCAST(t,e) ((t) (e))
# endif
#endif
#if !defined(LZO_CCAST)
# define LZO_CCAST(t,e) LZO_STATIC_CAST(t, LZO_STATIC_CAST(const void *, e))
#endif
#if !defined(LZO_ICONV)
# define LZO_ICONV(t,e) LZO_STATIC_CAST(t, e)
#endif
#if !defined(LZO_ICAST)
# define LZO_ICAST(t,e) LZO_STATIC_CAST(t, e)
#endif
#if !defined(LZO_ITRUNC)
# define LZO_ITRUNC(t,e) LZO_STATIC_CAST(t, e)
#endif
#if !defined(__lzo_cte)
# if (LZO_CC_MSC || LZO_CC_WATCOMC)
# define __lzo_cte(e) ((void)0,(e))
# elif 1
# define __lzo_cte(e) ((void)0,(e))
# endif
#endif
#if !defined(__lzo_cte)
# define __lzo_cte(e) (e)
#endif
#if !defined(LZO_BLOCK_BEGIN)
# define LZO_BLOCK_BEGIN do {
# define LZO_BLOCK_END } while __lzo_cte(0)
#endif
#if !defined(LZO_UNUSED)
# if (LZO_CC_BORLANDC && (__BORLANDC__ >= 0x0600))
# define LZO_UNUSED(var) ((void) &var)
# elif (LZO_CC_BORLANDC || LZO_CC_HIGHC || LZO_CC_NDPC || LZO_CC_PELLESC || LZO_CC_TURBOC)
# define LZO_UNUSED(var) if (&var) ; else
# elif (LZO_CC_CLANG && (LZO_CC_CLANG >= 0x030200ul))
# define LZO_UNUSED(var) ((void) &var)
# elif (LZO_CC_CLANG || LZO_CC_GNUC || LZO_CC_LLVM || LZO_CC_PATHSCALE)
# define LZO_UNUSED(var) ((void) var)
# elif (LZO_CC_MSC && (_MSC_VER < 900))
# define LZO_UNUSED(var) if (&var) ; else
# elif (LZO_CC_KEILC)
# define LZO_UNUSED(var) {LZO_EXTERN_C int lzo_unused__[1-2*!(sizeof(var)>0)];}
# elif (LZO_CC_PACIFICC)
# define LZO_UNUSED(var) ((void) sizeof(var))
# elif (LZO_CC_WATCOMC) && defined(__cplusplus)
# define LZO_UNUSED(var) ((void) var)
# else
# define LZO_UNUSED(var) ((void) &var)
# endif
#endif
#if !defined(LZO_UNUSED_FUNC)
# if (LZO_CC_BORLANDC && (__BORLANDC__ >= 0x0600))
# define LZO_UNUSED_FUNC(func) ((void) func)
# elif (LZO_CC_BORLANDC || LZO_CC_NDPC || LZO_CC_TURBOC)
# define LZO_UNUSED_FUNC(func) if (func) ; else
# elif (LZO_CC_CLANG || LZO_CC_LLVM)
# define LZO_UNUSED_FUNC(func) ((void) &func)
# elif (LZO_CC_MSC && (_MSC_VER < 900))
# define LZO_UNUSED_FUNC(func) if (func) ; else
# elif (LZO_CC_MSC)
# define LZO_UNUSED_FUNC(func) ((void) &func)
# elif (LZO_CC_KEILC || LZO_CC_PELLESC)
# define LZO_UNUSED_FUNC(func) {LZO_EXTERN_C int lzo_unused_func__[1-2*!(sizeof((int)func)>0)];}
# else
# define LZO_UNUSED_FUNC(func) ((void) func)
# endif
#endif
#if !defined(LZO_UNUSED_LABEL)
# if (LZO_CC_CLANG >= 0x020800ul)
# define LZO_UNUSED_LABEL(l) (__lzo_gnuc_extension__ ((void) ((const void *) &&l)))
# elif (LZO_CC_ARMCC || LZO_CC_CLANG || LZO_CC_INTELC || LZO_CC_WATCOMC)
# define LZO_UNUSED_LABEL(l) if __lzo_cte(0) goto l
# else
# define LZO_UNUSED_LABEL(l) switch (0) case 1:goto l
# endif
#endif
#if !defined(LZO_DEFINE_UNINITIALIZED_VAR)
# if 0
# define LZO_DEFINE_UNINITIALIZED_VAR(type,var,init) type var
# elif 0 && (LZO_CC_GNUC)
# define LZO_DEFINE_UNINITIALIZED_VAR(type,var,init) type var = var
# else
# define LZO_DEFINE_UNINITIALIZED_VAR(type,var,init) type var = init
# endif
#endif
#if !defined(__lzo_inline)
#if (LZO_CC_TURBOC && (__TURBOC__ <= 0x0295))
#elif defined(__cplusplus)
# define __lzo_inline inline
#elif defined(__STDC_VERSION__) && (__STDC_VERSION__-0 >= 199901L)
# define __lzo_inline inline
#elif (LZO_CC_BORLANDC && (__BORLANDC__ >= 0x0550))
# define __lzo_inline __inline
#elif (LZO_CC_ARMCC_GNUC || LZO_CC_CILLY || LZO_CC_CLANG || LZO_CC_GNUC || LZO_CC_LLVM || LZO_CC_PATHSCALE || LZO_CC_PGI)
# define __lzo_inline __inline__
#elif (LZO_CC_DMC)
# define __lzo_inline __inline
#elif (LZO_CC_GHS)
# define __lzo_inline __inline__
#elif (LZO_CC_IBMC >= 600)
# define __lzo_inline __inline__
#elif (LZO_CC_INTELC)
# define __lzo_inline __inline
#elif (LZO_CC_MWERKS && (__MWERKS__ >= 0x2405))
# define __lzo_inline __inline
#elif (LZO_CC_MSC && (_MSC_VER >= 900))
# define __lzo_inline __inline
#elif (LZO_CC_SUNPROC >= 0x5100)
# define __lzo_inline __inline__
#endif
#endif
#if defined(__lzo_inline)
# ifndef __lzo_HAVE_inline
# define __lzo_HAVE_inline 1
# endif
#else
# define __lzo_inline /*empty*/
#endif
#if !defined(__lzo_forceinline)
#if (LZO_CC_GNUC >= 0x030200ul)
# define __lzo_forceinline __inline__ __attribute__((__always_inline__))
#elif (LZO_CC_IBMC >= 700)
# define __lzo_forceinline __inline__ __attribute__((__always_inline__))
#elif (LZO_CC_INTELC_MSC && (__INTEL_COMPILER >= 450))
# define __lzo_forceinline __forceinline
#elif (LZO_CC_INTELC_GNUC && (__INTEL_COMPILER >= 800))
# define __lzo_forceinline __inline__ __attribute__((__always_inline__))
#elif (LZO_CC_ARMCC_GNUC || LZO_CC_CLANG || LZO_CC_LLVM || LZO_CC_PATHSCALE)
# define __lzo_forceinline __inline__ __attribute__((__always_inline__))
#elif (LZO_CC_MSC && (_MSC_VER >= 1200))
# define __lzo_forceinline __forceinline
#elif (LZO_CC_PGI >= 0x0d0a00ul)
# define __lzo_forceinline __inline__ __attribute__((__always_inline__))
#elif (LZO_CC_SUNPROC >= 0x5100)
# define __lzo_forceinline __inline__ __attribute__((__always_inline__))
#endif
#endif
#if defined(__lzo_forceinline)
# ifndef __lzo_HAVE_forceinline
# define __lzo_HAVE_forceinline 1
# endif
#else
# define __lzo_forceinline __lzo_inline
#endif
#if !defined(__lzo_noinline)
#if 1 && (LZO_ARCH_I386) && (LZO_CC_GNUC >= 0x040000ul) && (LZO_CC_GNUC < 0x040003ul)
# define __lzo_noinline __attribute__((__noinline__,__used__))
#elif (LZO_CC_GNUC >= 0x030200ul)
# define __lzo_noinline __attribute__((__noinline__))
#elif (LZO_CC_IBMC >= 700)
# define __lzo_noinline __attribute__((__noinline__))
#elif (LZO_CC_INTELC_MSC && (__INTEL_COMPILER >= 600))
# define __lzo_noinline __declspec(noinline)
#elif (LZO_CC_INTELC_GNUC && (__INTEL_COMPILER >= 800))
# define __lzo_noinline __attribute__((__noinline__))
#elif (LZO_CC_ARMCC_GNUC || LZO_CC_CLANG || LZO_CC_LLVM || LZO_CC_PATHSCALE)
# define __lzo_noinline __attribute__((__noinline__))
#elif (LZO_CC_MSC && (_MSC_VER >= 1300))
# define __lzo_noinline __declspec(noinline)
#elif (LZO_CC_MWERKS && (__MWERKS__ >= 0x3200) && (LZO_OS_WIN32 || LZO_OS_WIN64))
# if defined(__cplusplus)
# else
# define __lzo_noinline __declspec(noinline)
# endif
#elif (LZO_CC_PGI >= 0x0d0a00ul)
# define __lzo_noinline __attribute__((__noinline__))
#elif (LZO_CC_SUNPROC >= 0x5100)
# define __lzo_noinline __attribute__((__noinline__))
#endif
#endif
#if defined(__lzo_noinline)
# ifndef __lzo_HAVE_noinline
# define __lzo_HAVE_noinline 1
# endif
#else
# define __lzo_noinline /*empty*/
#endif
#if (__lzo_HAVE_forceinline || __lzo_HAVE_noinline) && !(__lzo_HAVE_inline)
# error "unexpected configuration - check your compiler defines"
#endif
#if !defined(__lzo_static_inline)
#if (LZO_CC_IBMC)
# define __lzo_static_inline __lzo_gnuc_extension__ static __lzo_inline
#endif
#endif
#if !defined(__lzo_static_inline)
# define __lzo_static_inline static __lzo_inline
#endif
#if !defined(__lzo_static_forceinline)
#if (LZO_CC_IBMC)
# define __lzo_static_forceinline __lzo_gnuc_extension__ static __lzo_forceinline
#endif
#endif
#if !defined(__lzo_static_forceinline)
# define __lzo_static_forceinline static __lzo_forceinline
#endif
#if !defined(__lzo_static_noinline)
#if (LZO_CC_IBMC)
# define __lzo_static_noinline __lzo_gnuc_extension__ static __lzo_noinline
#endif
#endif
#if !defined(__lzo_static_noinline)
# define __lzo_static_noinline static __lzo_noinline
#endif
#if !defined(__lzo_c99_extern_inline)
#if defined(__GNUC_GNU_INLINE__)
# define __lzo_c99_extern_inline __lzo_inline
#elif defined(__GNUC_STDC_INLINE__)
# define __lzo_c99_extern_inline extern __lzo_inline
#elif defined(__STDC_VERSION__) && (__STDC_VERSION__-0 >= 199901L)
# define __lzo_c99_extern_inline extern __lzo_inline
#endif
#if !defined(__lzo_c99_extern_inline) && (__lzo_HAVE_inline)
# define __lzo_c99_extern_inline __lzo_inline
#endif
#endif
#if defined(__lzo_c99_extern_inline)
# ifndef __lzo_HAVE_c99_extern_inline
# define __lzo_HAVE_c99_extern_inline 1
# endif
#else
# define __lzo_c99_extern_inline /*empty*/
#endif
#if !defined(__lzo_may_alias)
#if (LZO_CC_GNUC >= 0x030400ul)
# define __lzo_may_alias __attribute__((__may_alias__))
#elif (LZO_CC_CLANG >= 0x020900ul)
# define __lzo_may_alias __attribute__((__may_alias__))
#elif (LZO_CC_INTELC_GNUC && (__INTEL_COMPILER >= 1210)) && 0
# define __lzo_may_alias __attribute__((__may_alias__))
#elif (LZO_CC_PGI >= 0x0d0a00ul) && 0
# define __lzo_may_alias __attribute__((__may_alias__))
#endif
#endif
#if defined(__lzo_may_alias)
# ifndef __lzo_HAVE_may_alias
# define __lzo_HAVE_may_alias 1
# endif
#else
# define __lzo_may_alias /*empty*/
#endif
#if !defined(__lzo_noreturn)
#if (LZO_CC_GNUC >= 0x020700ul)
# define __lzo_noreturn __attribute__((__noreturn__))
#elif (LZO_CC_IBMC >= 700)
# define __lzo_noreturn __attribute__((__noreturn__))
#elif (LZO_CC_INTELC_MSC && (__INTEL_COMPILER >= 450))
# define __lzo_noreturn __declspec(noreturn)
#elif (LZO_CC_INTELC_GNUC && (__INTEL_COMPILER >= 600))
# define __lzo_noreturn __attribute__((__noreturn__))
#elif (LZO_CC_ARMCC_GNUC || LZO_CC_CLANG || LZO_CC_LLVM || LZO_CC_PATHSCALE)
# define __lzo_noreturn __attribute__((__noreturn__))
#elif (LZO_CC_MSC && (_MSC_VER >= 1200))
# define __lzo_noreturn __declspec(noreturn)
#elif (LZO_CC_PGI >= 0x0d0a00ul)
# define __lzo_noreturn __attribute__((__noreturn__))
#endif
#endif
#if defined(__lzo_noreturn)
# ifndef __lzo_HAVE_noreturn
# define __lzo_HAVE_noreturn 1
# endif
#else
# define __lzo_noreturn /*empty*/
#endif
#if !defined(__lzo_nothrow)
#if (LZO_CC_GNUC >= 0x030300ul)
# define __lzo_nothrow __attribute__((__nothrow__))
#elif (LZO_CC_INTELC_MSC && (__INTEL_COMPILER >= 450)) && defined(__cplusplus)
# define __lzo_nothrow __declspec(nothrow)
#elif (LZO_CC_INTELC_GNUC && (__INTEL_COMPILER >= 900))
# define __lzo_nothrow __attribute__((__nothrow__))
#elif (LZO_CC_ARMCC_GNUC || LZO_CC_CLANG || LZO_CC_LLVM || LZO_CC_PATHSCALE)
# define __lzo_nothrow __attribute__((__nothrow__))
#elif (LZO_CC_MSC && (_MSC_VER >= 1200)) && defined(__cplusplus)
# define __lzo_nothrow __declspec(nothrow)
#endif
#endif
#if defined(__lzo_nothrow)
# ifndef __lzo_HAVE_nothrow
# define __lzo_HAVE_nothrow 1
# endif
#else
# define __lzo_nothrow /*empty*/
#endif
#if !defined(__lzo_restrict)
#if (LZO_CC_GNUC >= 0x030400ul)
# define __lzo_restrict __restrict__
#elif (LZO_CC_IBMC >= 800) && !defined(__cplusplus)
# define __lzo_restrict __restrict__
#elif (LZO_CC_IBMC >= 1210)
# define __lzo_restrict __restrict__
#elif (LZO_CC_INTELC_MSC && (__INTEL_COMPILER >= 600))
#elif (LZO_CC_INTELC_GNUC && (__INTEL_COMPILER >= 600))
# define __lzo_restrict __restrict__
#elif (LZO_CC_ARMCC_GNUC || LZO_CC_CLANG || LZO_CC_LLVM)
# define __lzo_restrict __restrict__
#elif (LZO_CC_MSC && (_MSC_VER >= 1400))
# define __lzo_restrict __restrict
#elif (LZO_CC_PGI >= 0x0d0a00ul)
# define __lzo_restrict __restrict__
#endif
#endif
#if defined(__lzo_restrict)
# ifndef __lzo_HAVE_restrict
# define __lzo_HAVE_restrict 1
# endif
#else
# define __lzo_restrict /*empty*/
#endif
#if !defined(__lzo_alignof)
#if (LZO_CC_ARMCC || LZO_CC_CILLY || LZO_CC_CLANG || LZO_CC_GNUC || LZO_CC_LLVM || LZO_CC_PATHSCALE || LZO_CC_PGI)
# define __lzo_alignof(e) __alignof__(e)
#elif (LZO_CC_GHS) && !defined(__cplusplus)
# define __lzo_alignof(e) __alignof__(e)
#elif (LZO_CC_IBMC >= 600)
# define __lzo_alignof(e) (__lzo_gnuc_extension__ __alignof__(e))
#elif (LZO_CC_INTELC && (__INTEL_COMPILER >= 700))
# define __lzo_alignof(e) __alignof__(e)
#elif (LZO_CC_MSC && (_MSC_VER >= 1300))
# define __lzo_alignof(e) __alignof(e)
#elif (LZO_CC_SUNPROC >= 0x5100)
# define __lzo_alignof(e) __alignof__(e)
#endif
#endif
#if defined(__lzo_alignof)
# ifndef __lzo_HAVE_alignof
# define __lzo_HAVE_alignof 1
# endif
#endif
#if !defined(__lzo_struct_packed)
#if (LZO_CC_CLANG && (LZO_CC_CLANG < 0x020800ul)) && defined(__cplusplus)
#elif (LZO_CC_GNUC && (LZO_CC_GNUC < 0x020700ul))
#elif (LZO_CC_GNUC && (LZO_CC_GNUC < 0x020800ul)) && defined(__cplusplus)
#elif (LZO_CC_PCC && (LZO_CC_PCC < 0x010100ul))
#elif (LZO_CC_SUNPROC && (LZO_CC_SUNPROC < 0x5110)) && !defined(__cplusplus)
#elif (LZO_CC_GNUC >= 0x030400ul) && !(LZO_CC_PCC_GNUC) && (LZO_ARCH_AMD64 || LZO_ARCH_I386)
# define __lzo_struct_packed(s) struct s {
# define __lzo_struct_packed_end() } __attribute__((__gcc_struct__,__packed__));
# define __lzo_struct_packed_ma_end() } __lzo_may_alias __attribute__((__gcc_struct__,__packed__));
#elif (LZO_CC_ARMCC || LZO_CC_CLANG || LZO_CC_GNUC || LZO_CC_INTELC_GNUC || LZO_CC_LLVM || LZO_CC_PATHSCALE || (LZO_CC_PGI >= 0x0d0a00ul) || (LZO_CC_SUNPROC >= 0x5100))
# define __lzo_struct_packed(s) struct s {
# define __lzo_struct_packed_end() } __attribute__((__packed__));
# define __lzo_struct_packed_ma_end() } __lzo_may_alias __attribute__((__packed__));
#elif (LZO_CC_IBMC >= 700)
# define __lzo_struct_packed(s) __lzo_gnuc_extension__ struct s {
# define __lzo_struct_packed_end() } __attribute__((__packed__));
# define __lzo_struct_packed_ma_end() } __lzo_may_alias __attribute__((__packed__));
#elif (LZO_CC_INTELC_MSC) || (LZO_CC_MSC && (_MSC_VER >= 1300))
# define __lzo_struct_packed(s) __pragma(pack(push,1)) struct s {
# define __lzo_struct_packed_end() } __pragma(pack(pop));
#elif (LZO_CC_WATCOMC && (__WATCOMC__ >= 900))
# define __lzo_struct_packed(s) _Packed struct s {
# define __lzo_struct_packed_end() };
#endif
#endif
#if defined(__lzo_struct_packed) && !defined(__lzo_struct_packed_ma)
# define __lzo_struct_packed_ma(s) __lzo_struct_packed(s)
#endif
#if defined(__lzo_struct_packed_end) && !defined(__lzo_struct_packed_ma_end)
# define __lzo_struct_packed_ma_end() __lzo_struct_packed_end()
#endif
#if !defined(__lzo_byte_struct)
#if defined(__lzo_struct_packed)
# define __lzo_byte_struct(s,n) __lzo_struct_packed(s) unsigned char a[n]; __lzo_struct_packed_end()
# define __lzo_byte_struct_ma(s,n) __lzo_struct_packed_ma(s) unsigned char a[n]; __lzo_struct_packed_ma_end()
#elif (LZO_CC_CILLY || LZO_CC_CLANG || LZO_CC_PGI || (LZO_CC_SUNPROC >= 0x5100))
# define __lzo_byte_struct(s,n) struct s { unsigned char a[n]; } __attribute__((__packed__));
# define __lzo_byte_struct_ma(s,n) struct s { unsigned char a[n]; } __lzo_may_alias __attribute__((__packed__));
#endif
#endif
#if defined(__lzo_byte_struct) && !defined(__lzo_byte_struct_ma)
# define __lzo_byte_struct_ma(s,n) __lzo_byte_struct(s,n)
#endif
#if !defined(__lzo_struct_align16) && (__lzo_HAVE_alignof)
#if (LZO_CC_GNUC && (LZO_CC_GNUC < 0x030000ul))
#elif (LZO_CC_CLANG && (LZO_CC_CLANG < 0x020800ul)) && defined(__cplusplus)
#elif (LZO_CC_CILLY || LZO_CC_PCC)
#elif (LZO_CC_INTELC_MSC) || (LZO_CC_MSC && (_MSC_VER >= 1300))
# define __lzo_struct_align16(s) struct __declspec(align(16)) s {
# define __lzo_struct_align16_end() };
# define __lzo_struct_align32(s) struct __declspec(align(32)) s {
# define __lzo_struct_align32_end() };
# define __lzo_struct_align64(s) struct __declspec(align(64)) s {
# define __lzo_struct_align64_end() };
#elif (LZO_CC_ARMCC || LZO_CC_CLANG || LZO_CC_GNUC || (LZO_CC_IBMC >= 700) || LZO_CC_INTELC_GNUC || LZO_CC_LLVM || LZO_CC_PATHSCALE)
# define __lzo_struct_align16(s) struct s {
# define __lzo_struct_align16_end() } __attribute__((__aligned__(16)));
# define __lzo_struct_align32(s) struct s {
# define __lzo_struct_align32_end() } __attribute__((__aligned__(32)));
# define __lzo_struct_align64(s) struct s {
# define __lzo_struct_align64_end() } __attribute__((__aligned__(64)));
#endif
#endif
#if !defined(__lzo_union_um)
#if (LZO_CC_CLANG && (LZO_CC_CLANG < 0x020800ul)) && defined(__cplusplus)
#elif (LZO_CC_GNUC && (LZO_CC_GNUC < 0x020700ul))
#elif (LZO_CC_GNUC && (LZO_CC_GNUC < 0x020800ul)) && defined(__cplusplus)
#elif (LZO_CC_INTELC_GNUC && (__INTEL_COMPILER < 810))
#elif (LZO_CC_PCC && (LZO_CC_PCC < 0x010100ul))
#elif (LZO_CC_SUNPROC && (LZO_CC_SUNPROC < 0x5110)) && !defined(__cplusplus)
#elif (LZO_CC_ARMCC || LZO_CC_CLANG || LZO_CC_GNUC || LZO_CC_INTELC_GNUC || LZO_CC_LLVM || LZO_CC_PATHSCALE || (LZO_CC_PGI >= 0x0d0a00ul) || (LZO_CC_SUNPROC >= 0x5100))
# define __lzo_union_am(s) union s {
# define __lzo_union_am_end() } __lzo_may_alias;
# define __lzo_union_um(s) union s {
# define __lzo_union_um_end() } __lzo_may_alias __attribute__((__packed__));
#elif (LZO_CC_IBMC >= 700)
# define __lzo_union_am(s) __lzo_gnuc_extension__ union s {
# define __lzo_union_am_end() } __lzo_may_alias;
# define __lzo_union_um(s) __lzo_gnuc_extension__ union s {
# define __lzo_union_um_end() } __lzo_may_alias __attribute__((__packed__));
#elif (LZO_CC_INTELC_MSC) || (LZO_CC_MSC && (_MSC_VER >= 1300))
# define __lzo_union_um(s) __pragma(pack(push,1)) union s {
# define __lzo_union_um_end() } __pragma(pack(pop));
#elif (LZO_CC_WATCOMC && (__WATCOMC__ >= 900))
# define __lzo_union_um(s) _Packed union s {
# define __lzo_union_um_end() };
#endif
#endif
#if !defined(__lzo_union_am)
# define __lzo_union_am(s) union s {
# define __lzo_union_am_end() };
#endif
#if !defined(__lzo_constructor)
#if (LZO_CC_GNUC >= 0x030400ul)
# define __lzo_constructor __attribute__((__constructor__,__used__))
#elif (LZO_CC_GNUC >= 0x020700ul)
# define __lzo_constructor __attribute__((__constructor__))
#elif (LZO_CC_INTELC_GNUC && (__INTEL_COMPILER >= 800))
# define __lzo_constructor __attribute__((__constructor__,__used__))
#elif (LZO_CC_ARMCC_GNUC || LZO_CC_CLANG || LZO_CC_LLVM || LZO_CC_PATHSCALE)
# define __lzo_constructor __attribute__((__constructor__))
#endif
#endif
#if defined(__lzo_constructor)
# ifndef __lzo_HAVE_constructor
# define __lzo_HAVE_constructor 1
# endif
#endif
#if !defined(__lzo_destructor)
#if (LZO_CC_GNUC >= 0x030400ul)
# define __lzo_destructor __attribute__((__destructor__,__used__))
#elif (LZO_CC_GNUC >= 0x020700ul)
# define __lzo_destructor __attribute__((__destructor__))
#elif (LZO_CC_INTELC_GNUC && (__INTEL_COMPILER >= 800))
# define __lzo_destructor __attribute__((__destructor__,__used__))
#elif (LZO_CC_ARMCC_GNUC || LZO_CC_CLANG || LZO_CC_LLVM || LZO_CC_PATHSCALE)
# define __lzo_destructor __attribute__((__destructor__))
#endif
#endif
#if defined(__lzo_destructor)
# ifndef __lzo_HAVE_destructor
# define __lzo_HAVE_destructor 1
# endif
#endif
#if (__lzo_HAVE_destructor) && !(__lzo_HAVE_constructor)
# error "unexpected configuration - check your compiler defines"
#endif
#if !defined(__lzo_likely) && !defined(__lzo_unlikely)
#if (LZO_CC_GNUC >= 0x030200ul)
# define __lzo_likely(e) (__builtin_expect(!!(e),1))
# define __lzo_unlikely(e) (__builtin_expect(!!(e),0))
#elif (LZO_CC_IBMC >= 1010)
# define __lzo_likely(e) (__builtin_expect(!!(e),1))
# define __lzo_unlikely(e) (__builtin_expect(!!(e),0))
#elif (LZO_CC_INTELC && (__INTEL_COMPILER >= 800))
# define __lzo_likely(e) (__builtin_expect(!!(e),1))
# define __lzo_unlikely(e) (__builtin_expect(!!(e),0))
#elif (LZO_CC_ARMCC_GNUC || LZO_CC_CLANG || LZO_CC_LLVM || LZO_CC_PATHSCALE)
# define __lzo_likely(e) (__builtin_expect(!!(e),1))
# define __lzo_unlikely(e) (__builtin_expect(!!(e),0))
#endif
#endif
#if defined(__lzo_likely)
# ifndef __lzo_HAVE_likely
# define __lzo_HAVE_likely 1
# endif
#else
# define __lzo_likely(e) (e)
#endif
#if defined(__lzo_unlikely)
# ifndef __lzo_HAVE_unlikely
# define __lzo_HAVE_unlikely 1
# endif
#else
# define __lzo_unlikely(e) (e)
#endif
#if !defined(__lzo_static_unused_void_func)
# if 1 && (LZO_CC_ARMCC_GNUC || LZO_CC_CLANG || (LZO_CC_GNUC >= 0x020700ul) || LZO_CC_INTELC_GNUC || LZO_CC_LLVM || LZO_CC_PATHSCALE || LZO_CC_PGI)
# define __lzo_static_unused_void_func(f) static void __attribute__((__unused__)) f(void)
# else
# define __lzo_static_unused_void_func(f) static __lzo_inline void f(void)
# endif
#endif
#if !defined(__lzo_loop_forever)
# if (LZO_CC_IBMC)
# define __lzo_loop_forever() LZO_BLOCK_BEGIN for (;;) { ; } LZO_BLOCK_END
# else
# define __lzo_loop_forever() do { ; } while __lzo_cte(1)
# endif
#endif
#if !defined(__lzo_unreachable)
#if (LZO_CC_CLANG && (LZO_CC_CLANG >= 0x020800ul))
# define __lzo_unreachable() __builtin_unreachable();
#elif (LZO_CC_GNUC >= 0x040500ul)
# define __lzo_unreachable() __builtin_unreachable();
#elif (LZO_CC_INTELC_GNUC && (__INTEL_COMPILER >= 1300)) && 1
# define __lzo_unreachable() __builtin_unreachable();
#endif
#endif
#if defined(__lzo_unreachable)
# ifndef __lzo_HAVE_unreachable
# define __lzo_HAVE_unreachable 1
# endif
#else
# if 0
# define __lzo_unreachable() ((void)0);
# else
# define __lzo_unreachable() __lzo_loop_forever();
# endif
#endif
#ifndef __LZO_CTA_NAME
#if (LZO_CFG_USE_COUNTER)
# define __LZO_CTA_NAME(a) LZO_PP_ECONCAT2(a,__COUNTER__)
#else
# define __LZO_CTA_NAME(a) LZO_PP_ECONCAT2(a,__LINE__)
#endif
#endif
#if !defined(LZO_COMPILE_TIME_ASSERT_HEADER)
# if (LZO_CC_AZTECC || LZO_CC_ZORTECHC)
# define LZO_COMPILE_TIME_ASSERT_HEADER(e) LZO_EXTERN_C_BEGIN extern int __LZO_CTA_NAME(lzo_cta__)[1-!(e)]; LZO_EXTERN_C_END
# elif (LZO_CC_DMC || LZO_CC_SYMANTECC)
# define LZO_COMPILE_TIME_ASSERT_HEADER(e) LZO_EXTERN_C_BEGIN extern int __LZO_CTA_NAME(lzo_cta__)[1u-2*!(e)]; LZO_EXTERN_C_END
# elif (LZO_CC_TURBOC && (__TURBOC__ == 0x0295))
# define LZO_COMPILE_TIME_ASSERT_HEADER(e) LZO_EXTERN_C_BEGIN extern int __LZO_CTA_NAME(lzo_cta__)[1-!(e)]; LZO_EXTERN_C_END
# elif (LZO_CC_CLANG && (LZO_CC_CLANG < 0x020900ul)) && defined(__cplusplus)
# define LZO_COMPILE_TIME_ASSERT_HEADER(e) LZO_EXTERN_C_BEGIN int __LZO_CTA_NAME(lzo_cta_f__)(int [1-2*!(e)]); LZO_EXTERN_C_END
# elif (LZO_CC_GNUC) && defined(__CHECKER__) && defined(__SPARSE_CHECKER__)
# define LZO_COMPILE_TIME_ASSERT_HEADER(e) LZO_EXTERN_C_BEGIN enum {__LZO_CTA_NAME(lzo_cta_e__)=1/!!(e)} __attribute__((__unused__)); LZO_EXTERN_C_END
# else
# define LZO_COMPILE_TIME_ASSERT_HEADER(e) LZO_EXTERN_C_BEGIN extern int __LZO_CTA_NAME(lzo_cta__)[1-2*!(e)]; LZO_EXTERN_C_END
# endif
#endif
#if !defined(LZO_COMPILE_TIME_ASSERT)
# if (LZO_CC_AZTECC)
# define LZO_COMPILE_TIME_ASSERT(e) {typedef int __LZO_CTA_NAME(lzo_cta_t__)[1-!(e)];}
# elif (LZO_CC_DMC || LZO_CC_PACIFICC || LZO_CC_SYMANTECC || LZO_CC_ZORTECHC)
# define LZO_COMPILE_TIME_ASSERT(e) switch(0) case 1:case !(e):break;
# elif (LZO_CC_GNUC) && defined(__CHECKER__) && defined(__SPARSE_CHECKER__)
# define LZO_COMPILE_TIME_ASSERT(e) {(void) (0/!!(e));}
# elif (LZO_CC_GNUC >= 0x040700ul) && (LZO_CFG_USE_COUNTER) && defined(__cplusplus)
# define LZO_COMPILE_TIME_ASSERT(e) {enum {__LZO_CTA_NAME(lzo_cta_e__)=1/!!(e)} __attribute__((__unused__));}
# elif (LZO_CC_GNUC >= 0x040700ul)
# define LZO_COMPILE_TIME_ASSERT(e) {typedef int __LZO_CTA_NAME(lzo_cta_t__)[1-2*!(e)] __attribute__((__unused__));}
# elif (LZO_CC_MSC && (_MSC_VER < 900))
# define LZO_COMPILE_TIME_ASSERT(e) switch(0) case 1:case !(e):break;
# elif (LZO_CC_TURBOC && (__TURBOC__ == 0x0295))
# define LZO_COMPILE_TIME_ASSERT(e) switch(0) case 1:case !(e):break;
# else
# define LZO_COMPILE_TIME_ASSERT(e) {typedef int __LZO_CTA_NAME(lzo_cta_t__)[1-2*!(e)];}
# endif
#endif
LZO_COMPILE_TIME_ASSERT_HEADER(1 == 1)
#if defined(__cplusplus)
extern "C" { LZO_COMPILE_TIME_ASSERT_HEADER(2 == 2) }
#endif
LZO_COMPILE_TIME_ASSERT_HEADER(3 == 3)
#if (LZO_ARCH_I086 || LZO_ARCH_I386) && (LZO_OS_DOS16 || LZO_OS_DOS32 || LZO_OS_OS2 || LZO_OS_OS216 || LZO_OS_WIN16 || LZO_OS_WIN32 || LZO_OS_WIN64)
# if (LZO_CC_GNUC || LZO_CC_HIGHC || LZO_CC_NDPC || LZO_CC_PACIFICC)
# elif (LZO_CC_DMC || LZO_CC_SYMANTECC || LZO_CC_ZORTECHC)
# define __lzo_cdecl __cdecl
# define __lzo_cdecl_atexit /*empty*/
# define __lzo_cdecl_main __cdecl
# if (LZO_OS_OS2 && (LZO_CC_DMC || LZO_CC_SYMANTECC))
# define __lzo_cdecl_qsort __pascal
# elif (LZO_OS_OS2 && (LZO_CC_ZORTECHC))
# define __lzo_cdecl_qsort _stdcall
# else
# define __lzo_cdecl_qsort __cdecl
# endif
# elif (LZO_CC_WATCOMC)
# define __lzo_cdecl __cdecl
# else
# define __lzo_cdecl __cdecl
# define __lzo_cdecl_atexit __cdecl
# define __lzo_cdecl_main __cdecl
# define __lzo_cdecl_qsort __cdecl
# endif
# if (LZO_CC_GNUC || LZO_CC_HIGHC || LZO_CC_NDPC || LZO_CC_PACIFICC || LZO_CC_WATCOMC)
# elif (LZO_OS_OS2 && (LZO_CC_DMC || LZO_CC_SYMANTECC))
# define __lzo_cdecl_sighandler __pascal
# elif (LZO_OS_OS2 && (LZO_CC_ZORTECHC))
# define __lzo_cdecl_sighandler _stdcall
# elif (LZO_CC_MSC && (_MSC_VER >= 1400)) && defined(_M_CEE_PURE)
# define __lzo_cdecl_sighandler __clrcall
# elif (LZO_CC_MSC && (_MSC_VER >= 600 && _MSC_VER < 700))
# if defined(_DLL)
# define __lzo_cdecl_sighandler _far _cdecl _loadds
# elif defined(_MT)
# define __lzo_cdecl_sighandler _far _cdecl
# else
# define __lzo_cdecl_sighandler _cdecl
# endif
# else
# define __lzo_cdecl_sighandler __cdecl
# endif
#elif (LZO_ARCH_I386) && (LZO_CC_WATCOMC)
# define __lzo_cdecl __cdecl
#elif (LZO_ARCH_M68K && LZO_OS_TOS && (LZO_CC_PUREC || LZO_CC_TURBOC))
# define __lzo_cdecl cdecl
#endif
#if !defined(__lzo_cdecl)
# define __lzo_cdecl /*empty*/
#endif
#if !defined(__lzo_cdecl_atexit)
# define __lzo_cdecl_atexit /*empty*/
#endif
#if !defined(__lzo_cdecl_main)
# define __lzo_cdecl_main /*empty*/
#endif
#if !defined(__lzo_cdecl_qsort)
# define __lzo_cdecl_qsort /*empty*/
#endif
#if !defined(__lzo_cdecl_sighandler)
# define __lzo_cdecl_sighandler /*empty*/
#endif
#if !defined(__lzo_cdecl_va)
# define __lzo_cdecl_va __lzo_cdecl
#endif
#if !(LZO_CFG_NO_WINDOWS_H)
#if !defined(LZO_HAVE_WINDOWS_H)
#if (LZO_OS_CYGWIN || (LZO_OS_EMX && defined(__RSXNT__)) || LZO_OS_WIN32 || LZO_OS_WIN64)
# if (LZO_CC_WATCOMC && (__WATCOMC__ < 1000))
# elif (LZO_OS_WIN32 && LZO_CC_GNUC) && defined(__PW32__)
# elif ((LZO_OS_CYGWIN || defined(__MINGW32__)) && (LZO_CC_GNUC && (LZO_CC_GNUC < 0x025f00ul)))
# else
# define LZO_HAVE_WINDOWS_H 1
# endif
#endif
#endif
#endif
#ifndef LZO_SIZEOF_SHORT
#if defined(SIZEOF_SHORT)
# define LZO_SIZEOF_SHORT (SIZEOF_SHORT)
#elif defined(__SIZEOF_SHORT__)
# define LZO_SIZEOF_SHORT (__SIZEOF_SHORT__)
#endif
#endif
#ifndef LZO_SIZEOF_INT
#if defined(SIZEOF_INT)
# define LZO_SIZEOF_INT (SIZEOF_INT)
#elif defined(__SIZEOF_INT__)
# define LZO_SIZEOF_INT (__SIZEOF_INT__)
#endif
#endif
#ifndef LZO_SIZEOF_LONG
#if defined(SIZEOF_LONG)
# define LZO_SIZEOF_LONG (SIZEOF_LONG)
#elif defined(__SIZEOF_LONG__)
# define LZO_SIZEOF_LONG (__SIZEOF_LONG__)
#endif
#endif
#ifndef LZO_SIZEOF_LONG_LONG
#if defined(SIZEOF_LONG_LONG)
# define LZO_SIZEOF_LONG_LONG (SIZEOF_LONG_LONG)
#elif defined(__SIZEOF_LONG_LONG__)
# define LZO_SIZEOF_LONG_LONG (__SIZEOF_LONG_LONG__)
#endif
#endif
#ifndef LZO_SIZEOF___INT16
#if defined(SIZEOF___INT16)
# define LZO_SIZEOF___INT16 (SIZEOF___INT16)
#endif
#endif
#ifndef LZO_SIZEOF___INT32
#if defined(SIZEOF___INT32)
# define LZO_SIZEOF___INT32 (SIZEOF___INT32)
#endif
#endif
#ifndef LZO_SIZEOF___INT64
#if defined(SIZEOF___INT64)
# define LZO_SIZEOF___INT64 (SIZEOF___INT64)
#endif
#endif
#ifndef LZO_SIZEOF_VOID_P
#if defined(SIZEOF_VOID_P)
# define LZO_SIZEOF_VOID_P (SIZEOF_VOID_P)
#elif defined(__SIZEOF_POINTER__)
# define LZO_SIZEOF_VOID_P (__SIZEOF_POINTER__)
#endif
#endif
#ifndef LZO_SIZEOF_SIZE_T
#if defined(SIZEOF_SIZE_T)
# define LZO_SIZEOF_SIZE_T (SIZEOF_SIZE_T)
#elif defined(__SIZEOF_SIZE_T__)
# define LZO_SIZEOF_SIZE_T (__SIZEOF_SIZE_T__)
#endif
#endif
#ifndef LZO_SIZEOF_PTRDIFF_T
#if defined(SIZEOF_PTRDIFF_T)
# define LZO_SIZEOF_PTRDIFF_T (SIZEOF_PTRDIFF_T)
#elif defined(__SIZEOF_PTRDIFF_T__)
# define LZO_SIZEOF_PTRDIFF_T (__SIZEOF_PTRDIFF_T__)
#endif
#endif
#define __LZO_LSR(x,b) (((x)+0ul) >> (b))
#if !defined(LZO_SIZEOF_SHORT)
# if (LZO_ARCH_CRAY_PVP)
# define LZO_SIZEOF_SHORT 8
# elif (USHRT_MAX == LZO_0xffffL)
# define LZO_SIZEOF_SHORT 2
# elif (__LZO_LSR(USHRT_MAX,7) == 1)
# define LZO_SIZEOF_SHORT 1
# elif (__LZO_LSR(USHRT_MAX,15) == 1)
# define LZO_SIZEOF_SHORT 2
# elif (__LZO_LSR(USHRT_MAX,31) == 1)
# define LZO_SIZEOF_SHORT 4
# elif (__LZO_LSR(USHRT_MAX,63) == 1)
# define LZO_SIZEOF_SHORT 8
# elif (__LZO_LSR(USHRT_MAX,127) == 1)
# define LZO_SIZEOF_SHORT 16
# else
# error "LZO_SIZEOF_SHORT"
# endif
#endif
LZO_COMPILE_TIME_ASSERT_HEADER(LZO_SIZEOF_SHORT == sizeof(short))
#if !defined(LZO_SIZEOF_INT)
# if (LZO_ARCH_CRAY_PVP)
# define LZO_SIZEOF_INT 8
# elif (UINT_MAX == LZO_0xffffL)
# define LZO_SIZEOF_INT 2
# elif (UINT_MAX == LZO_0xffffffffL)
# define LZO_SIZEOF_INT 4
# elif (__LZO_LSR(UINT_MAX,7) == 1)
# define LZO_SIZEOF_INT 1
# elif (__LZO_LSR(UINT_MAX,15) == 1)
# define LZO_SIZEOF_INT 2
# elif (__LZO_LSR(UINT_MAX,31) == 1)
# define LZO_SIZEOF_INT 4
# elif (__LZO_LSR(UINT_MAX,63) == 1)
# define LZO_SIZEOF_INT 8
# elif (__LZO_LSR(UINT_MAX,127) == 1)
# define LZO_SIZEOF_INT 16
# else
# error "LZO_SIZEOF_INT"
# endif
#endif
LZO_COMPILE_TIME_ASSERT_HEADER(LZO_SIZEOF_INT == sizeof(int))
#if !defined(LZO_SIZEOF_LONG)
# if (ULONG_MAX == LZO_0xffffffffL)
# define LZO_SIZEOF_LONG 4
# elif (__LZO_LSR(ULONG_MAX,7) == 1)
# define LZO_SIZEOF_LONG 1
# elif (__LZO_LSR(ULONG_MAX,15) == 1)
# define LZO_SIZEOF_LONG 2
# elif (__LZO_LSR(ULONG_MAX,31) == 1)
# define LZO_SIZEOF_LONG 4
# elif (__LZO_LSR(ULONG_MAX,39) == 1)
# define LZO_SIZEOF_LONG 5
# elif (__LZO_LSR(ULONG_MAX,63) == 1)
# define LZO_SIZEOF_LONG 8
# elif (__LZO_LSR(ULONG_MAX,127) == 1)
# define LZO_SIZEOF_LONG 16
# else
# error "LZO_SIZEOF_LONG"
# endif
#endif
LZO_COMPILE_TIME_ASSERT_HEADER(LZO_SIZEOF_LONG == sizeof(long))
#if !defined(LZO_SIZEOF_LONG_LONG) && !defined(LZO_SIZEOF___INT64)
#if (LZO_SIZEOF_LONG > 0 && LZO_SIZEOF_LONG < 8)
# if defined(__LONG_MAX__) && defined(__LONG_LONG_MAX__)
# if (LZO_CC_GNUC >= 0x030300ul)
# if ((__LONG_MAX__-0) == (__LONG_LONG_MAX__-0))
# define LZO_SIZEOF_LONG_LONG LZO_SIZEOF_LONG
# elif (__LZO_LSR(__LONG_LONG_MAX__,30) == 1)
# define LZO_SIZEOF_LONG_LONG 4
# endif
# endif
# endif
#endif
#endif
#if !defined(LZO_SIZEOF_LONG_LONG) && !defined(LZO_SIZEOF___INT64)
#if (LZO_SIZEOF_LONG > 0 && LZO_SIZEOF_LONG < 8)
#if (LZO_ARCH_I086 && LZO_CC_DMC)
#elif (LZO_CC_CILLY) && defined(__GNUC__)
# define LZO_SIZEOF_LONG_LONG 8
#elif (LZO_CC_ARMCC_GNUC || LZO_CC_CLANG || LZO_CC_GNUC || LZO_CC_LLVM || LZO_CC_PATHSCALE)
# define LZO_SIZEOF_LONG_LONG 8
#elif ((LZO_OS_WIN32 || LZO_OS_WIN64 || defined(_WIN32)) && LZO_CC_MSC && (_MSC_VER >= 1400))
# define LZO_SIZEOF_LONG_LONG 8
#elif (LZO_OS_WIN64 || defined(_WIN64))
# define LZO_SIZEOF___INT64 8
#elif (LZO_ARCH_I386 && (LZO_CC_DMC))
# define LZO_SIZEOF_LONG_LONG 8
#elif (LZO_ARCH_I386 && (LZO_CC_SYMANTECC && (__SC__ >= 0x700)))
# define LZO_SIZEOF_LONG_LONG 8
#elif (LZO_ARCH_I386 && (LZO_CC_INTELC && defined(__linux__)))
# define LZO_SIZEOF_LONG_LONG 8
#elif (LZO_ARCH_I386 && (LZO_CC_MWERKS || LZO_CC_PELLESC || LZO_CC_PGI || LZO_CC_SUNPROC))
# define LZO_SIZEOF_LONG_LONG 8
#elif (LZO_ARCH_I386 && (LZO_CC_INTELC || LZO_CC_MSC))
# define LZO_SIZEOF___INT64 8
#elif ((LZO_OS_WIN32 || defined(_WIN32)) && (LZO_CC_MSC))
# define LZO_SIZEOF___INT64 8
#elif (LZO_ARCH_I386 && (LZO_CC_BORLANDC && (__BORLANDC__ >= 0x0520)))
# define LZO_SIZEOF___INT64 8
#elif (LZO_ARCH_I386 && (LZO_CC_WATCOMC && (__WATCOMC__ >= 1100)))
# define LZO_SIZEOF___INT64 8
#elif (LZO_CC_GHS && defined(__LLONG_BIT) && ((__LLONG_BIT-0) == 64))
# define LZO_SIZEOF_LONG_LONG 8
#elif (LZO_CC_WATCOMC && defined(_INTEGRAL_MAX_BITS) && ((_INTEGRAL_MAX_BITS-0) == 64))
# define LZO_SIZEOF___INT64 8
#elif (LZO_OS_OS400 || defined(__OS400__)) && defined(__LLP64_IFC__)
# define LZO_SIZEOF_LONG_LONG 8
#elif (defined(__vms) || defined(__VMS)) && ((__INITIAL_POINTER_SIZE-0) == 64)
# define LZO_SIZEOF_LONG_LONG 8
#elif (LZO_CC_SDCC) && (LZO_SIZEOF_INT == 2)
#elif 1 && defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
# define LZO_SIZEOF_LONG_LONG 8
#endif
#endif
#endif
#if defined(__cplusplus) && (LZO_CC_GNUC)
# if (LZO_CC_GNUC < 0x020800ul)
# undef LZO_SIZEOF_LONG_LONG
# endif
#endif
#if (LZO_CFG_NO_LONG_LONG)
# undef LZO_SIZEOF_LONG_LONG
#elif defined(__NO_LONG_LONG)
# undef LZO_SIZEOF_LONG_LONG
#elif defined(_NO_LONGLONG)
# undef LZO_SIZEOF_LONG_LONG
#endif
#if !defined(LZO_WORDSIZE)
#if (LZO_ARCH_ALPHA)
# define LZO_WORDSIZE 8
#elif (LZO_ARCH_AMD64)
# define LZO_WORDSIZE 8
#elif (LZO_ARCH_AVR)
# define LZO_WORDSIZE 1
#elif (LZO_ARCH_H8300)
# if defined(__NORMAL_MODE__)
# define LZO_WORDSIZE 4
# elif defined(__H8300H__) || defined(__H8300S__) || defined(__H8300SX__)
# define LZO_WORDSIZE 4
# else
# define LZO_WORDSIZE 2
# endif
#elif (LZO_ARCH_I086)
# define LZO_WORDSIZE 2
#elif (LZO_ARCH_IA64)
# define LZO_WORDSIZE 8
#elif (LZO_ARCH_M16C)
# define LZO_WORDSIZE 2
#elif (LZO_ARCH_SPU)
# define LZO_WORDSIZE 4
#elif (LZO_ARCH_Z80)
# define LZO_WORDSIZE 1
#elif (LZO_SIZEOF_LONG == 8) && ((defined(__mips__) && defined(__R5900__)) || defined(__MIPS_PSX2__))
# define LZO_WORDSIZE 8
#elif (LZO_OS_OS400 || defined(__OS400__))
# define LZO_WORDSIZE 8
#elif (defined(__vms) || defined(__VMS)) && (__INITIAL_POINTER_SIZE+0 == 64)
# define LZO_WORDSIZE 8
#endif
#endif
#if !defined(LZO_SIZEOF_VOID_P)
#if defined(__ILP32__) || defined(__ILP32) || defined(_ILP32)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(int) == 4)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(long) == 4)
# define LZO_SIZEOF_VOID_P 4
#elif defined(__ILP64__) || defined(__ILP64) || defined(_ILP64)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(int) == 8)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(long) == 8)
# define LZO_SIZEOF_VOID_P 8
#elif defined(__LLP64__) || defined(__LLP64) || defined(_LLP64) || defined(_WIN64)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(long) == 4)
# define LZO_SIZEOF_VOID_P 8
#elif defined(__LP64__) || defined(__LP64) || defined(_LP64)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(long) == 8)
# define LZO_SIZEOF_VOID_P 8
#elif (LZO_ARCH_AVR)
# define LZO_SIZEOF_VOID_P 2
#elif (LZO_ARCH_C166 || LZO_ARCH_MCS51 || LZO_ARCH_MCS251 || LZO_ARCH_MSP430)
# define LZO_SIZEOF_VOID_P 2
#elif (LZO_ARCH_H8300)
# if defined(__NORMAL_MODE__)
# define LZO_SIZEOF_VOID_P 2
# elif defined(__H8300H__) || defined(__H8300S__) || defined(__H8300SX__)
# define LZO_SIZEOF_VOID_P 4
# else
# define LZO_SIZEOF_VOID_P 2
# endif
# if (LZO_CC_GNUC && (LZO_CC_GNUC < 0x040000ul)) && (LZO_SIZEOF_INT == 4)
# define LZO_SIZEOF_SIZE_T LZO_SIZEOF_INT
# define LZO_SIZEOF_PTRDIFF_T LZO_SIZEOF_INT
# endif
#elif (LZO_ARCH_I086)
# if (LZO_MM_TINY || LZO_MM_SMALL || LZO_MM_MEDIUM)
# define LZO_SIZEOF_VOID_P 2
# elif (LZO_MM_COMPACT || LZO_MM_LARGE || LZO_MM_HUGE)
# define LZO_SIZEOF_VOID_P 4
# else
# error "invalid LZO_ARCH_I086 memory model"
# endif
#elif (LZO_ARCH_M16C)
# if defined(__m32c_cpu__) || defined(__m32cm_cpu__)
# define LZO_SIZEOF_VOID_P 4
# else
# define LZO_SIZEOF_VOID_P 2
# endif
#elif (LZO_ARCH_SPU)
# define LZO_SIZEOF_VOID_P 4
#elif (LZO_ARCH_Z80)
# define LZO_SIZEOF_VOID_P 2
#elif (LZO_SIZEOF_LONG == 8) && ((defined(__mips__) && defined(__R5900__)) || defined(__MIPS_PSX2__))
# define LZO_SIZEOF_VOID_P 4
#elif (LZO_OS_OS400 || defined(__OS400__))
# if defined(__LLP64_IFC__)
# define LZO_SIZEOF_VOID_P 8
# define LZO_SIZEOF_SIZE_T LZO_SIZEOF_LONG
# define LZO_SIZEOF_PTRDIFF_T LZO_SIZEOF_LONG
# else
# define LZO_SIZEOF_VOID_P 16
# define LZO_SIZEOF_SIZE_T LZO_SIZEOF_LONG
# define LZO_SIZEOF_PTRDIFF_T LZO_SIZEOF_LONG
# endif
#elif (defined(__vms) || defined(__VMS)) && (__INITIAL_POINTER_SIZE+0 == 64)
# define LZO_SIZEOF_VOID_P 8
# define LZO_SIZEOF_SIZE_T LZO_SIZEOF_LONG
# define LZO_SIZEOF_PTRDIFF_T LZO_SIZEOF_LONG
#endif
#endif
#if !defined(LZO_SIZEOF_VOID_P)
# define LZO_SIZEOF_VOID_P LZO_SIZEOF_LONG
#endif
LZO_COMPILE_TIME_ASSERT_HEADER(LZO_SIZEOF_VOID_P == sizeof(void *))
#if !defined(LZO_SIZEOF_SIZE_T)
#if (LZO_ARCH_I086 || LZO_ARCH_M16C)
# define LZO_SIZEOF_SIZE_T 2
#endif
#endif
#if !defined(LZO_SIZEOF_SIZE_T)
# define LZO_SIZEOF_SIZE_T LZO_SIZEOF_VOID_P
#endif
#if defined(offsetof)
LZO_COMPILE_TIME_ASSERT_HEADER(LZO_SIZEOF_SIZE_T == sizeof(size_t))
#endif
#if !defined(LZO_SIZEOF_PTRDIFF_T)
#if (LZO_ARCH_I086)
# if (LZO_MM_TINY || LZO_MM_SMALL || LZO_MM_MEDIUM || LZO_MM_HUGE)
# define LZO_SIZEOF_PTRDIFF_T LZO_SIZEOF_VOID_P
# elif (LZO_MM_COMPACT || LZO_MM_LARGE)
# if (LZO_CC_BORLANDC || LZO_CC_TURBOC)
# define LZO_SIZEOF_PTRDIFF_T 4
# else
# define LZO_SIZEOF_PTRDIFF_T 2
# endif
# else
# error "invalid LZO_ARCH_I086 memory model"
# endif
#endif
#endif
#if !defined(LZO_SIZEOF_PTRDIFF_T)
# define LZO_SIZEOF_PTRDIFF_T LZO_SIZEOF_SIZE_T
#endif
#if defined(offsetof)
LZO_COMPILE_TIME_ASSERT_HEADER(LZO_SIZEOF_PTRDIFF_T == sizeof(ptrdiff_t))
#endif
#if !defined(LZO_WORDSIZE)
# define LZO_WORDSIZE LZO_SIZEOF_VOID_P
#endif
#if (LZO_ABI_NEUTRAL_ENDIAN)
# undef LZO_ABI_BIG_ENDIAN
# undef LZO_ABI_LITTLE_ENDIAN
#elif !(LZO_ABI_BIG_ENDIAN) && !(LZO_ABI_LITTLE_ENDIAN)
#if (LZO_ARCH_ALPHA) && (LZO_ARCH_CRAY_MPP)
# define LZO_ABI_BIG_ENDIAN 1
#elif (LZO_ARCH_IA64) && (LZO_OS_POSIX_LINUX || LZO_OS_WIN64)
# define LZO_ABI_LITTLE_ENDIAN 1
#elif (LZO_ARCH_ALPHA || LZO_ARCH_AMD64 || LZO_ARCH_BLACKFIN || LZO_ARCH_CRIS || LZO_ARCH_I086 || LZO_ARCH_I386 || LZO_ARCH_MSP430)
# define LZO_ABI_LITTLE_ENDIAN 1
#elif (LZO_ARCH_AVR32 || LZO_ARCH_M68K || LZO_ARCH_S390 || LZO_ARCH_SPU)
# define LZO_ABI_BIG_ENDIAN 1
#elif 1 && defined(__IAR_SYSTEMS_ICC__) && defined(__LITTLE_ENDIAN__)
# if (__LITTLE_ENDIAN__ == 1)
# define LZO_ABI_LITTLE_ENDIAN 1
# else
# define LZO_ABI_BIG_ENDIAN 1
# endif
#elif 1 && defined(__BIG_ENDIAN__) && !defined(__LITTLE_ENDIAN__)
# define LZO_ABI_BIG_ENDIAN 1
#elif 1 && defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)
# define LZO_ABI_LITTLE_ENDIAN 1
#elif 1 && (LZO_ARCH_ARM) && defined(__ARMEB__) && !defined(__ARMEL__)
# define LZO_ABI_BIG_ENDIAN 1
#elif 1 && (LZO_ARCH_ARM) && defined(__ARMEL__) && !defined(__ARMEB__)
# define LZO_ABI_LITTLE_ENDIAN 1
#elif 1 && (LZO_ARCH_ARM && LZO_CC_ARMCC_ARMCC)
# if defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN)
# error "unexpected configuration - check your compiler defines"
# elif defined(__BIG_ENDIAN)
# define LZO_ABI_BIG_ENDIAN 1
# else
# define LZO_ABI_LITTLE_ENDIAN 1
# endif
#elif 1 && (LZO_ARCH_ARM64) && defined(__AARCH64EB__) && !defined(__AARCH64EL__)
# define LZO_ABI_BIG_ENDIAN 1
#elif 1 && (LZO_ARCH_ARM64) && defined(__AARCH64EL__) && !defined(__AARCH64EB__)
# define LZO_ABI_LITTLE_ENDIAN 1
#elif 1 && (LZO_ARCH_MIPS) && defined(__MIPSEB__) && !defined(__MIPSEL__)
# define LZO_ABI_BIG_ENDIAN 1
#elif 1 && (LZO_ARCH_MIPS) && defined(__MIPSEL__) && !defined(__MIPSEB__)
# define LZO_ABI_LITTLE_ENDIAN 1
#endif
#endif
#if (LZO_ABI_BIG_ENDIAN) && (LZO_ABI_LITTLE_ENDIAN)
# error "unexpected configuration - check your compiler defines"
#endif
#if (LZO_ABI_BIG_ENDIAN)
# define LZO_INFO_ABI_ENDIAN "be"
#elif (LZO_ABI_LITTLE_ENDIAN)
# define LZO_INFO_ABI_ENDIAN "le"
#elif (LZO_ABI_NEUTRAL_ENDIAN)
# define LZO_INFO_ABI_ENDIAN "neutral"
#endif
#if (LZO_SIZEOF_INT == 1 && LZO_SIZEOF_LONG == 2 && LZO_SIZEOF_VOID_P == 2)
# define LZO_ABI_I8LP16 1
# define LZO_INFO_ABI_PM "i8lp16"
#elif (LZO_SIZEOF_INT == 2 && LZO_SIZEOF_LONG == 2 && LZO_SIZEOF_VOID_P == 2)
# define LZO_ABI_ILP16 1
# define LZO_INFO_ABI_PM "ilp16"
#elif (LZO_SIZEOF_INT == 2 && LZO_SIZEOF_LONG == 4 && LZO_SIZEOF_VOID_P == 4)
# define LZO_ABI_LP32 1
# define LZO_INFO_ABI_PM "lp32"
#elif (LZO_SIZEOF_INT == 4 && LZO_SIZEOF_LONG == 4 && LZO_SIZEOF_VOID_P == 4)
# define LZO_ABI_ILP32 1
# define LZO_INFO_ABI_PM "ilp32"
#elif (LZO_SIZEOF_INT == 4 && LZO_SIZEOF_LONG == 4 && LZO_SIZEOF_VOID_P == 8 && LZO_SIZEOF_SIZE_T == 8)
# define LZO_ABI_LLP64 1
# define LZO_INFO_ABI_PM "llp64"
#elif (LZO_SIZEOF_INT == 4 && LZO_SIZEOF_LONG == 8 && LZO_SIZEOF_VOID_P == 8)
# define LZO_ABI_LP64 1
# define LZO_INFO_ABI_PM "lp64"
#elif (LZO_SIZEOF_INT == 8 && LZO_SIZEOF_LONG == 8 && LZO_SIZEOF_VOID_P == 8)
# define LZO_ABI_ILP64 1
# define LZO_INFO_ABI_PM "ilp64"
#elif (LZO_SIZEOF_INT == 4 && LZO_SIZEOF_LONG == 8 && LZO_SIZEOF_VOID_P == 4)
# define LZO_ABI_IP32L64 1
# define LZO_INFO_ABI_PM "ip32l64"
#endif
#if 0
#elif !defined(__LZO_LIBC_OVERRIDE)
#if (LZO_LIBC_NAKED)
# define LZO_INFO_LIBC "naked"
#elif (LZO_LIBC_FREESTANDING)
# define LZO_INFO_LIBC "freestanding"
#elif (LZO_LIBC_MOSTLY_FREESTANDING)
# define LZO_INFO_LIBC "mfreestanding"
#elif (LZO_LIBC_ISOC90)
# define LZO_INFO_LIBC "isoc90"
#elif (LZO_LIBC_ISOC99)
# define LZO_INFO_LIBC "isoc99"
#elif (LZO_CC_ARMCC_ARMCC) && defined(__ARMCLIB_VERSION)
# define LZO_LIBC_ISOC90 1
# define LZO_INFO_LIBC "isoc90"
#elif defined(__dietlibc__)
# define LZO_LIBC_DIETLIBC 1
# define LZO_INFO_LIBC "dietlibc"
#elif defined(_NEWLIB_VERSION)
# define LZO_LIBC_NEWLIB 1
# define LZO_INFO_LIBC "newlib"
#elif defined(__UCLIBC__) && defined(__UCLIBC_MAJOR__) && defined(__UCLIBC_MINOR__)
# if defined(__UCLIBC_SUBLEVEL__)
# define LZO_LIBC_UCLIBC (__UCLIBC_MAJOR__ * 0x10000L + (__UCLIBC_MINOR__-0) * 0x100 + (__UCLIBC_SUBLEVEL__-0))
# else
# define LZO_LIBC_UCLIBC 0x00090bL
# endif
# define LZO_INFO_LIBC "uc" "libc"
#elif defined(__GLIBC__) && defined(__GLIBC_MINOR__)
# define LZO_LIBC_GLIBC (__GLIBC__ * 0x10000L + (__GLIBC_MINOR__-0) * 0x100)
# define LZO_INFO_LIBC "glibc"
#elif (LZO_CC_MWERKS) && defined(__MSL__)
# define LZO_LIBC_MSL __MSL__
# define LZO_INFO_LIBC "msl"
#elif 1 && defined(__IAR_SYSTEMS_ICC__)
# define LZO_LIBC_ISOC90 1
# define LZO_INFO_LIBC "isoc90"
#else
# define LZO_LIBC_DEFAULT 1
# define LZO_INFO_LIBC "default"
#endif
#endif
#if (LZO_ARCH_I386 && (LZO_OS_DOS32 || LZO_OS_WIN32) && (LZO_CC_DMC || LZO_CC_INTELC || LZO_CC_MSC || LZO_CC_PELLESC))
# define LZO_ASM_SYNTAX_MSC 1
#elif (LZO_OS_WIN64 && (LZO_CC_DMC || LZO_CC_INTELC || LZO_CC_MSC || LZO_CC_PELLESC))
#elif (LZO_ARCH_I386 && LZO_CC_GNUC && (LZO_CC_GNUC == 0x011f00ul))
#elif (LZO_ARCH_I386 && (LZO_CC_CLANG || LZO_CC_GNUC || LZO_CC_INTELC || LZO_CC_PATHSCALE))
# define LZO_ASM_SYNTAX_GNUC 1
#elif (LZO_ARCH_AMD64 && (LZO_CC_CLANG || LZO_CC_GNUC || LZO_CC_INTELC || LZO_CC_PATHSCALE))
# define LZO_ASM_SYNTAX_GNUC 1
#elif (LZO_CC_GNUC)
# define LZO_ASM_SYNTAX_GNUC 1
#endif
#if (LZO_ASM_SYNTAX_GNUC)
#if (LZO_ARCH_I386 && LZO_CC_GNUC && (LZO_CC_GNUC < 0x020000ul))
# define __LZO_ASM_CLOBBER "ax"
# define __LZO_ASM_CLOBBER_LIST_CC /*empty*/
# define __LZO_ASM_CLOBBER_LIST_CC_MEMORY /*empty*/
# define __LZO_ASM_CLOBBER_LIST_EMPTY /*empty*/
#elif (LZO_CC_INTELC && (__INTEL_COMPILER < 1000))
# define __LZO_ASM_CLOBBER "memory"
# define __LZO_ASM_CLOBBER_LIST_CC /*empty*/
# define __LZO_ASM_CLOBBER_LIST_CC_MEMORY : "memory"
# define __LZO_ASM_CLOBBER_LIST_EMPTY /*empty*/
#else
# define __LZO_ASM_CLOBBER "cc", "memory"
# define __LZO_ASM_CLOBBER_LIST_CC : "cc"
# define __LZO_ASM_CLOBBER_LIST_CC_MEMORY : "cc", "memory"
# define __LZO_ASM_CLOBBER_LIST_EMPTY /*empty*/
#endif
#endif
#if (LZO_ARCH_ALPHA)
# define LZO_OPT_AVOID_UINT_INDEX 1
#elif (LZO_ARCH_AMD64)
# define LZO_OPT_AVOID_INT_INDEX 1
# define LZO_OPT_AVOID_UINT_INDEX 1
# ifndef LZO_OPT_UNALIGNED16
# define LZO_OPT_UNALIGNED16 1
# endif
# ifndef LZO_OPT_UNALIGNED32
# define LZO_OPT_UNALIGNED32 1
# endif
# ifndef LZO_OPT_UNALIGNED64
# define LZO_OPT_UNALIGNED64 1
# endif
#elif (LZO_ARCH_ARM)
# if defined(__ARM_FEATURE_UNALIGNED)
# ifndef LZO_OPT_UNALIGNED16
# define LZO_OPT_UNALIGNED16 1
# endif
# ifndef LZO_OPT_UNALIGNED32
# define LZO_OPT_UNALIGNED32 1
# endif
# elif defined(__TARGET_ARCH_ARM) && ((__TARGET_ARCH_ARM+0) >= 7)
# ifndef LZO_OPT_UNALIGNED16
# define LZO_OPT_UNALIGNED16 1
# endif
# ifndef LZO_OPT_UNALIGNED32
# define LZO_OPT_UNALIGNED32 1
# endif
# elif defined(__TARGET_ARCH_ARM) && ((__TARGET_ARCH_ARM+0) >= 6) && !defined(__TARGET_PROFILE_M)
# ifndef LZO_OPT_UNALIGNED16
# define LZO_OPT_UNALIGNED16 1
# endif
# ifndef LZO_OPT_UNALIGNED32
# define LZO_OPT_UNALIGNED32 1
# endif
# endif
#elif (LZO_ARCH_ARM64)
# ifndef LZO_OPT_UNALIGNED16
# define LZO_OPT_UNALIGNED16 1
# endif
# ifndef LZO_OPT_UNALIGNED32
# define LZO_OPT_UNALIGNED32 1
# endif
# ifndef LZO_OPT_UNALIGNED64
# define LZO_OPT_UNALIGNED64 1
# endif
#elif (LZO_ARCH_CRIS)
# ifndef LZO_OPT_UNALIGNED16
# define LZO_OPT_UNALIGNED16 1
# endif
# ifndef LZO_OPT_UNALIGNED32
# define LZO_OPT_UNALIGNED32 1
# endif
#elif (LZO_ARCH_I386)
# ifndef LZO_OPT_UNALIGNED16
# define LZO_OPT_UNALIGNED16 1
# endif
# ifndef LZO_OPT_UNALIGNED32
# define LZO_OPT_UNALIGNED32 1
# endif
#elif (LZO_ARCH_IA64)
# define LZO_OPT_AVOID_INT_INDEX 1
# define LZO_OPT_AVOID_UINT_INDEX 1
# define LZO_OPT_PREFER_POSTINC 1
#elif (LZO_ARCH_M68K)
# define LZO_OPT_PREFER_POSTINC 1
# define LZO_OPT_PREFER_PREDEC 1
# if defined(__mc68020__) && !defined(__mcoldfire__)
# ifndef LZO_OPT_UNALIGNED16
# define LZO_OPT_UNALIGNED16 1
# endif
# ifndef LZO_OPT_UNALIGNED32
# define LZO_OPT_UNALIGNED32 1
# endif
# endif
#elif (LZO_ARCH_MIPS)
# define LZO_OPT_AVOID_UINT_INDEX 1
#elif (LZO_ARCH_POWERPC)
# define LZO_OPT_PREFER_PREINC 1
# define LZO_OPT_PREFER_PREDEC 1
# if (LZO_ABI_BIG_ENDIAN)
# ifndef LZO_OPT_UNALIGNED16
# define LZO_OPT_UNALIGNED16 1
# endif
# ifndef LZO_OPT_UNALIGNED32
# define LZO_OPT_UNALIGNED32 1
# endif
# if (LZO_WORDSIZE == 8)
# ifndef LZO_OPT_UNALIGNED64
# define LZO_OPT_UNALIGNED64 1
# endif
# endif
# endif
#elif (LZO_ARCH_S390)
# ifndef LZO_OPT_UNALIGNED16
# define LZO_OPT_UNALIGNED16 1
# endif
# ifndef LZO_OPT_UNALIGNED32
# define LZO_OPT_UNALIGNED32 1
# endif
# if (LZO_WORDSIZE == 8)
# ifndef LZO_OPT_UNALIGNED64
# define LZO_OPT_UNALIGNED64 1
# endif
# endif
#elif (LZO_ARCH_SH)
# define LZO_OPT_PREFER_POSTINC 1
# define LZO_OPT_PREFER_PREDEC 1
#endif
#ifndef LZO_CFG_NO_INLINE_ASM
#if (LZO_ABI_NEUTRAL_ENDIAN) || (LZO_ARCH_GENERIC)
# define LZO_CFG_NO_INLINE_ASM 1
#elif (LZO_CC_LLVM)
# define LZO_CFG_NO_INLINE_ASM 1
#endif
#endif
#if (LZO_CFG_NO_INLINE_ASM)
# undef LZO_ASM_SYNTAX_MSC
# undef LZO_ASM_SYNTAX_GNUC
# undef __LZO_ASM_CLOBBER
# undef __LZO_ASM_CLOBBER_LIST_CC
# undef __LZO_ASM_CLOBBER_LIST_CC_MEMORY
# undef __LZO_ASM_CLOBBER_LIST_EMPTY
#endif
#ifndef LZO_CFG_NO_UNALIGNED
#if (LZO_ABI_NEUTRAL_ENDIAN) || (LZO_ARCH_GENERIC)
# define LZO_CFG_NO_UNALIGNED 1
#endif
#endif
#if (LZO_CFG_NO_UNALIGNED)
# undef LZO_OPT_UNALIGNED16
# undef LZO_OPT_UNALIGNED32
# undef LZO_OPT_UNALIGNED64
#endif
#if defined(__LZO_INFOSTR_MM)
#elif (LZO_MM_FLAT) && (defined(__LZO_INFOSTR_PM) || defined(LZO_INFO_ABI_PM))
# define __LZO_INFOSTR_MM ""
#elif defined(LZO_INFO_MM)
# define __LZO_INFOSTR_MM "." LZO_INFO_MM
#else
# define __LZO_INFOSTR_MM ""
#endif
#if defined(__LZO_INFOSTR_PM)
#elif defined(LZO_INFO_ABI_PM)
# define __LZO_INFOSTR_PM "." LZO_INFO_ABI_PM
#else
# define __LZO_INFOSTR_PM ""
#endif
#if defined(__LZO_INFOSTR_ENDIAN)
#elif defined(LZO_INFO_ABI_ENDIAN)
# define __LZO_INFOSTR_ENDIAN "." LZO_INFO_ABI_ENDIAN
#else
# define __LZO_INFOSTR_ENDIAN ""
#endif
#if defined(__LZO_INFOSTR_OSNAME)
#elif defined(LZO_INFO_OS_CONSOLE)
# define __LZO_INFOSTR_OSNAME LZO_INFO_OS "." LZO_INFO_OS_CONSOLE
#elif defined(LZO_INFO_OS_POSIX)
# define __LZO_INFOSTR_OSNAME LZO_INFO_OS "." LZO_INFO_OS_POSIX
#else
# define __LZO_INFOSTR_OSNAME LZO_INFO_OS
#endif
#if defined(__LZO_INFOSTR_LIBC)
#elif defined(LZO_INFO_LIBC)
# define __LZO_INFOSTR_LIBC "." LZO_INFO_LIBC
#else
# define __LZO_INFOSTR_LIBC ""
#endif
#if defined(__LZO_INFOSTR_CCVER)
#elif defined(LZO_INFO_CCVER)
# define __LZO_INFOSTR_CCVER " " LZO_INFO_CCVER
#else
# define __LZO_INFOSTR_CCVER ""
#endif
#define LZO_INFO_STRING \
LZO_INFO_ARCH __LZO_INFOSTR_MM __LZO_INFOSTR_PM __LZO_INFOSTR_ENDIAN \
" " __LZO_INFOSTR_OSNAME __LZO_INFOSTR_LIBC " " LZO_INFO_CC __LZO_INFOSTR_CCVER
#if !(LZO_CFG_SKIP_LZO_TYPES)
#if (!(LZO_SIZEOF_SHORT+0 > 0 && LZO_SIZEOF_INT+0 > 0 && LZO_SIZEOF_LONG+0 > 0))
# error "missing defines for sizes"
#endif
#if (!(LZO_SIZEOF_PTRDIFF_T+0 > 0 && LZO_SIZEOF_SIZE_T+0 > 0 && LZO_SIZEOF_VOID_P+0 > 0))
# error "missing defines for sizes"
#endif
#if !defined(lzo_llong_t)
#if (LZO_SIZEOF_LONG_LONG+0 > 0)
__lzo_gnuc_extension__ typedef long long lzo_llong_t__;
__lzo_gnuc_extension__ typedef unsigned long long lzo_ullong_t__;
# define lzo_llong_t lzo_llong_t__
# define lzo_ullong_t lzo_ullong_t__
#endif
#endif
#if !defined(lzo_int16e_t)
#if (LZO_SIZEOF_LONG == 2)
# define lzo_int16e_t long
# define lzo_uint16e_t unsigned long
#elif (LZO_SIZEOF_INT == 2)
# define lzo_int16e_t int
# define lzo_uint16e_t unsigned int
#elif (LZO_SIZEOF_SHORT == 2)
# define lzo_int16e_t short int
# define lzo_uint16e_t unsigned short int
#elif 1 && !(LZO_CFG_TYPE_NO_MODE_HI) && (LZO_CC_CLANG || (LZO_CC_GNUC >= 0x025f00ul) || LZO_CC_LLVM)
typedef int lzo_int16e_hi_t__ __attribute__((__mode__(__HI__)));
typedef unsigned int lzo_uint16e_hi_t__ __attribute__((__mode__(__HI__)));
# define lzo_int16e_t lzo_int16e_hi_t__
# define lzo_uint16e_t lzo_uint16e_hi_t__
#elif (LZO_SIZEOF___INT16 == 2)
# define lzo_int16e_t __int16
# define lzo_uint16e_t unsigned __int16
#else
#endif
#endif
#if defined(lzo_int16e_t)
# define LZO_SIZEOF_LZO_INT16E_T 2
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int16e_t) == 2)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int16e_t) == LZO_SIZEOF_LZO_INT16E_T)
#endif
#if !defined(lzo_int32e_t)
#if (LZO_SIZEOF_LONG == 4)
# define lzo_int32e_t long int
# define lzo_uint32e_t unsigned long int
#elif (LZO_SIZEOF_INT == 4)
# define lzo_int32e_t int
# define lzo_uint32e_t unsigned int
#elif (LZO_SIZEOF_SHORT == 4)
# define lzo_int32e_t short int
# define lzo_uint32e_t unsigned short int
#elif (LZO_SIZEOF_LONG_LONG == 4)
# define lzo_int32e_t lzo_llong_t
# define lzo_uint32e_t lzo_ullong_t
#elif 1 && !(LZO_CFG_TYPE_NO_MODE_SI) && (LZO_CC_CLANG || (LZO_CC_GNUC >= 0x025f00ul) || LZO_CC_LLVM) && (__INT_MAX__+0 > 2147483647L)
typedef int lzo_int32e_si_t__ __attribute__((__mode__(__SI__)));
typedef unsigned int lzo_uint32e_si_t__ __attribute__((__mode__(__SI__)));
# define lzo_int32e_t lzo_int32e_si_t__
# define lzo_uint32e_t lzo_uint32e_si_t__
#elif 1 && !(LZO_CFG_TYPE_NO_MODE_SI) && (LZO_CC_GNUC >= 0x025f00ul) && defined(__AVR__) && (__LONG_MAX__+0 == 32767L)
typedef int lzo_int32e_si_t__ __attribute__((__mode__(__SI__)));
typedef unsigned int lzo_uint32e_si_t__ __attribute__((__mode__(__SI__)));
# define lzo_int32e_t lzo_int32e_si_t__
# define lzo_uint32e_t lzo_uint32e_si_t__
# define LZO_INT32_C(c) (c##LL)
# define LZO_UINT32_C(c) (c##ULL)
#elif (LZO_SIZEOF___INT32 == 4)
# define lzo_int32e_t __int32
# define lzo_uint32e_t unsigned __int32
#else
#endif
#endif
#if defined(lzo_int32e_t)
# define LZO_SIZEOF_LZO_INT32E_T 4
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int32e_t) == 4)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int32e_t) == LZO_SIZEOF_LZO_INT32E_T)
#endif
#if !defined(lzo_int64e_t)
#if (LZO_SIZEOF___INT64 == 8)
# if (LZO_CC_BORLANDC) && !(LZO_CFG_TYPE_PREFER___INT64)
# define LZO_CFG_TYPE_PREFER___INT64 1
# endif
#endif
#if (LZO_SIZEOF_INT == 8) && (LZO_SIZEOF_INT < LZO_SIZEOF_LONG)
# define lzo_int64e_t int
# define lzo_uint64e_t unsigned int
# define LZO_SIZEOF_LZO_INT64E_T LZO_SIZEOF_INT
#elif (LZO_SIZEOF_LONG == 8)
# define lzo_int64e_t long int
# define lzo_uint64e_t unsigned long int
# define LZO_SIZEOF_LZO_INT64E_T LZO_SIZEOF_LONG
#elif (LZO_SIZEOF_LONG_LONG == 8) && !(LZO_CFG_TYPE_PREFER___INT64)
# define lzo_int64e_t lzo_llong_t
# define lzo_uint64e_t lzo_ullong_t
# if (LZO_CC_BORLANDC)
# define LZO_INT64_C(c) ((c) + 0ll)
# define LZO_UINT64_C(c) ((c) + 0ull)
# elif 0
# define LZO_INT64_C(c) (__lzo_gnuc_extension__ (c##LL))
# define LZO_UINT64_C(c) (__lzo_gnuc_extension__ (c##ULL))
# else
# define LZO_INT64_C(c) (c##LL)
# define LZO_UINT64_C(c) (c##ULL)
# endif
# define LZO_SIZEOF_LZO_INT64E_T LZO_SIZEOF_LONG_LONG
#elif (LZO_SIZEOF___INT64 == 8)
# define lzo_int64e_t __int64
# define lzo_uint64e_t unsigned __int64
# if (LZO_CC_BORLANDC)
# define LZO_INT64_C(c) ((c) + 0i64)
# define LZO_UINT64_C(c) ((c) + 0ui64)
# else
# define LZO_INT64_C(c) (c##i64)
# define LZO_UINT64_C(c) (c##ui64)
# endif
# define LZO_SIZEOF_LZO_INT64E_T LZO_SIZEOF___INT64
#else
#endif
#endif
#if defined(lzo_int64e_t)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int64e_t) == 8)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int64e_t) == LZO_SIZEOF_LZO_INT64E_T)
#endif
#if !defined(lzo_int32l_t)
#if defined(lzo_int32e_t)
# define lzo_int32l_t lzo_int32e_t
# define lzo_uint32l_t lzo_uint32e_t
# define LZO_SIZEOF_LZO_INT32L_T LZO_SIZEOF_LZO_INT32E_T
#elif (LZO_SIZEOF_INT >= 4) && (LZO_SIZEOF_INT < LZO_SIZEOF_LONG)
# define lzo_int32l_t int
# define lzo_uint32l_t unsigned int
# define LZO_SIZEOF_LZO_INT32L_T LZO_SIZEOF_INT
#elif (LZO_SIZEOF_LONG >= 4)
# define lzo_int32l_t long int
# define lzo_uint32l_t unsigned long int
# define LZO_SIZEOF_LZO_INT32L_T LZO_SIZEOF_LONG
#else
# error "lzo_int32l_t"
#endif
#endif
#if 1
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int32l_t) >= 4)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int32l_t) == LZO_SIZEOF_LZO_INT32L_T)
#endif
#if !defined(lzo_int64l_t)
#if defined(lzo_int64e_t)
# define lzo_int64l_t lzo_int64e_t
# define lzo_uint64l_t lzo_uint64e_t
# define LZO_SIZEOF_LZO_INT64L_T LZO_SIZEOF_LZO_INT64E_T
#else
#endif
#endif
#if defined(lzo_int64l_t)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int64l_t) >= 8)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int64l_t) == LZO_SIZEOF_LZO_INT64L_T)
#endif
#if !defined(lzo_int32f_t)
#if (LZO_SIZEOF_SIZE_T >= 8)
# define lzo_int32f_t lzo_int64l_t
# define lzo_uint32f_t lzo_uint64l_t
# define LZO_SIZEOF_LZO_INT32F_T LZO_SIZEOF_LZO_INT64L_T
#else
# define lzo_int32f_t lzo_int32l_t
# define lzo_uint32f_t lzo_uint32l_t
# define LZO_SIZEOF_LZO_INT32F_T LZO_SIZEOF_LZO_INT32L_T
#endif
#endif
#if 1
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int32f_t) >= 4)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int32f_t) == LZO_SIZEOF_LZO_INT32F_T)
#endif
#if !defined(lzo_int64f_t)
#if defined(lzo_int64l_t)
# define lzo_int64f_t lzo_int64l_t
# define lzo_uint64f_t lzo_uint64l_t
# define LZO_SIZEOF_LZO_INT64F_T LZO_SIZEOF_LZO_INT64L_T
#else
#endif
#endif
#if defined(lzo_int64f_t)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int64f_t) >= 8)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int64f_t) == LZO_SIZEOF_LZO_INT64F_T)
#endif
#if !defined(lzo_intptr_t)
#if 1 && (LZO_OS_OS400 && (LZO_SIZEOF_VOID_P == 16))
# define __LZO_INTPTR_T_IS_POINTER 1
typedef char* lzo_intptr_t;
typedef char* lzo_uintptr_t;
# define lzo_intptr_t lzo_intptr_t
# define lzo_uintptr_t lzo_uintptr_t
# define LZO_SIZEOF_LZO_INTPTR_T LZO_SIZEOF_VOID_P
#elif (LZO_CC_MSC && (_MSC_VER >= 1300) && (LZO_SIZEOF_VOID_P == 4) && (LZO_SIZEOF_INT == 4))
typedef __w64 int lzo_intptr_t;
typedef __w64 unsigned int lzo_uintptr_t;
# define lzo_intptr_t lzo_intptr_t
# define lzo_uintptr_t lzo_uintptr_t
# define LZO_SIZEOF_LZO_INTPTR_T LZO_SIZEOF_INT
#elif (LZO_SIZEOF_SHORT == LZO_SIZEOF_VOID_P) && (LZO_SIZEOF_INT > LZO_SIZEOF_VOID_P)
# define lzo_intptr_t short
# define lzo_uintptr_t unsigned short
# define LZO_SIZEOF_LZO_INTPTR_T LZO_SIZEOF_SHORT
#elif (LZO_SIZEOF_INT >= LZO_SIZEOF_VOID_P) && (LZO_SIZEOF_INT < LZO_SIZEOF_LONG)
# define lzo_intptr_t int
# define lzo_uintptr_t unsigned int
# define LZO_SIZEOF_LZO_INTPTR_T LZO_SIZEOF_INT
#elif (LZO_SIZEOF_LONG >= LZO_SIZEOF_VOID_P)
# define lzo_intptr_t long
# define lzo_uintptr_t unsigned long
# define LZO_SIZEOF_LZO_INTPTR_T LZO_SIZEOF_LONG
#elif (LZO_SIZEOF_LZO_INT64L_T >= LZO_SIZEOF_VOID_P)
# define lzo_intptr_t lzo_int64l_t
# define lzo_uintptr_t lzo_uint64l_t
# define LZO_SIZEOF_LZO_INTPTR_T LZO_SIZEOF_LZO_INT64L_T
#else
# error "lzo_intptr_t"
#endif
#endif
#if 1
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_intptr_t) >= sizeof(void *))
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_intptr_t) == sizeof(lzo_uintptr_t))
#endif
#if !defined(lzo_word_t)
#if defined(LZO_WORDSIZE) && (LZO_WORDSIZE+0 > 0)
#if (LZO_WORDSIZE == LZO_SIZEOF_LZO_INTPTR_T) && !(__LZO_INTPTR_T_IS_POINTER)
# define lzo_word_t lzo_uintptr_t
# define lzo_sword_t lzo_intptr_t
# define LZO_SIZEOF_LZO_WORD_T LZO_SIZEOF_LZO_INTPTR_T
#elif (LZO_WORDSIZE == LZO_SIZEOF_LONG)
# define lzo_word_t unsigned long
# define lzo_sword_t long
# define LZO_SIZEOF_LZO_WORD_T LZO_SIZEOF_LONG
#elif (LZO_WORDSIZE == LZO_SIZEOF_INT)
# define lzo_word_t unsigned int
# define lzo_sword_t int
# define LZO_SIZEOF_LZO_WORD_T LZO_SIZEOF_INT
#elif (LZO_WORDSIZE == LZO_SIZEOF_SHORT)
# define lzo_word_t unsigned short
# define lzo_sword_t short
# define LZO_SIZEOF_LZO_WORD_T LZO_SIZEOF_SHORT
#elif (LZO_WORDSIZE == 1)
# define lzo_word_t unsigned char
# define lzo_sword_t signed char
# define LZO_SIZEOF_LZO_WORD_T 1
#elif (LZO_WORDSIZE == LZO_SIZEOF_LZO_INT64L_T)
# define lzo_word_t lzo_uint64l_t
# define lzo_sword_t lzo_int64l_t
# define LZO_SIZEOF_LZO_WORD_T LZO_SIZEOF_LZO_INT64L_T
#elif (LZO_ARCH_SPU) && (LZO_CC_GNUC)
#if 0
typedef unsigned lzo_word_t __attribute__((__mode__(__V16QI__)));
typedef int lzo_sword_t __attribute__((__mode__(__V16QI__)));
# define lzo_word_t lzo_word_t
# define lzo_sword_t lzo_sword_t
# define LZO_SIZEOF_LZO_WORD_T 16
#endif
#else
# error "lzo_word_t"
#endif
#endif
#endif
#if 1 && defined(lzo_word_t)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_word_t) == LZO_WORDSIZE)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_sword_t) == LZO_WORDSIZE)
#endif
#if 1
#define lzo_int8_t signed char
#define lzo_uint8_t unsigned char
#define LZO_SIZEOF_LZO_INT8_T 1
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int8_t) == 1)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int8_t) == sizeof(lzo_uint8_t))
#endif
#if defined(lzo_int16e_t)
#define lzo_int16_t lzo_int16e_t
#define lzo_uint16_t lzo_uint16e_t
#define LZO_SIZEOF_LZO_INT16_T LZO_SIZEOF_LZO_INT16E_T
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int16_t) == 2)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int16_t) == sizeof(lzo_uint16_t))
#endif
#if defined(lzo_int32e_t)
#define lzo_int32_t lzo_int32e_t
#define lzo_uint32_t lzo_uint32e_t
#define LZO_SIZEOF_LZO_INT32_T LZO_SIZEOF_LZO_INT32E_T
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int32_t) == 4)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int32_t) == sizeof(lzo_uint32_t))
#endif
#if defined(lzo_int64e_t)
#define lzo_int64_t lzo_int64e_t
#define lzo_uint64_t lzo_uint64e_t
#define LZO_SIZEOF_LZO_INT64_T LZO_SIZEOF_LZO_INT64E_T
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int64_t) == 8)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int64_t) == sizeof(lzo_uint64_t))
#endif
#if 1
#define lzo_int_least32_t lzo_int32l_t
#define lzo_uint_least32_t lzo_uint32l_t
#define LZO_SIZEOF_LZO_INT_LEAST32_T LZO_SIZEOF_LZO_INT32L_T
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int_least32_t) >= 4)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int_least32_t) == sizeof(lzo_uint_least32_t))
#endif
#if defined(lzo_int64l_t)
#define lzo_int_least64_t lzo_int64l_t
#define lzo_uint_least64_t lzo_uint64l_t
#define LZO_SIZEOF_LZO_INT_LEAST64_T LZO_SIZEOF_LZO_INT64L_T
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int_least64_t) >= 8)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int_least64_t) == sizeof(lzo_uint_least64_t))
#endif
#if 1
#define lzo_int_fast32_t lzo_int32f_t
#define lzo_uint_fast32_t lzo_uint32f_t
#define LZO_SIZEOF_LZO_INT_FAST32_T LZO_SIZEOF_LZO_INT32F_T
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int_fast32_t) >= 4)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int_fast32_t) == sizeof(lzo_uint_fast32_t))
#endif
#if defined(lzo_int64f_t)
#define lzo_int_fast64_t lzo_int64f_t
#define lzo_uint_fast64_t lzo_uint64f_t
#define LZO_SIZEOF_LZO_INT_FAST64_T LZO_SIZEOF_LZO_INT64F_T
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int_fast64_t) >= 8)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int_fast64_t) == sizeof(lzo_uint_fast64_t))
#endif
#if !defined(LZO_INT16_C)
# if (LZO_BROKEN_INTEGRAL_CONSTANTS) && (LZO_SIZEOF_INT >= 2)
# define LZO_INT16_C(c) ((c) + 0)
# define LZO_UINT16_C(c) ((c) + 0U)
# elif (LZO_BROKEN_INTEGRAL_CONSTANTS) && (LZO_SIZEOF_LONG >= 2)
# define LZO_INT16_C(c) ((c) + 0L)
# define LZO_UINT16_C(c) ((c) + 0UL)
# elif (LZO_SIZEOF_INT >= 2)
# define LZO_INT16_C(c) (c)
# define LZO_UINT16_C(c) (c##U)
# elif (LZO_SIZEOF_LONG >= 2)
# define LZO_INT16_C(c) (c##L)
# define LZO_UINT16_C(c) (c##UL)
# else
# error "LZO_INT16_C"
# endif
#endif
#if !defined(LZO_INT32_C)
# if (LZO_BROKEN_INTEGRAL_CONSTANTS) && (LZO_SIZEOF_INT >= 4)
# define LZO_INT32_C(c) ((c) + 0)
# define LZO_UINT32_C(c) ((c) + 0U)
# elif (LZO_BROKEN_INTEGRAL_CONSTANTS) && (LZO_SIZEOF_LONG >= 4)
# define LZO_INT32_C(c) ((c) + 0L)
# define LZO_UINT32_C(c) ((c) + 0UL)
# elif (LZO_SIZEOF_INT >= 4)
# define LZO_INT32_C(c) (c)
# define LZO_UINT32_C(c) (c##U)
# elif (LZO_SIZEOF_LONG >= 4)
# define LZO_INT32_C(c) (c##L)
# define LZO_UINT32_C(c) (c##UL)
# elif (LZO_SIZEOF_LONG_LONG >= 4)
# define LZO_INT32_C(c) (c##LL)
# define LZO_UINT32_C(c) (c##ULL)
# else
# error "LZO_INT32_C"
# endif
#endif
#if !defined(LZO_INT64_C) && defined(lzo_int64l_t)
# if (LZO_BROKEN_INTEGRAL_CONSTANTS) && (LZO_SIZEOF_INT >= 8)
# define LZO_INT64_C(c) ((c) + 0)
# define LZO_UINT64_C(c) ((c) + 0U)
# elif (LZO_BROKEN_INTEGRAL_CONSTANTS) && (LZO_SIZEOF_LONG >= 8)
# define LZO_INT64_C(c) ((c) + 0L)
# define LZO_UINT64_C(c) ((c) + 0UL)
# elif (LZO_SIZEOF_INT >= 8)
# define LZO_INT64_C(c) (c)
# define LZO_UINT64_C(c) (c##U)
# elif (LZO_SIZEOF_LONG >= 8)
# define LZO_INT64_C(c) (c##L)
# define LZO_UINT64_C(c) (c##UL)
# else
# error "LZO_INT64_C"
# endif
#endif
#endif
#endif /* already included */
/* vim:set ts=4 sw=4 et: */

View File

@@ -1,6053 +0,0 @@
/* minilzo.c -- mini subset of the LZO real-time data compression library
This file is part of the LZO real-time data compression library.
Copyright (C) 1996-2014 Markus Franz Xaver Johannes Oberhumer
All Rights Reserved.
The LZO library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of
the License, or (at your option) any later version.
The LZO library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with the LZO library; see the file COPYING.
If not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
Markus F.X.J. Oberhumer
<markus@oberhumer.com>
http://www.oberhumer.com/opensource/lzo/
*/
/*
* NOTE:
* the full LZO package can be found at
* http://www.oberhumer.com/opensource/lzo/
*/
#define __LZO_IN_MINILZO 1
#if defined(LZO_CFG_FREESTANDING)
# undef MINILZO_HAVE_CONFIG_H
# define LZO_LIBC_FREESTANDING 1
# define LZO_OS_FREESTANDING 1
#endif
#ifdef MINILZO_HAVE_CONFIG_H
# include <config.h>
#endif
#include <limits.h>
#include <stddef.h>
#if defined(MINILZO_CFG_USE_INTERNAL_LZODEFS)
#ifndef __LZODEFS_H_INCLUDED
#define __LZODEFS_H_INCLUDED 1
#if defined(__CYGWIN32__) && !defined(__CYGWIN__)
# define __CYGWIN__ __CYGWIN32__
#endif
#if 1 && defined(__INTERIX) && defined(__GNUC__) && !defined(_ALL_SOURCE)
# define _ALL_SOURCE 1
#endif
#if defined(__mips__) && defined(__R5900__)
# if !defined(__LONG_MAX__)
# define __LONG_MAX__ 9223372036854775807L
# endif
#endif
#if !defined(LZO_CFG_NO_DISABLE_WUNDEF)
#if defined(__ARMCC_VERSION)
# pragma diag_suppress 193
#elif defined(__clang__) && defined(__clang_minor__)
# pragma clang diagnostic ignored "-Wundef"
#elif defined(__INTEL_COMPILER)
# pragma warning(disable: 193)
#elif defined(__KEIL__) && defined(__C166__)
# pragma warning disable = 322
#elif defined(__GNUC__) && defined(__GNUC_MINOR__) && !defined(__PATHSCALE__)
# if ((__GNUC__-0) >= 5 || ((__GNUC__-0) == 4 && (__GNUC_MINOR__-0) >= 2))
# pragma GCC diagnostic ignored "-Wundef"
# endif
#elif defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER) && !defined(__MWERKS__)
# if ((_MSC_VER-0) >= 1300)
# pragma warning(disable: 4668)
# endif
#endif
#endif
#if 0 && defined(__POCC__) && defined(_WIN32)
# if (__POCC__ >= 400)
# pragma warn(disable: 2216)
# endif
#endif
#if 0 && defined(__WATCOMC__)
# if (__WATCOMC__ >= 1050) && (__WATCOMC__ < 1060)
# pragma warning 203 9
# endif
#endif
#if defined(__BORLANDC__) && defined(__MSDOS__) && !defined(__FLAT__)
# pragma option -h
#endif
#if !(LZO_CFG_NO_DISABLE_WCRTNONSTDC)
#ifndef _CRT_NONSTDC_NO_DEPRECATE
#define _CRT_NONSTDC_NO_DEPRECATE 1
#endif
#ifndef _CRT_NONSTDC_NO_WARNINGS
#define _CRT_NONSTDC_NO_WARNINGS 1
#endif
#ifndef _CRT_SECURE_NO_DEPRECATE
#define _CRT_SECURE_NO_DEPRECATE 1
#endif
#ifndef _CRT_SECURE_NO_WARNINGS
#define _CRT_SECURE_NO_WARNINGS 1
#endif
#endif
#if 0
#define LZO_0xffffUL 0xfffful
#define LZO_0xffffffffUL 0xfffffffful
#else
#define LZO_0xffffUL 65535ul
#define LZO_0xffffffffUL 4294967295ul
#endif
#define LZO_0xffffL LZO_0xffffUL
#define LZO_0xffffffffL LZO_0xffffffffUL
#if (LZO_0xffffL == LZO_0xffffffffL)
# error "your preprocessor is broken 1"
#endif
#if (16ul * 16384ul != 262144ul)
# error "your preprocessor is broken 2"
#endif
#if 0
#if (32767 >= 4294967295ul)
# error "your preprocessor is broken 3"
#endif
#if (65535u >= 4294967295ul)
# error "your preprocessor is broken 4"
#endif
#endif
#if defined(__COUNTER__)
# ifndef LZO_CFG_USE_COUNTER
# define LZO_CFG_USE_COUNTER 1
# endif
#else
# undef LZO_CFG_USE_COUNTER
#endif
#if (UINT_MAX == LZO_0xffffL)
#if defined(__ZTC__) && defined(__I86__) && !defined(__OS2__)
# if !defined(MSDOS)
# define MSDOS 1
# endif
# if !defined(_MSDOS)
# define _MSDOS 1
# endif
#elif 0 && defined(__VERSION) && defined(MB_LEN_MAX)
# if (__VERSION == 520) && (MB_LEN_MAX == 1)
# if !defined(__AZTEC_C__)
# define __AZTEC_C__ __VERSION
# endif
# if !defined(__DOS__)
# define __DOS__ 1
# endif
# endif
#endif
#endif
#if defined(_MSC_VER) && defined(M_I86HM) && (UINT_MAX == LZO_0xffffL)
# define ptrdiff_t long
# define _PTRDIFF_T_DEFINED 1
#endif
#if (UINT_MAX == LZO_0xffffL)
# undef __LZO_RENAME_A
# undef __LZO_RENAME_B
# if defined(__AZTEC_C__) && defined(__DOS__)
# define __LZO_RENAME_A 1
# elif defined(_MSC_VER) && defined(MSDOS)
# if (_MSC_VER < 600)
# define __LZO_RENAME_A 1
# elif (_MSC_VER < 700)
# define __LZO_RENAME_B 1
# endif
# elif defined(__TSC__) && defined(__OS2__)
# define __LZO_RENAME_A 1
# elif defined(__MSDOS__) && defined(__TURBOC__) && (__TURBOC__ < 0x0410)
# define __LZO_RENAME_A 1
# elif defined(__PACIFIC__) && defined(DOS)
# if !defined(__far)
# define __far far
# endif
# if !defined(__near)
# define __near near
# endif
# endif
# if defined(__LZO_RENAME_A)
# if !defined(__cdecl)
# define __cdecl cdecl
# endif
# if !defined(__far)
# define __far far
# endif
# if !defined(__huge)
# define __huge huge
# endif
# if !defined(__near)
# define __near near
# endif
# if !defined(__pascal)
# define __pascal pascal
# endif
# if !defined(__huge)
# define __huge huge
# endif
# elif defined(__LZO_RENAME_B)
# if !defined(__cdecl)
# define __cdecl _cdecl
# endif
# if !defined(__far)
# define __far _far
# endif
# if !defined(__huge)
# define __huge _huge
# endif
# if !defined(__near)
# define __near _near
# endif
# if !defined(__pascal)
# define __pascal _pascal
# endif
# elif (defined(__PUREC__) || defined(__TURBOC__)) && defined(__TOS__)
# if !defined(__cdecl)
# define __cdecl cdecl
# endif
# if !defined(__pascal)
# define __pascal pascal
# endif
# endif
# undef __LZO_RENAME_A
# undef __LZO_RENAME_B
#endif
#if (UINT_MAX == LZO_0xffffL)
#if defined(__AZTEC_C__) && defined(__DOS__)
# define LZO_BROKEN_CDECL_ALT_SYNTAX 1
#elif defined(_MSC_VER) && defined(MSDOS)
# if (_MSC_VER < 600)
# define LZO_BROKEN_INTEGRAL_CONSTANTS 1
# endif
# if (_MSC_VER < 700)
# define LZO_BROKEN_INTEGRAL_PROMOTION 1
# define LZO_BROKEN_SIZEOF 1
# endif
#elif defined(__PACIFIC__) && defined(DOS)
# define LZO_BROKEN_INTEGRAL_CONSTANTS 1
#elif defined(__TURBOC__) && defined(__MSDOS__)
# if (__TURBOC__ < 0x0150)
# define LZO_BROKEN_CDECL_ALT_SYNTAX 1
# define LZO_BROKEN_INTEGRAL_CONSTANTS 1
# define LZO_BROKEN_INTEGRAL_PROMOTION 1
# endif
# if (__TURBOC__ < 0x0200)
# define LZO_BROKEN_SIZEOF 1
# endif
# if (__TURBOC__ < 0x0400) && defined(__cplusplus)
# define LZO_BROKEN_CDECL_ALT_SYNTAX 1
# endif
#elif (defined(__PUREC__) || defined(__TURBOC__)) && defined(__TOS__)
# define LZO_BROKEN_CDECL_ALT_SYNTAX 1
# define LZO_BROKEN_SIZEOF 1
#endif
#endif
#if defined(__WATCOMC__) && (__WATCOMC__ < 900)
# define LZO_BROKEN_INTEGRAL_CONSTANTS 1
#endif
#if defined(_CRAY) && defined(_CRAY1)
# define LZO_BROKEN_SIGNED_RIGHT_SHIFT 1
#endif
#define LZO_PP_STRINGIZE(x) #x
#define LZO_PP_MACRO_EXPAND(x) LZO_PP_STRINGIZE(x)
#define LZO_PP_CONCAT0() /*empty*/
#define LZO_PP_CONCAT1(a) a
#define LZO_PP_CONCAT2(a,b) a ## b
#define LZO_PP_CONCAT3(a,b,c) a ## b ## c
#define LZO_PP_CONCAT4(a,b,c,d) a ## b ## c ## d
#define LZO_PP_CONCAT5(a,b,c,d,e) a ## b ## c ## d ## e
#define LZO_PP_CONCAT6(a,b,c,d,e,f) a ## b ## c ## d ## e ## f
#define LZO_PP_CONCAT7(a,b,c,d,e,f,g) a ## b ## c ## d ## e ## f ## g
#define LZO_PP_ECONCAT0() LZO_PP_CONCAT0()
#define LZO_PP_ECONCAT1(a) LZO_PP_CONCAT1(a)
#define LZO_PP_ECONCAT2(a,b) LZO_PP_CONCAT2(a,b)
#define LZO_PP_ECONCAT3(a,b,c) LZO_PP_CONCAT3(a,b,c)
#define LZO_PP_ECONCAT4(a,b,c,d) LZO_PP_CONCAT4(a,b,c,d)
#define LZO_PP_ECONCAT5(a,b,c,d,e) LZO_PP_CONCAT5(a,b,c,d,e)
#define LZO_PP_ECONCAT6(a,b,c,d,e,f) LZO_PP_CONCAT6(a,b,c,d,e,f)
#define LZO_PP_ECONCAT7(a,b,c,d,e,f,g) LZO_PP_CONCAT7(a,b,c,d,e,f,g)
#define LZO_PP_EMPTY /*empty*/
#define LZO_PP_EMPTY0() /*empty*/
#define LZO_PP_EMPTY1(a) /*empty*/
#define LZO_PP_EMPTY2(a,b) /*empty*/
#define LZO_PP_EMPTY3(a,b,c) /*empty*/
#define LZO_PP_EMPTY4(a,b,c,d) /*empty*/
#define LZO_PP_EMPTY5(a,b,c,d,e) /*empty*/
#define LZO_PP_EMPTY6(a,b,c,d,e,f) /*empty*/
#define LZO_PP_EMPTY7(a,b,c,d,e,f,g) /*empty*/
#if 1
#define LZO_CPP_STRINGIZE(x) #x
#define LZO_CPP_MACRO_EXPAND(x) LZO_CPP_STRINGIZE(x)
#define LZO_CPP_CONCAT2(a,b) a ## b
#define LZO_CPP_CONCAT3(a,b,c) a ## b ## c
#define LZO_CPP_CONCAT4(a,b,c,d) a ## b ## c ## d
#define LZO_CPP_CONCAT5(a,b,c,d,e) a ## b ## c ## d ## e
#define LZO_CPP_CONCAT6(a,b,c,d,e,f) a ## b ## c ## d ## e ## f
#define LZO_CPP_CONCAT7(a,b,c,d,e,f,g) a ## b ## c ## d ## e ## f ## g
#define LZO_CPP_ECONCAT2(a,b) LZO_CPP_CONCAT2(a,b)
#define LZO_CPP_ECONCAT3(a,b,c) LZO_CPP_CONCAT3(a,b,c)
#define LZO_CPP_ECONCAT4(a,b,c,d) LZO_CPP_CONCAT4(a,b,c,d)
#define LZO_CPP_ECONCAT5(a,b,c,d,e) LZO_CPP_CONCAT5(a,b,c,d,e)
#define LZO_CPP_ECONCAT6(a,b,c,d,e,f) LZO_CPP_CONCAT6(a,b,c,d,e,f)
#define LZO_CPP_ECONCAT7(a,b,c,d,e,f,g) LZO_CPP_CONCAT7(a,b,c,d,e,f,g)
#endif
#define __LZO_MASK_GEN(o,b) (((((o) << ((b)-!!(b))) - (o)) << 1) + (o)*!!(b))
#if 1 && defined(__cplusplus)
# if !defined(__STDC_CONSTANT_MACROS)
# define __STDC_CONSTANT_MACROS 1
# endif
# if !defined(__STDC_LIMIT_MACROS)
# define __STDC_LIMIT_MACROS 1
# endif
#endif
#if defined(__cplusplus)
# define LZO_EXTERN_C extern "C"
# define LZO_EXTERN_C_BEGIN extern "C" {
# define LZO_EXTERN_C_END }
#else
# define LZO_EXTERN_C extern
# define LZO_EXTERN_C_BEGIN /*empty*/
# define LZO_EXTERN_C_END /*empty*/
#endif
#if !defined(__LZO_OS_OVERRIDE)
#if (LZO_OS_FREESTANDING)
# define LZO_INFO_OS "freestanding"
#elif (LZO_OS_EMBEDDED)
# define LZO_INFO_OS "embedded"
#elif 1 && defined(__IAR_SYSTEMS_ICC__)
# define LZO_OS_EMBEDDED 1
# define LZO_INFO_OS "embedded"
#elif defined(__CYGWIN__) && defined(__GNUC__)
# define LZO_OS_CYGWIN 1
# define LZO_INFO_OS "cygwin"
#elif defined(__EMX__) && defined(__GNUC__)
# define LZO_OS_EMX 1
# define LZO_INFO_OS "emx"
#elif defined(__BEOS__)
# define LZO_OS_BEOS 1
# define LZO_INFO_OS "beos"
#elif defined(__Lynx__)
# define LZO_OS_LYNXOS 1
# define LZO_INFO_OS "lynxos"
#elif defined(__OS400__)
# define LZO_OS_OS400 1
# define LZO_INFO_OS "os400"
#elif defined(__QNX__)
# define LZO_OS_QNX 1
# define LZO_INFO_OS "qnx"
#elif defined(__BORLANDC__) && defined(__DPMI32__) && (__BORLANDC__ >= 0x0460)
# define LZO_OS_DOS32 1
# define LZO_INFO_OS "dos32"
#elif defined(__BORLANDC__) && defined(__DPMI16__)
# define LZO_OS_DOS16 1
# define LZO_INFO_OS "dos16"
#elif defined(__ZTC__) && defined(DOS386)
# define LZO_OS_DOS32 1
# define LZO_INFO_OS "dos32"
#elif defined(__OS2__) || defined(__OS2V2__)
# if (UINT_MAX == LZO_0xffffL)
# define LZO_OS_OS216 1
# define LZO_INFO_OS "os216"
# elif (UINT_MAX == LZO_0xffffffffL)
# define LZO_OS_OS2 1
# define LZO_INFO_OS "os2"
# else
# error "check your limits.h header"
# endif
#elif defined(__WIN64__) || defined(_WIN64) || defined(WIN64)
# define LZO_OS_WIN64 1
# define LZO_INFO_OS "win64"
#elif defined(__WIN32__) || defined(_WIN32) || defined(WIN32) || defined(__WINDOWS_386__)
# define LZO_OS_WIN32 1
# define LZO_INFO_OS "win32"
#elif defined(__MWERKS__) && defined(__INTEL__)
# define LZO_OS_WIN32 1
# define LZO_INFO_OS "win32"
#elif defined(__WINDOWS__) || defined(_WINDOWS) || defined(_Windows)
# if (UINT_MAX == LZO_0xffffL)
# define LZO_OS_WIN16 1
# define LZO_INFO_OS "win16"
# elif (UINT_MAX == LZO_0xffffffffL)
# define LZO_OS_WIN32 1
# define LZO_INFO_OS "win32"
# else
# error "check your limits.h header"
# endif
#elif defined(__DOS__) || defined(__MSDOS__) || defined(_MSDOS) || defined(MSDOS) || (defined(__PACIFIC__) && defined(DOS))
# if (UINT_MAX == LZO_0xffffL)
# define LZO_OS_DOS16 1
# define LZO_INFO_OS "dos16"
# elif (UINT_MAX == LZO_0xffffffffL)
# define LZO_OS_DOS32 1
# define LZO_INFO_OS "dos32"
# else
# error "check your limits.h header"
# endif
#elif defined(__WATCOMC__)
# if defined(__NT__) && (UINT_MAX == LZO_0xffffL)
# define LZO_OS_DOS16 1
# define LZO_INFO_OS "dos16"
# elif defined(__NT__) && (__WATCOMC__ < 1100)
# define LZO_OS_WIN32 1
# define LZO_INFO_OS "win32"
# elif defined(__linux__) || defined(__LINUX__)
# define LZO_OS_POSIX 1
# define LZO_INFO_OS "posix"
# else
# error "please specify a target using the -bt compiler option"
# endif
#elif defined(__palmos__)
# define LZO_OS_PALMOS 1
# define LZO_INFO_OS "palmos"
#elif defined(__TOS__) || defined(__atarist__)
# define LZO_OS_TOS 1
# define LZO_INFO_OS "tos"
#elif defined(macintosh) && !defined(__ppc__)
# define LZO_OS_MACCLASSIC 1
# define LZO_INFO_OS "macclassic"
#elif defined(__VMS)
# define LZO_OS_VMS 1
# define LZO_INFO_OS "vms"
#elif (defined(__mips__) && defined(__R5900__)) || defined(__MIPS_PSX2__)
# define LZO_OS_CONSOLE 1
# define LZO_OS_CONSOLE_PS2 1
# define LZO_INFO_OS "console"
# define LZO_INFO_OS_CONSOLE "ps2"
#elif defined(__mips__) && defined(__psp__)
# define LZO_OS_CONSOLE 1
# define LZO_OS_CONSOLE_PSP 1
# define LZO_INFO_OS "console"
# define LZO_INFO_OS_CONSOLE "psp"
#else
# define LZO_OS_POSIX 1
# define LZO_INFO_OS "posix"
#endif
#if (LZO_OS_POSIX)
# if defined(_AIX) || defined(__AIX__) || defined(__aix__)
# define LZO_OS_POSIX_AIX 1
# define LZO_INFO_OS_POSIX "aix"
# elif defined(__FreeBSD__)
# define LZO_OS_POSIX_FREEBSD 1
# define LZO_INFO_OS_POSIX "freebsd"
# elif defined(__hpux__) || defined(__hpux)
# define LZO_OS_POSIX_HPUX 1
# define LZO_INFO_OS_POSIX "hpux"
# elif defined(__INTERIX)
# define LZO_OS_POSIX_INTERIX 1
# define LZO_INFO_OS_POSIX "interix"
# elif defined(__IRIX__) || defined(__irix__)
# define LZO_OS_POSIX_IRIX 1
# define LZO_INFO_OS_POSIX "irix"
# elif defined(__linux__) || defined(__linux) || defined(__LINUX__)
# define LZO_OS_POSIX_LINUX 1
# define LZO_INFO_OS_POSIX "linux"
# elif defined(__APPLE__) && defined(__MACH__)
# if ((__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__-0) >= 20000)
# define LZO_OS_POSIX_DARWIN 1040
# define LZO_INFO_OS_POSIX "darwin_iphone"
# elif ((__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__-0) >= 1040)
# define LZO_OS_POSIX_DARWIN __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__
# define LZO_INFO_OS_POSIX "darwin"
# else
# define LZO_OS_POSIX_DARWIN 1
# define LZO_INFO_OS_POSIX "darwin"
# endif
# define LZO_OS_POSIX_MACOSX LZO_OS_POSIX_DARWIN
# elif defined(__minix__) || defined(__minix)
# define LZO_OS_POSIX_MINIX 1
# define LZO_INFO_OS_POSIX "minix"
# elif defined(__NetBSD__)
# define LZO_OS_POSIX_NETBSD 1
# define LZO_INFO_OS_POSIX "netbsd"
# elif defined(__OpenBSD__)
# define LZO_OS_POSIX_OPENBSD 1
# define LZO_INFO_OS_POSIX "openbsd"
# elif defined(__osf__)
# define LZO_OS_POSIX_OSF 1
# define LZO_INFO_OS_POSIX "osf"
# elif defined(__solaris__) || defined(__sun)
# if defined(__SVR4) || defined(__svr4__)
# define LZO_OS_POSIX_SOLARIS 1
# define LZO_INFO_OS_POSIX "solaris"
# else
# define LZO_OS_POSIX_SUNOS 1
# define LZO_INFO_OS_POSIX "sunos"
# endif
# elif defined(__ultrix__) || defined(__ultrix)
# define LZO_OS_POSIX_ULTRIX 1
# define LZO_INFO_OS_POSIX "ultrix"
# elif defined(_UNICOS)
# define LZO_OS_POSIX_UNICOS 1
# define LZO_INFO_OS_POSIX "unicos"
# else
# define LZO_OS_POSIX_UNKNOWN 1
# define LZO_INFO_OS_POSIX "unknown"
# endif
#endif
#endif
#if (LZO_OS_DOS16 || LZO_OS_OS216 || LZO_OS_WIN16)
# if (UINT_MAX != LZO_0xffffL)
# error "unexpected configuration - check your compiler defines"
# endif
# if (ULONG_MAX != LZO_0xffffffffL)
# error "unexpected configuration - check your compiler defines"
# endif
#endif
#if (LZO_OS_DOS32 || LZO_OS_OS2 || LZO_OS_WIN32 || LZO_OS_WIN64)
# if (UINT_MAX != LZO_0xffffffffL)
# error "unexpected configuration - check your compiler defines"
# endif
# if (ULONG_MAX != LZO_0xffffffffL)
# error "unexpected configuration - check your compiler defines"
# endif
#endif
#if defined(CIL) && defined(_GNUCC) && defined(__GNUC__)
# define LZO_CC_CILLY 1
# define LZO_INFO_CC "Cilly"
# if defined(__CILLY__)
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__CILLY__)
# else
# define LZO_INFO_CCVER "unknown"
# endif
#elif 0 && defined(SDCC) && defined(__VERSION__) && !defined(__GNUC__)
# define LZO_CC_SDCC 1
# define LZO_INFO_CC "sdcc"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(SDCC)
#elif defined(__PATHSCALE__) && defined(__PATHCC_PATCHLEVEL__)
# define LZO_CC_PATHSCALE (__PATHCC__ * 0x10000L + (__PATHCC_MINOR__-0) * 0x100 + (__PATHCC_PATCHLEVEL__-0))
# define LZO_INFO_CC "Pathscale C"
# define LZO_INFO_CCVER __PATHSCALE__
# if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__VERSION__)
# define LZO_CC_PATHSCALE_GNUC (__GNUC__ * 0x10000L + (__GNUC_MINOR__-0) * 0x100 + (__GNUC_PATCHLEVEL__-0))
# endif
#elif defined(__INTEL_COMPILER) && ((__INTEL_COMPILER-0) > 0)
# define LZO_CC_INTELC __INTEL_COMPILER
# define LZO_INFO_CC "Intel C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__INTEL_COMPILER)
# if defined(_MSC_VER) && ((_MSC_VER-0) > 0)
# define LZO_CC_INTELC_MSC _MSC_VER
# elif defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__VERSION__)
# define LZO_CC_INTELC_GNUC (__GNUC__ * 0x10000L + (__GNUC_MINOR__-0) * 0x100 + (__GNUC_PATCHLEVEL__-0))
# endif
#elif defined(__POCC__) && defined(_WIN32)
# define LZO_CC_PELLESC 1
# define LZO_INFO_CC "Pelles C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__POCC__)
#elif defined(__ARMCC_VERSION) && defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__VERSION__)
# if defined(__GNUC_PATCHLEVEL__)
# define LZO_CC_ARMCC_GNUC (__GNUC__ * 0x10000L + (__GNUC_MINOR__-0) * 0x100 + (__GNUC_PATCHLEVEL__-0))
# else
# define LZO_CC_ARMCC_GNUC (__GNUC__ * 0x10000L + (__GNUC_MINOR__-0) * 0x100)
# endif
# define LZO_CC_ARMCC __ARMCC_VERSION
# define LZO_INFO_CC "ARM C Compiler"
# define LZO_INFO_CCVER __VERSION__
#elif defined(__clang__) && defined(__llvm__) && defined(__VERSION__)
# if defined(__clang_major__) && defined(__clang_minor__) && defined(__clang_patchlevel__)
# define LZO_CC_CLANG (__clang_major__ * 0x10000L + (__clang_minor__-0) * 0x100 + (__clang_patchlevel__-0))
# else
# define LZO_CC_CLANG 0x010000L
# endif
# if defined(_MSC_VER) && ((_MSC_VER-0) > 0)
# define LZO_CC_CLANG_MSC _MSC_VER
# elif defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__VERSION__)
# define LZO_CC_CLANG_GNUC (__GNUC__ * 0x10000L + (__GNUC_MINOR__-0) * 0x100 + (__GNUC_PATCHLEVEL__-0))
# endif
# define LZO_INFO_CC "clang"
# define LZO_INFO_CCVER __VERSION__
#elif defined(__llvm__) && defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__VERSION__)
# if defined(__GNUC_PATCHLEVEL__)
# define LZO_CC_LLVM_GNUC (__GNUC__ * 0x10000L + (__GNUC_MINOR__-0) * 0x100 + (__GNUC_PATCHLEVEL__-0))
# else
# define LZO_CC_LLVM_GNUC (__GNUC__ * 0x10000L + (__GNUC_MINOR__-0) * 0x100)
# endif
# define LZO_CC_LLVM LZO_CC_LLVM_GNUC
# define LZO_INFO_CC "llvm-gcc"
# define LZO_INFO_CCVER __VERSION__
#elif defined(__ACK__) && defined(_ACK)
# define LZO_CC_ACK 1
# define LZO_INFO_CC "Amsterdam Compiler Kit C"
# define LZO_INFO_CCVER "unknown"
#elif defined(__ARMCC_VERSION) && !defined(__GNUC__)
# define LZO_CC_ARMCC __ARMCC_VERSION
# define LZO_CC_ARMCC_ARMCC __ARMCC_VERSION
# define LZO_INFO_CC "ARM C Compiler"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__ARMCC_VERSION)
#elif defined(__AZTEC_C__)
# define LZO_CC_AZTECC 1
# define LZO_INFO_CC "Aztec C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__AZTEC_C__)
#elif defined(__CODEGEARC__)
# define LZO_CC_CODEGEARC 1
# define LZO_INFO_CC "CodeGear C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__CODEGEARC__)
#elif defined(__BORLANDC__)
# define LZO_CC_BORLANDC 1
# define LZO_INFO_CC "Borland C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__BORLANDC__)
#elif defined(_CRAYC) && defined(_RELEASE)
# define LZO_CC_CRAYC 1
# define LZO_INFO_CC "Cray C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(_RELEASE)
#elif defined(__DMC__) && defined(__SC__)
# define LZO_CC_DMC 1
# define LZO_INFO_CC "Digital Mars C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__DMC__)
#elif defined(__DECC)
# define LZO_CC_DECC 1
# define LZO_INFO_CC "DEC C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__DECC)
#elif (defined(__ghs) || defined(__ghs__)) && defined(__GHS_VERSION_NUMBER) && ((__GHS_VERSION_NUMBER-0) > 0)
# define LZO_CC_GHS 1
# define LZO_INFO_CC "Green Hills C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__GHS_VERSION_NUMBER)
# if defined(_MSC_VER) && ((_MSC_VER-0) > 0)
# define LZO_CC_GHS_MSC _MSC_VER
# elif defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__VERSION__)
# define LZO_CC_GHS_GNUC (__GNUC__ * 0x10000L + (__GNUC_MINOR__-0) * 0x100 + (__GNUC_PATCHLEVEL__-0))
# endif
#elif defined(__HIGHC__)
# define LZO_CC_HIGHC 1
# define LZO_INFO_CC "MetaWare High C"
# define LZO_INFO_CCVER "unknown"
#elif defined(__HP_aCC) && ((__HP_aCC-0) > 0)
# define LZO_CC_HPACC __HP_aCC
# define LZO_INFO_CC "HP aCC"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__HP_aCC)
#elif defined(__IAR_SYSTEMS_ICC__)
# define LZO_CC_IARC 1
# define LZO_INFO_CC "IAR C"
# if defined(__VER__)
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__VER__)
# else
# define LZO_INFO_CCVER "unknown"
# endif
#elif defined(__IBMC__) && ((__IBMC__-0) > 0)
# define LZO_CC_IBMC __IBMC__
# define LZO_INFO_CC "IBM C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__IBMC__)
#elif defined(__IBMCPP__) && ((__IBMCPP__-0) > 0)
# define LZO_CC_IBMC __IBMCPP__
# define LZO_INFO_CC "IBM C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__IBMCPP__)
#elif defined(__KEIL__) && defined(__C166__)
# define LZO_CC_KEILC 1
# define LZO_INFO_CC "Keil C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__C166__)
#elif defined(__LCC__) && defined(_WIN32) && defined(__LCCOPTIMLEVEL)
# define LZO_CC_LCCWIN32 1
# define LZO_INFO_CC "lcc-win32"
# define LZO_INFO_CCVER "unknown"
#elif defined(__LCC__)
# define LZO_CC_LCC 1
# define LZO_INFO_CC "lcc"
# if defined(__LCC_VERSION__)
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__LCC_VERSION__)
# else
# define LZO_INFO_CCVER "unknown"
# endif
#elif defined(__MWERKS__) && ((__MWERKS__-0) > 0)
# define LZO_CC_MWERKS __MWERKS__
# define LZO_INFO_CC "Metrowerks C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__MWERKS__)
#elif (defined(__NDPC__) || defined(__NDPX__)) && defined(__i386)
# define LZO_CC_NDPC 1
# define LZO_INFO_CC "Microway NDP C"
# define LZO_INFO_CCVER "unknown"
#elif defined(__PACIFIC__)
# define LZO_CC_PACIFICC 1
# define LZO_INFO_CC "Pacific C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__PACIFIC__)
#elif defined(__PGI) && defined(__PGIC__) && defined(__PGIC_MINOR__)
# if defined(__PGIC_PATCHLEVEL__)
# define LZO_CC_PGI (__PGIC__ * 0x10000L + (__PGIC_MINOR__-0) * 0x100 + (__PGIC_PATCHLEVEL__-0))
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__PGIC__) "." LZO_PP_MACRO_EXPAND(__PGIC_MINOR__) "." LZO_PP_MACRO_EXPAND(__PGIC_PATCHLEVEL__)
# else
# define LZO_CC_PGI (__PGIC__ * 0x10000L + (__PGIC_MINOR__-0) * 0x100)
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__PGIC__) "." LZO_PP_MACRO_EXPAND(__PGIC_MINOR__) ".0"
# endif
# define LZO_INFO_CC "Portland Group PGI C"
#elif defined(__PGI) && (defined(__linux__) || defined(__WIN32__))
# define LZO_CC_PGI 1
# define LZO_INFO_CC "Portland Group PGI C"
# define LZO_INFO_CCVER "unknown"
#elif defined(__PUREC__) && defined(__TOS__)
# define LZO_CC_PUREC 1
# define LZO_INFO_CC "Pure C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__PUREC__)
#elif defined(__SC__) && defined(__ZTC__)
# define LZO_CC_SYMANTECC 1
# define LZO_INFO_CC "Symantec C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__SC__)
#elif defined(__SUNPRO_C)
# define LZO_INFO_CC "SunPro C"
# if ((__SUNPRO_C-0) > 0)
# define LZO_CC_SUNPROC __SUNPRO_C
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__SUNPRO_C)
# else
# define LZO_CC_SUNPROC 1
# define LZO_INFO_CCVER "unknown"
# endif
#elif defined(__SUNPRO_CC)
# define LZO_INFO_CC "SunPro C"
# if ((__SUNPRO_CC-0) > 0)
# define LZO_CC_SUNPROC __SUNPRO_CC
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__SUNPRO_CC)
# else
# define LZO_CC_SUNPROC 1
# define LZO_INFO_CCVER "unknown"
# endif
#elif defined(__TINYC__)
# define LZO_CC_TINYC 1
# define LZO_INFO_CC "Tiny C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__TINYC__)
#elif defined(__TSC__)
# define LZO_CC_TOPSPEEDC 1
# define LZO_INFO_CC "TopSpeed C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__TSC__)
#elif defined(__WATCOMC__)
# define LZO_CC_WATCOMC 1
# define LZO_INFO_CC "Watcom C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__WATCOMC__)
#elif defined(__TURBOC__)
# define LZO_CC_TURBOC 1
# define LZO_INFO_CC "Turbo C"
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__TURBOC__)
#elif defined(__ZTC__)
# define LZO_CC_ZORTECHC 1
# define LZO_INFO_CC "Zortech C"
# if ((__ZTC__-0) == 0x310)
# define LZO_INFO_CCVER "0x310"
# else
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(__ZTC__)
# endif
#elif defined(__GNUC__) && defined(__VERSION__)
# if defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
# define LZO_CC_GNUC (__GNUC__ * 0x10000L + (__GNUC_MINOR__-0) * 0x100 + (__GNUC_PATCHLEVEL__-0))
# elif defined(__GNUC_MINOR__)
# define LZO_CC_GNUC (__GNUC__ * 0x10000L + (__GNUC_MINOR__-0) * 0x100)
# else
# define LZO_CC_GNUC (__GNUC__ * 0x10000L)
# endif
# define LZO_INFO_CC "gcc"
# define LZO_INFO_CCVER __VERSION__
#elif defined(_MSC_VER) && ((_MSC_VER-0) > 0)
# define LZO_CC_MSC _MSC_VER
# define LZO_INFO_CC "Microsoft C"
# if defined(_MSC_FULL_VER)
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(_MSC_VER) "." LZO_PP_MACRO_EXPAND(_MSC_FULL_VER)
# else
# define LZO_INFO_CCVER LZO_PP_MACRO_EXPAND(_MSC_VER)
# endif
#else
# define LZO_CC_UNKNOWN 1
# define LZO_INFO_CC "unknown"
# define LZO_INFO_CCVER "unknown"
#endif
#if (LZO_CC_GNUC) && defined(__OPEN64__)
# if defined(__OPENCC__) && defined(__OPENCC_MINOR__) && defined(__OPENCC_PATCHLEVEL__)
# define LZO_CC_OPEN64 (__OPENCC__ * 0x10000L + (__OPENCC_MINOR__-0) * 0x100 + (__OPENCC_PATCHLEVEL__-0))
# define LZO_CC_OPEN64_GNUC LZO_CC_GNUC
# endif
#endif
#if (LZO_CC_GNUC) && defined(__PCC__)
# if defined(__PCC__) && defined(__PCC_MINOR__) && defined(__PCC_MINORMINOR__)
# define LZO_CC_PCC (__PCC__ * 0x10000L + (__PCC_MINOR__-0) * 0x100 + (__PCC_MINORMINOR__-0))
# define LZO_CC_PCC_GNUC LZO_CC_GNUC
# endif
#endif
#if 0 && (LZO_CC_MSC && (_MSC_VER >= 1200)) && !defined(_MSC_FULL_VER)
# error "LZO_CC_MSC: _MSC_FULL_VER is not defined"
#endif
#if !defined(__LZO_ARCH_OVERRIDE) && !(LZO_ARCH_GENERIC) && defined(_CRAY)
# if (UINT_MAX > LZO_0xffffffffL) && defined(_CRAY)
# if defined(_CRAYMPP) || defined(_CRAYT3D) || defined(_CRAYT3E)
# define LZO_ARCH_CRAY_MPP 1
# elif defined(_CRAY1)
# define LZO_ARCH_CRAY_PVP 1
# endif
# endif
#endif
#if !defined(__LZO_ARCH_OVERRIDE)
#if (LZO_ARCH_GENERIC)
# define LZO_INFO_ARCH "generic"
#elif (LZO_OS_DOS16 || LZO_OS_OS216 || LZO_OS_WIN16)
# define LZO_ARCH_I086 1
# define LZO_INFO_ARCH "i086"
#elif defined(__aarch64__) || defined(_M_ARM64)
# define LZO_ARCH_ARM64 1
# define LZO_INFO_ARCH "arm64"
#elif defined(__alpha__) || defined(__alpha) || defined(_M_ALPHA)
# define LZO_ARCH_ALPHA 1
# define LZO_INFO_ARCH "alpha"
#elif (LZO_ARCH_CRAY_MPP) && (defined(_CRAYT3D) || defined(_CRAYT3E))
# define LZO_ARCH_ALPHA 1
# define LZO_INFO_ARCH "alpha"
#elif defined(__amd64__) || defined(__x86_64__) || defined(_M_AMD64)
# define LZO_ARCH_AMD64 1
# define LZO_INFO_ARCH "amd64"
#elif defined(__thumb__) || (defined(_M_ARM) && defined(_M_THUMB))
# define LZO_ARCH_ARM 1
# define LZO_ARCH_ARM_THUMB 1
# define LZO_INFO_ARCH "arm_thumb"
#elif defined(__IAR_SYSTEMS_ICC__) && defined(__ICCARM__)
# define LZO_ARCH_ARM 1
# if defined(__CPU_MODE__) && ((__CPU_MODE__-0) == 1)
# define LZO_ARCH_ARM_THUMB 1
# define LZO_INFO_ARCH "arm_thumb"
# elif defined(__CPU_MODE__) && ((__CPU_MODE__-0) == 2)
# define LZO_INFO_ARCH "arm"
# else
# define LZO_INFO_ARCH "arm"
# endif
#elif defined(__arm__) || defined(_M_ARM)
# define LZO_ARCH_ARM 1
# define LZO_INFO_ARCH "arm"
#elif (UINT_MAX <= LZO_0xffffL) && defined(__AVR__)
# define LZO_ARCH_AVR 1
# define LZO_INFO_ARCH "avr"
#elif defined(__avr32__) || defined(__AVR32__)
# define LZO_ARCH_AVR32 1
# define LZO_INFO_ARCH "avr32"
#elif defined(__bfin__)
# define LZO_ARCH_BLACKFIN 1
# define LZO_INFO_ARCH "blackfin"
#elif (UINT_MAX == LZO_0xffffL) && defined(__C166__)
# define LZO_ARCH_C166 1
# define LZO_INFO_ARCH "c166"
#elif defined(__cris__)
# define LZO_ARCH_CRIS 1
# define LZO_INFO_ARCH "cris"
#elif defined(__IAR_SYSTEMS_ICC__) && defined(__ICCEZ80__)
# define LZO_ARCH_EZ80 1
# define LZO_INFO_ARCH "ez80"
#elif defined(__H8300__) || defined(__H8300H__) || defined(__H8300S__) || defined(__H8300SX__)
# define LZO_ARCH_H8300 1
# define LZO_INFO_ARCH "h8300"
#elif defined(__hppa__) || defined(__hppa)
# define LZO_ARCH_HPPA 1
# define LZO_INFO_ARCH "hppa"
#elif defined(__386__) || defined(__i386__) || defined(__i386) || defined(_M_IX86) || defined(_M_I386)
# define LZO_ARCH_I386 1
# define LZO_ARCH_IA32 1
# define LZO_INFO_ARCH "i386"
#elif (LZO_CC_ZORTECHC && defined(__I86__))
# define LZO_ARCH_I386 1
# define LZO_ARCH_IA32 1
# define LZO_INFO_ARCH "i386"
#elif (LZO_OS_DOS32 && LZO_CC_HIGHC) && defined(_I386)
# define LZO_ARCH_I386 1
# define LZO_ARCH_IA32 1
# define LZO_INFO_ARCH "i386"
#elif defined(__ia64__) || defined(__ia64) || defined(_M_IA64)
# define LZO_ARCH_IA64 1
# define LZO_INFO_ARCH "ia64"
#elif (UINT_MAX == LZO_0xffffL) && defined(__m32c__)
# define LZO_ARCH_M16C 1
# define LZO_INFO_ARCH "m16c"
#elif defined(__IAR_SYSTEMS_ICC__) && defined(__ICCM16C__)
# define LZO_ARCH_M16C 1
# define LZO_INFO_ARCH "m16c"
#elif defined(__m32r__)
# define LZO_ARCH_M32R 1
# define LZO_INFO_ARCH "m32r"
#elif (LZO_OS_TOS) || defined(__m68k__) || defined(__m68000__) || defined(__mc68000__) || defined(__mc68020__) || defined(_M_M68K)
# define LZO_ARCH_M68K 1
# define LZO_INFO_ARCH "m68k"
#elif (UINT_MAX == LZO_0xffffL) && defined(__C251__)
# define LZO_ARCH_MCS251 1
# define LZO_INFO_ARCH "mcs251"
#elif (UINT_MAX == LZO_0xffffL) && defined(__C51__)
# define LZO_ARCH_MCS51 1
# define LZO_INFO_ARCH "mcs51"
#elif defined(__IAR_SYSTEMS_ICC__) && defined(__ICC8051__)
# define LZO_ARCH_MCS51 1
# define LZO_INFO_ARCH "mcs51"
#elif defined(__mips__) || defined(__mips) || defined(_MIPS_ARCH) || defined(_M_MRX000)
# define LZO_ARCH_MIPS 1
# define LZO_INFO_ARCH "mips"
#elif (UINT_MAX == LZO_0xffffL) && defined(__MSP430__)
# define LZO_ARCH_MSP430 1
# define LZO_INFO_ARCH "msp430"
#elif defined(__IAR_SYSTEMS_ICC__) && defined(__ICC430__)
# define LZO_ARCH_MSP430 1
# define LZO_INFO_ARCH "msp430"
#elif defined(__powerpc__) || defined(__powerpc) || defined(__ppc__) || defined(__PPC__) || defined(_M_PPC) || defined(_ARCH_PPC) || defined(_ARCH_PWR)
# define LZO_ARCH_POWERPC 1
# define LZO_INFO_ARCH "powerpc"
#elif defined(__s390__) || defined(__s390) || defined(__s390x__) || defined(__s390x)
# define LZO_ARCH_S390 1
# define LZO_INFO_ARCH "s390"
#elif defined(__sh__) || defined(_M_SH)
# define LZO_ARCH_SH 1
# define LZO_INFO_ARCH "sh"
#elif defined(__sparc__) || defined(__sparc) || defined(__sparcv8)
# define LZO_ARCH_SPARC 1
# define LZO_INFO_ARCH "sparc"
#elif defined(__SPU__)
# define LZO_ARCH_SPU 1
# define LZO_INFO_ARCH "spu"
#elif (UINT_MAX == LZO_0xffffL) && defined(__z80)
# define LZO_ARCH_Z80 1
# define LZO_INFO_ARCH "z80"
#elif (LZO_ARCH_CRAY_PVP)
# if defined(_CRAYSV1)
# define LZO_ARCH_CRAY_SV1 1
# define LZO_INFO_ARCH "cray_sv1"
# elif (_ADDR64)
# define LZO_ARCH_CRAY_T90 1
# define LZO_INFO_ARCH "cray_t90"
# elif (_ADDR32)
# define LZO_ARCH_CRAY_YMP 1
# define LZO_INFO_ARCH "cray_ymp"
# else
# define LZO_ARCH_CRAY_XMP 1
# define LZO_INFO_ARCH "cray_xmp"
# endif
#else
# define LZO_ARCH_UNKNOWN 1
# define LZO_INFO_ARCH "unknown"
#endif
#endif
#if 1 && (LZO_ARCH_UNKNOWN) && (LZO_OS_DOS32 || LZO_OS_OS2)
# error "FIXME - missing define for CPU architecture"
#endif
#if 1 && (LZO_ARCH_UNKNOWN) && (LZO_OS_WIN32)
# error "FIXME - missing LZO_OS_WIN32 define for CPU architecture"
#endif
#if 1 && (LZO_ARCH_UNKNOWN) && (LZO_OS_WIN64)
# error "FIXME - missing LZO_OS_WIN64 define for CPU architecture"
#endif
#if (LZO_OS_OS216 || LZO_OS_WIN16)
# define LZO_ARCH_I086PM 1
#elif 1 && (LZO_OS_DOS16 && defined(BLX286))
# define LZO_ARCH_I086PM 1
#elif 1 && (LZO_OS_DOS16 && defined(DOSX286))
# define LZO_ARCH_I086PM 1
#elif 1 && (LZO_OS_DOS16 && LZO_CC_BORLANDC && defined(__DPMI16__))
# define LZO_ARCH_I086PM 1
#endif
#if (LZO_ARCH_AMD64 && !LZO_ARCH_X64)
# define LZO_ARCH_X64 1
#elif (!LZO_ARCH_AMD64 && LZO_ARCH_X64) && defined(__LZO_ARCH_OVERRIDE)
# define LZO_ARCH_AMD64 1
#endif
#if (LZO_ARCH_ARM64 && !LZO_ARCH_AARCH64)
# define LZO_ARCH_AARCH64 1
#elif (!LZO_ARCH_ARM64 && LZO_ARCH_AARCH64) && defined(__LZO_ARCH_OVERRIDE)
# define LZO_ARCH_ARM64 1
#endif
#if (LZO_ARCH_I386 && !LZO_ARCH_X86)
# define LZO_ARCH_X86 1
#elif (!LZO_ARCH_I386 && LZO_ARCH_X86) && defined(__LZO_ARCH_OVERRIDE)
# define LZO_ARCH_I386 1
#endif
#if (LZO_ARCH_AMD64 && !LZO_ARCH_X64) || (!LZO_ARCH_AMD64 && LZO_ARCH_X64)
# error "unexpected configuration - check your compiler defines"
#endif
#if (LZO_ARCH_ARM64 && !LZO_ARCH_AARCH64) || (!LZO_ARCH_ARM64 && LZO_ARCH_AARCH64)
# error "unexpected configuration - check your compiler defines"
#endif
#if (LZO_ARCH_I386 && !LZO_ARCH_X86) || (!LZO_ARCH_I386 && LZO_ARCH_X86)
# error "unexpected configuration - check your compiler defines"
#endif
#if (LZO_ARCH_ARM_THUMB && !LZO_ARCH_ARM)
# error "unexpected configuration - check your compiler defines"
#endif
#if (LZO_ARCH_ARM_THUMB1 && !LZO_ARCH_ARM_THUMB)
# error "unexpected configuration - check your compiler defines"
#endif
#if (LZO_ARCH_ARM_THUMB2 && !LZO_ARCH_ARM_THUMB)
# error "unexpected configuration - check your compiler defines"
#endif
#if (LZO_ARCH_ARM_THUMB1 && LZO_ARCH_ARM_THUMB2)
# error "unexpected configuration - check your compiler defines"
#endif
#if (LZO_ARCH_I086PM && !LZO_ARCH_I086)
# error "unexpected configuration - check your compiler defines"
#endif
#if (LZO_ARCH_I086)
# if (UINT_MAX != LZO_0xffffL)
# error "unexpected configuration - check your compiler defines"
# endif
# if (ULONG_MAX != LZO_0xffffffffL)
# error "unexpected configuration - check your compiler defines"
# endif
#endif
#if (LZO_ARCH_I386)
# if (UINT_MAX != LZO_0xffffL) && defined(__i386_int16__)
# error "unexpected configuration - check your compiler defines"
# endif
# if (UINT_MAX != LZO_0xffffffffL) && !defined(__i386_int16__)
# error "unexpected configuration - check your compiler defines"
# endif
# if (ULONG_MAX != LZO_0xffffffffL)
# error "unexpected configuration - check your compiler defines"
# endif
#endif
#if (LZO_ARCH_AMD64 || LZO_ARCH_I386)
# if !defined(LZO_TARGET_FEATURE_SSE2)
# if defined(__SSE2__)
# define LZO_TARGET_FEATURE_SSE2 1
# elif defined(_MSC_VER) && ((defined(_M_IX86_FP) && ((_M_IX86_FP)+0 >= 2)) || defined(_M_AMD64))
# define LZO_TARGET_FEATURE_SSE2 1
# endif
# endif
# if !defined(LZO_TARGET_FEATURE_SSSE3)
# if (LZO_TARGET_FEATURE_SSE2)
# if defined(__SSSE3__)
# define LZO_TARGET_FEATURE_SSSE3 1
# elif defined(_MSC_VER) && defined(__AVX__)
# define LZO_TARGET_FEATURE_SSSE3 1
# endif
# endif
# endif
# if !defined(LZO_TARGET_FEATURE_SSE4_2)
# if (LZO_TARGET_FEATURE_SSSE3)
# if defined(__SSE4_2__)
# define LZO_TARGET_FEATURE_SSE4_2 1
# endif
# endif
# endif
# if !defined(LZO_TARGET_FEATURE_AVX)
# if (LZO_TARGET_FEATURE_SSSE3)
# if defined(__AVX__)
# define LZO_TARGET_FEATURE_AVX 1
# endif
# endif
# endif
# if !defined(LZO_TARGET_FEATURE_AVX2)
# if (LZO_TARGET_FEATURE_AVX)
# if defined(__AVX2__)
# define LZO_TARGET_FEATURE_AVX2 1
# endif
# endif
# endif
#endif
#if (LZO_TARGET_FEATURE_SSSE3 && !(LZO_TARGET_FEATURE_SSE2))
# error "unexpected configuration - check your compiler defines"
#endif
#if (LZO_TARGET_FEATURE_SSE4_2 && !(LZO_TARGET_FEATURE_SSSE3))
# error "unexpected configuration - check your compiler defines"
#endif
#if (LZO_TARGET_FEATURE_AVX && !(LZO_TARGET_FEATURE_SSSE3))
# error "unexpected configuration - check your compiler defines"
#endif
#if (LZO_TARGET_FEATURE_AVX2 && !(LZO_TARGET_FEATURE_AVX))
# error "unexpected configuration - check your compiler defines"
#endif
#if (LZO_ARCH_ARM)
# if !defined(LZO_TARGET_FEATURE_NEON)
# if defined(__ARM_NEON__)
# define LZO_TARGET_FEATURE_NEON 1
# endif
# endif
#elif (LZO_ARCH_ARM64)
# if !defined(LZO_TARGET_FEATURE_NEON)
# if 1
# define LZO_TARGET_FEATURE_NEON 1
# endif
# endif
#endif
#if 0
#elif !defined(__LZO_MM_OVERRIDE)
#if (LZO_ARCH_I086)
#if (UINT_MAX != LZO_0xffffL)
# error "unexpected configuration - check your compiler defines"
#endif
#if defined(__TINY__) || defined(M_I86TM) || defined(_M_I86TM)
# define LZO_MM_TINY 1
#elif defined(__HUGE__) || defined(_HUGE_) || defined(M_I86HM) || defined(_M_I86HM)
# define LZO_MM_HUGE 1
#elif defined(__SMALL__) || defined(M_I86SM) || defined(_M_I86SM) || defined(SMALL_MODEL)
# define LZO_MM_SMALL 1
#elif defined(__MEDIUM__) || defined(M_I86MM) || defined(_M_I86MM)
# define LZO_MM_MEDIUM 1
#elif defined(__COMPACT__) || defined(M_I86CM) || defined(_M_I86CM)
# define LZO_MM_COMPACT 1
#elif defined(__LARGE__) || defined(M_I86LM) || defined(_M_I86LM) || defined(LARGE_MODEL)
# define LZO_MM_LARGE 1
#elif (LZO_CC_AZTECC)
# if defined(_LARGE_CODE) && defined(_LARGE_DATA)
# define LZO_MM_LARGE 1
# elif defined(_LARGE_CODE)
# define LZO_MM_MEDIUM 1
# elif defined(_LARGE_DATA)
# define LZO_MM_COMPACT 1
# else
# define LZO_MM_SMALL 1
# endif
#elif (LZO_CC_ZORTECHC && defined(__VCM__))
# define LZO_MM_LARGE 1
#else
# error "unknown LZO_ARCH_I086 memory model"
#endif
#if (LZO_OS_DOS16 || LZO_OS_OS216 || LZO_OS_WIN16)
#define LZO_HAVE_MM_HUGE_PTR 1
#define LZO_HAVE_MM_HUGE_ARRAY 1
#if (LZO_MM_TINY)
# undef LZO_HAVE_MM_HUGE_ARRAY
#endif
#if (LZO_CC_AZTECC || LZO_CC_PACIFICC || LZO_CC_ZORTECHC)
# undef LZO_HAVE_MM_HUGE_PTR
# undef LZO_HAVE_MM_HUGE_ARRAY
#elif (LZO_CC_DMC || LZO_CC_SYMANTECC)
# undef LZO_HAVE_MM_HUGE_ARRAY
#elif (LZO_CC_MSC && defined(_QC))
# undef LZO_HAVE_MM_HUGE_ARRAY
# if (_MSC_VER < 600)
# undef LZO_HAVE_MM_HUGE_PTR
# endif
#elif (LZO_CC_TURBOC && (__TURBOC__ < 0x0295))
# undef LZO_HAVE_MM_HUGE_ARRAY
#endif
#if (LZO_ARCH_I086PM) && !(LZO_HAVE_MM_HUGE_PTR)
# if (LZO_OS_DOS16)
# error "unexpected configuration - check your compiler defines"
# elif (LZO_CC_ZORTECHC)
# else
# error "unexpected configuration - check your compiler defines"
# endif
#endif
#ifdef __cplusplus
extern "C" {
#endif
#if (LZO_CC_BORLANDC && (__BORLANDC__ >= 0x0200))
extern void __near __cdecl _AHSHIFT(void);
# define LZO_MM_AHSHIFT ((unsigned) _AHSHIFT)
#elif (LZO_CC_DMC || LZO_CC_SYMANTECC || LZO_CC_ZORTECHC)
extern void __near __cdecl _AHSHIFT(void);
# define LZO_MM_AHSHIFT ((unsigned) _AHSHIFT)
#elif (LZO_CC_MSC || LZO_CC_TOPSPEEDC)
extern void __near __cdecl _AHSHIFT(void);
# define LZO_MM_AHSHIFT ((unsigned) _AHSHIFT)
#elif (LZO_CC_TURBOC && (__TURBOC__ >= 0x0295))
extern void __near __cdecl _AHSHIFT(void);
# define LZO_MM_AHSHIFT ((unsigned) _AHSHIFT)
#elif ((LZO_CC_AZTECC || LZO_CC_PACIFICC || LZO_CC_TURBOC) && LZO_OS_DOS16)
# define LZO_MM_AHSHIFT 12
#elif (LZO_CC_WATCOMC)
extern unsigned char _HShift;
# define LZO_MM_AHSHIFT ((unsigned) _HShift)
#else
# error "FIXME - implement LZO_MM_AHSHIFT"
#endif
#ifdef __cplusplus
}
#endif
#endif
#elif (LZO_ARCH_C166)
#if !defined(__MODEL__)
# error "FIXME - LZO_ARCH_C166 __MODEL__"
#elif ((__MODEL__) == 0)
# define LZO_MM_SMALL 1
#elif ((__MODEL__) == 1)
# define LZO_MM_SMALL 1
#elif ((__MODEL__) == 2)
# define LZO_MM_LARGE 1
#elif ((__MODEL__) == 3)
# define LZO_MM_TINY 1
#elif ((__MODEL__) == 4)
# define LZO_MM_XTINY 1
#elif ((__MODEL__) == 5)
# define LZO_MM_XSMALL 1
#else
# error "FIXME - LZO_ARCH_C166 __MODEL__"
#endif
#elif (LZO_ARCH_MCS251)
#if !defined(__MODEL__)
# error "FIXME - LZO_ARCH_MCS251 __MODEL__"
#elif ((__MODEL__) == 0)
# define LZO_MM_SMALL 1
#elif ((__MODEL__) == 2)
# define LZO_MM_LARGE 1
#elif ((__MODEL__) == 3)
# define LZO_MM_TINY 1
#elif ((__MODEL__) == 4)
# define LZO_MM_XTINY 1
#elif ((__MODEL__) == 5)
# define LZO_MM_XSMALL 1
#else
# error "FIXME - LZO_ARCH_MCS251 __MODEL__"
#endif
#elif (LZO_ARCH_MCS51)
#if !defined(__MODEL__)
# error "FIXME - LZO_ARCH_MCS51 __MODEL__"
#elif ((__MODEL__) == 1)
# define LZO_MM_SMALL 1
#elif ((__MODEL__) == 2)
# define LZO_MM_LARGE 1
#elif ((__MODEL__) == 3)
# define LZO_MM_TINY 1
#elif ((__MODEL__) == 4)
# define LZO_MM_XTINY 1
#elif ((__MODEL__) == 5)
# define LZO_MM_XSMALL 1
#else
# error "FIXME - LZO_ARCH_MCS51 __MODEL__"
#endif
#elif (LZO_ARCH_CRAY_PVP)
# define LZO_MM_PVP 1
#else
# define LZO_MM_FLAT 1
#endif
#if (LZO_MM_COMPACT)
# define LZO_INFO_MM "compact"
#elif (LZO_MM_FLAT)
# define LZO_INFO_MM "flat"
#elif (LZO_MM_HUGE)
# define LZO_INFO_MM "huge"
#elif (LZO_MM_LARGE)
# define LZO_INFO_MM "large"
#elif (LZO_MM_MEDIUM)
# define LZO_INFO_MM "medium"
#elif (LZO_MM_PVP)
# define LZO_INFO_MM "pvp"
#elif (LZO_MM_SMALL)
# define LZO_INFO_MM "small"
#elif (LZO_MM_TINY)
# define LZO_INFO_MM "tiny"
#else
# error "unknown memory model"
#endif
#endif
#if !defined(__lzo_gnuc_extension__)
#if (LZO_CC_GNUC >= 0x020800ul)
# define __lzo_gnuc_extension__ __extension__
#elif (LZO_CC_ARMCC_GNUC || LZO_CC_CLANG || LZO_CC_LLVM || LZO_CC_PATHSCALE)
# define __lzo_gnuc_extension__ __extension__
#elif (LZO_CC_IBMC >= 600)
# define __lzo_gnuc_extension__ __extension__
#else
#endif
#endif
#if !defined(__lzo_gnuc_extension__)
# define __lzo_gnuc_extension__ /*empty*/
#endif
#if !defined(LZO_CFG_USE_NEW_STYLE_CASTS) && defined(__cplusplus) && 0
# if (LZO_CC_GNUC && (LZO_CC_GNUC < 0x020800ul))
# define LZO_CFG_USE_NEW_STYLE_CASTS 0
# elif (LZO_CC_INTELC && (__INTEL_COMPILER < 1200))
# define LZO_CFG_USE_NEW_STYLE_CASTS 0
# else
# define LZO_CFG_USE_NEW_STYLE_CASTS 1
# endif
#endif
#if !defined(LZO_CFG_USE_NEW_STYLE_CASTS)
# define LZO_CFG_USE_NEW_STYLE_CASTS 0
#endif
#if !defined(__cplusplus)
# if defined(LZO_CFG_USE_NEW_STYLE_CASTS)
# undef LZO_CFG_USE_NEW_STYLE_CASTS
# endif
# define LZO_CFG_USE_NEW_STYLE_CASTS 0
#endif
#if !defined(LZO_REINTERPRET_CAST)
# if (LZO_CFG_USE_NEW_STYLE_CASTS)
# define LZO_REINTERPRET_CAST(t,e) (reinterpret_cast<t> (e))
# endif
#endif
#if !defined(LZO_REINTERPRET_CAST)
# define LZO_REINTERPRET_CAST(t,e) ((t) (e))
#endif
#if !defined(LZO_STATIC_CAST)
# if (LZO_CFG_USE_NEW_STYLE_CASTS)
# define LZO_STATIC_CAST(t,e) (static_cast<t> (e))
# endif
#endif
#if !defined(LZO_STATIC_CAST)
# define LZO_STATIC_CAST(t,e) ((t) (e))
#endif
#if !defined(LZO_STATIC_CAST2)
# define LZO_STATIC_CAST2(t1,t2,e) LZO_STATIC_CAST(t1, LZO_STATIC_CAST(t2, e))
#endif
#if !defined(LZO_UNCONST_CAST)
# if (LZO_CFG_USE_NEW_STYLE_CASTS)
# define LZO_UNCONST_CAST(t,e) (const_cast<t> (e))
# elif (LZO_HAVE_MM_HUGE_PTR)
# define LZO_UNCONST_CAST(t,e) ((t) (e))
# elif (LZO_CC_ARMCC_GNUC || LZO_CC_CLANG || LZO_CC_GNUC || LZO_CC_LLVM || LZO_CC_PATHSCALE)
# define LZO_UNCONST_CAST(t,e) ((t) ((void *) ((lzo_uintptr_t) ((const void *) (e)))))
# endif
#endif
#if !defined(LZO_UNCONST_CAST)
# define LZO_UNCONST_CAST(t,e) ((t) ((void *) ((const void *) (e))))
#endif
#if !defined(LZO_UNCONST_VOLATILE_CAST)
# if (LZO_CFG_USE_NEW_STYLE_CASTS)
# define LZO_UNCONST_VOLATILE_CAST(t,e) (const_cast<t> (e))
# elif (LZO_HAVE_MM_HUGE_PTR)
# define LZO_UNCONST_VOLATILE_CAST(t,e) ((t) (e))
# elif (LZO_CC_ARMCC_GNUC || LZO_CC_CLANG || LZO_CC_GNUC || LZO_CC_LLVM || LZO_CC_PATHSCALE)
# define LZO_UNCONST_VOLATILE_CAST(t,e) ((t) ((volatile void *) ((lzo_uintptr_t) ((volatile const void *) (e)))))
# endif
#endif
#if !defined(LZO_UNCONST_VOLATILE_CAST)
# define LZO_UNCONST_VOLATILE_CAST(t,e) ((t) ((volatile void *) ((volatile const void *) (e))))
#endif
#if !defined(LZO_UNVOLATILE_CAST)
# if (LZO_CFG_USE_NEW_STYLE_CASTS)
# define LZO_UNVOLATILE_CAST(t,e) (const_cast<t> (e))
# elif (LZO_HAVE_MM_HUGE_PTR)
# define LZO_UNVOLATILE_CAST(t,e) ((t) (e))
# elif (LZO_CC_ARMCC_GNUC || LZO_CC_CLANG || LZO_CC_GNUC || LZO_CC_LLVM || LZO_CC_PATHSCALE)
# define LZO_UNVOLATILE_CAST(t,e) ((t) ((void *) ((lzo_uintptr_t) ((volatile void *) (e)))))
# endif
#endif
#if !defined(LZO_UNVOLATILE_CAST)
# define LZO_UNVOLATILE_CAST(t,e) ((t) ((void *) ((volatile void *) (e))))
#endif
#if !defined(LZO_UNVOLATILE_CONST_CAST)
# if (LZO_CFG_USE_NEW_STYLE_CASTS)
# define LZO_UNVOLATILE_CONST_CAST(t,e) (const_cast<t> (e))
# elif (LZO_HAVE_MM_HUGE_PTR)
# define LZO_UNVOLATILE_CONST_CAST(t,e) ((t) (e))
# elif (LZO_CC_ARMCC_GNUC || LZO_CC_CLANG || LZO_CC_GNUC || LZO_CC_LLVM || LZO_CC_PATHSCALE)
# define LZO_UNVOLATILE_CONST_CAST(t,e) ((t) ((const void *) ((lzo_uintptr_t) ((volatile const void *) (e)))))
# endif
#endif
#if !defined(LZO_UNVOLATILE_CONST_CAST)
# define LZO_UNVOLATILE_CONST_CAST(t,e) ((t) ((const void *) ((volatile const void *) (e))))
#endif
#if !defined(LZO_PCAST)
# if (LZO_HAVE_MM_HUGE_PTR)
# define LZO_PCAST(t,e) ((t) (e))
# endif
#endif
#if !defined(LZO_PCAST)
# define LZO_PCAST(t,e) LZO_STATIC_CAST(t, LZO_STATIC_CAST(void *, e))
#endif
#if !defined(LZO_CCAST)
# if (LZO_HAVE_MM_HUGE_PTR)
# define LZO_CCAST(t,e) ((t) (e))
# endif
#endif
#if !defined(LZO_CCAST)
# define LZO_CCAST(t,e) LZO_STATIC_CAST(t, LZO_STATIC_CAST(const void *, e))
#endif
#if !defined(LZO_ICONV)
# define LZO_ICONV(t,e) LZO_STATIC_CAST(t, e)
#endif
#if !defined(LZO_ICAST)
# define LZO_ICAST(t,e) LZO_STATIC_CAST(t, e)
#endif
#if !defined(LZO_ITRUNC)
# define LZO_ITRUNC(t,e) LZO_STATIC_CAST(t, e)
#endif
#if !defined(__lzo_cte)
# if (LZO_CC_MSC || LZO_CC_WATCOMC)
# define __lzo_cte(e) ((void)0,(e))
# elif 1
# define __lzo_cte(e) ((void)0,(e))
# endif
#endif
#if !defined(__lzo_cte)
# define __lzo_cte(e) (e)
#endif
#if !defined(LZO_BLOCK_BEGIN)
# define LZO_BLOCK_BEGIN do {
# define LZO_BLOCK_END } while __lzo_cte(0)
#endif
#if !defined(LZO_UNUSED)
# if (LZO_CC_BORLANDC && (__BORLANDC__ >= 0x0600))
# define LZO_UNUSED(var) ((void) &var)
# elif (LZO_CC_BORLANDC || LZO_CC_HIGHC || LZO_CC_NDPC || LZO_CC_PELLESC || LZO_CC_TURBOC)
# define LZO_UNUSED(var) if (&var) ; else
# elif (LZO_CC_CLANG && (LZO_CC_CLANG >= 0x030200ul))
# define LZO_UNUSED(var) ((void) &var)
# elif (LZO_CC_CLANG || LZO_CC_GNUC || LZO_CC_LLVM || LZO_CC_PATHSCALE)
# define LZO_UNUSED(var) ((void) var)
# elif (LZO_CC_MSC && (_MSC_VER < 900))
# define LZO_UNUSED(var) if (&var) ; else
# elif (LZO_CC_KEILC)
# define LZO_UNUSED(var) {LZO_EXTERN_C int lzo_unused__[1-2*!(sizeof(var)>0)];}
# elif (LZO_CC_PACIFICC)
# define LZO_UNUSED(var) ((void) sizeof(var))
# elif (LZO_CC_WATCOMC) && defined(__cplusplus)
# define LZO_UNUSED(var) ((void) var)
# else
# define LZO_UNUSED(var) ((void) &var)
# endif
#endif
#if !defined(LZO_UNUSED_FUNC)
# if (LZO_CC_BORLANDC && (__BORLANDC__ >= 0x0600))
# define LZO_UNUSED_FUNC(func) ((void) func)
# elif (LZO_CC_BORLANDC || LZO_CC_NDPC || LZO_CC_TURBOC)
# define LZO_UNUSED_FUNC(func) if (func) ; else
# elif (LZO_CC_CLANG || LZO_CC_LLVM)
# define LZO_UNUSED_FUNC(func) ((void) &func)
# elif (LZO_CC_MSC && (_MSC_VER < 900))
# define LZO_UNUSED_FUNC(func) if (func) ; else
# elif (LZO_CC_MSC)
# define LZO_UNUSED_FUNC(func) ((void) &func)
# elif (LZO_CC_KEILC || LZO_CC_PELLESC)
# define LZO_UNUSED_FUNC(func) {LZO_EXTERN_C int lzo_unused_func__[1-2*!(sizeof((int)func)>0)];}
# else
# define LZO_UNUSED_FUNC(func) ((void) func)
# endif
#endif
#if !defined(LZO_UNUSED_LABEL)
# if (LZO_CC_CLANG >= 0x020800ul)
# define LZO_UNUSED_LABEL(l) (__lzo_gnuc_extension__ ((void) ((const void *) &&l)))
# elif (LZO_CC_ARMCC || LZO_CC_CLANG || LZO_CC_INTELC || LZO_CC_WATCOMC)
# define LZO_UNUSED_LABEL(l) if __lzo_cte(0) goto l
# else
# define LZO_UNUSED_LABEL(l) switch (0) case 1:goto l
# endif
#endif
#if !defined(LZO_DEFINE_UNINITIALIZED_VAR)
# if 0
# define LZO_DEFINE_UNINITIALIZED_VAR(type,var,init) type var
# elif 0 && (LZO_CC_GNUC)
# define LZO_DEFINE_UNINITIALIZED_VAR(type,var,init) type var = var
# else
# define LZO_DEFINE_UNINITIALIZED_VAR(type,var,init) type var = init
# endif
#endif
#if !defined(__lzo_inline)
#if (LZO_CC_TURBOC && (__TURBOC__ <= 0x0295))
#elif defined(__cplusplus)
# define __lzo_inline inline
#elif defined(__STDC_VERSION__) && (__STDC_VERSION__-0 >= 199901L)
# define __lzo_inline inline
#elif (LZO_CC_BORLANDC && (__BORLANDC__ >= 0x0550))
# define __lzo_inline __inline
#elif (LZO_CC_ARMCC_GNUC || LZO_CC_CILLY || LZO_CC_CLANG || LZO_CC_GNUC || LZO_CC_LLVM || LZO_CC_PATHSCALE || LZO_CC_PGI)
# define __lzo_inline __inline__
#elif (LZO_CC_DMC)
# define __lzo_inline __inline
#elif (LZO_CC_GHS)
# define __lzo_inline __inline__
#elif (LZO_CC_IBMC >= 600)
# define __lzo_inline __inline__
#elif (LZO_CC_INTELC)
# define __lzo_inline __inline
#elif (LZO_CC_MWERKS && (__MWERKS__ >= 0x2405))
# define __lzo_inline __inline
#elif (LZO_CC_MSC && (_MSC_VER >= 900))
# define __lzo_inline __inline
#elif (LZO_CC_SUNPROC >= 0x5100)
# define __lzo_inline __inline__
#endif
#endif
#if defined(__lzo_inline)
# ifndef __lzo_HAVE_inline
# define __lzo_HAVE_inline 1
# endif
#else
# define __lzo_inline /*empty*/
#endif
#if !defined(__lzo_forceinline)
#if (LZO_CC_GNUC >= 0x030200ul)
# define __lzo_forceinline __inline__ __attribute__((__always_inline__))
#elif (LZO_CC_IBMC >= 700)
# define __lzo_forceinline __inline__ __attribute__((__always_inline__))
#elif (LZO_CC_INTELC_MSC && (__INTEL_COMPILER >= 450))
# define __lzo_forceinline __forceinline
#elif (LZO_CC_INTELC_GNUC && (__INTEL_COMPILER >= 800))
# define __lzo_forceinline __inline__ __attribute__((__always_inline__))
#elif (LZO_CC_ARMCC_GNUC || LZO_CC_CLANG || LZO_CC_LLVM || LZO_CC_PATHSCALE)
# define __lzo_forceinline __inline__ __attribute__((__always_inline__))
#elif (LZO_CC_MSC && (_MSC_VER >= 1200))
# define __lzo_forceinline __forceinline
#elif (LZO_CC_PGI >= 0x0d0a00ul)
# define __lzo_forceinline __inline__ __attribute__((__always_inline__))
#elif (LZO_CC_SUNPROC >= 0x5100)
# define __lzo_forceinline __inline__ __attribute__((__always_inline__))
#endif
#endif
#if defined(__lzo_forceinline)
# ifndef __lzo_HAVE_forceinline
# define __lzo_HAVE_forceinline 1
# endif
#else
# define __lzo_forceinline __lzo_inline
#endif
#if !defined(__lzo_noinline)
#if 1 && (LZO_ARCH_I386) && (LZO_CC_GNUC >= 0x040000ul) && (LZO_CC_GNUC < 0x040003ul)
# define __lzo_noinline __attribute__((__noinline__,__used__))
#elif (LZO_CC_GNUC >= 0x030200ul)
# define __lzo_noinline __attribute__((__noinline__))
#elif (LZO_CC_IBMC >= 700)
# define __lzo_noinline __attribute__((__noinline__))
#elif (LZO_CC_INTELC_MSC && (__INTEL_COMPILER >= 600))
# define __lzo_noinline __declspec(noinline)
#elif (LZO_CC_INTELC_GNUC && (__INTEL_COMPILER >= 800))
# define __lzo_noinline __attribute__((__noinline__))
#elif (LZO_CC_ARMCC_GNUC || LZO_CC_CLANG || LZO_CC_LLVM || LZO_CC_PATHSCALE)
# define __lzo_noinline __attribute__((__noinline__))
#elif (LZO_CC_MSC && (_MSC_VER >= 1300))
# define __lzo_noinline __declspec(noinline)
#elif (LZO_CC_MWERKS && (__MWERKS__ >= 0x3200) && (LZO_OS_WIN32 || LZO_OS_WIN64))
# if defined(__cplusplus)
# else
# define __lzo_noinline __declspec(noinline)
# endif
#elif (LZO_CC_PGI >= 0x0d0a00ul)
# define __lzo_noinline __attribute__((__noinline__))
#elif (LZO_CC_SUNPROC >= 0x5100)
# define __lzo_noinline __attribute__((__noinline__))
#endif
#endif
#if defined(__lzo_noinline)
# ifndef __lzo_HAVE_noinline
# define __lzo_HAVE_noinline 1
# endif
#else
# define __lzo_noinline /*empty*/
#endif
#if (__lzo_HAVE_forceinline || __lzo_HAVE_noinline) && !(__lzo_HAVE_inline)
# error "unexpected configuration - check your compiler defines"
#endif
#if !defined(__lzo_static_inline)
#if (LZO_CC_IBMC)
# define __lzo_static_inline __lzo_gnuc_extension__ static __lzo_inline
#endif
#endif
#if !defined(__lzo_static_inline)
# define __lzo_static_inline static __lzo_inline
#endif
#if !defined(__lzo_static_forceinline)
#if (LZO_CC_IBMC)
# define __lzo_static_forceinline __lzo_gnuc_extension__ static __lzo_forceinline
#endif
#endif
#if !defined(__lzo_static_forceinline)
# define __lzo_static_forceinline static __lzo_forceinline
#endif
#if !defined(__lzo_static_noinline)
#if (LZO_CC_IBMC)
# define __lzo_static_noinline __lzo_gnuc_extension__ static __lzo_noinline
#endif
#endif
#if !defined(__lzo_static_noinline)
# define __lzo_static_noinline static __lzo_noinline
#endif
#if !defined(__lzo_c99_extern_inline)
#if defined(__GNUC_GNU_INLINE__)
# define __lzo_c99_extern_inline __lzo_inline
#elif defined(__GNUC_STDC_INLINE__)
# define __lzo_c99_extern_inline extern __lzo_inline
#elif defined(__STDC_VERSION__) && (__STDC_VERSION__-0 >= 199901L)
# define __lzo_c99_extern_inline extern __lzo_inline
#endif
#if !defined(__lzo_c99_extern_inline) && (__lzo_HAVE_inline)
# define __lzo_c99_extern_inline __lzo_inline
#endif
#endif
#if defined(__lzo_c99_extern_inline)
# ifndef __lzo_HAVE_c99_extern_inline
# define __lzo_HAVE_c99_extern_inline 1
# endif
#else
# define __lzo_c99_extern_inline /*empty*/
#endif
#if !defined(__lzo_may_alias)
#if (LZO_CC_GNUC >= 0x030400ul)
# define __lzo_may_alias __attribute__((__may_alias__))
#elif (LZO_CC_CLANG >= 0x020900ul)
# define __lzo_may_alias __attribute__((__may_alias__))
#elif (LZO_CC_INTELC_GNUC && (__INTEL_COMPILER >= 1210)) && 0
# define __lzo_may_alias __attribute__((__may_alias__))
#elif (LZO_CC_PGI >= 0x0d0a00ul) && 0
# define __lzo_may_alias __attribute__((__may_alias__))
#endif
#endif
#if defined(__lzo_may_alias)
# ifndef __lzo_HAVE_may_alias
# define __lzo_HAVE_may_alias 1
# endif
#else
# define __lzo_may_alias /*empty*/
#endif
#if !defined(__lzo_noreturn)
#if (LZO_CC_GNUC >= 0x020700ul)
# define __lzo_noreturn __attribute__((__noreturn__))
#elif (LZO_CC_IBMC >= 700)
# define __lzo_noreturn __attribute__((__noreturn__))
#elif (LZO_CC_INTELC_MSC && (__INTEL_COMPILER >= 450))
# define __lzo_noreturn __declspec(noreturn)
#elif (LZO_CC_INTELC_GNUC && (__INTEL_COMPILER >= 600))
# define __lzo_noreturn __attribute__((__noreturn__))
#elif (LZO_CC_ARMCC_GNUC || LZO_CC_CLANG || LZO_CC_LLVM || LZO_CC_PATHSCALE)
# define __lzo_noreturn __attribute__((__noreturn__))
#elif (LZO_CC_MSC && (_MSC_VER >= 1200))
# define __lzo_noreturn __declspec(noreturn)
#elif (LZO_CC_PGI >= 0x0d0a00ul)
# define __lzo_noreturn __attribute__((__noreturn__))
#endif
#endif
#if defined(__lzo_noreturn)
# ifndef __lzo_HAVE_noreturn
# define __lzo_HAVE_noreturn 1
# endif
#else
# define __lzo_noreturn /*empty*/
#endif
#if !defined(__lzo_nothrow)
#if (LZO_CC_GNUC >= 0x030300ul)
# define __lzo_nothrow __attribute__((__nothrow__))
#elif (LZO_CC_INTELC_MSC && (__INTEL_COMPILER >= 450)) && defined(__cplusplus)
# define __lzo_nothrow __declspec(nothrow)
#elif (LZO_CC_INTELC_GNUC && (__INTEL_COMPILER >= 900))
# define __lzo_nothrow __attribute__((__nothrow__))
#elif (LZO_CC_ARMCC_GNUC || LZO_CC_CLANG || LZO_CC_LLVM || LZO_CC_PATHSCALE)
# define __lzo_nothrow __attribute__((__nothrow__))
#elif (LZO_CC_MSC && (_MSC_VER >= 1200)) && defined(__cplusplus)
# define __lzo_nothrow __declspec(nothrow)
#endif
#endif
#if defined(__lzo_nothrow)
# ifndef __lzo_HAVE_nothrow
# define __lzo_HAVE_nothrow 1
# endif
#else
# define __lzo_nothrow /*empty*/
#endif
#if !defined(__lzo_restrict)
#if (LZO_CC_GNUC >= 0x030400ul)
# define __lzo_restrict __restrict__
#elif (LZO_CC_IBMC >= 800) && !defined(__cplusplus)
# define __lzo_restrict __restrict__
#elif (LZO_CC_IBMC >= 1210)
# define __lzo_restrict __restrict__
#elif (LZO_CC_INTELC_MSC && (__INTEL_COMPILER >= 600))
#elif (LZO_CC_INTELC_GNUC && (__INTEL_COMPILER >= 600))
# define __lzo_restrict __restrict__
#elif (LZO_CC_ARMCC_GNUC || LZO_CC_CLANG || LZO_CC_LLVM)
# define __lzo_restrict __restrict__
#elif (LZO_CC_MSC && (_MSC_VER >= 1400))
# define __lzo_restrict __restrict
#elif (LZO_CC_PGI >= 0x0d0a00ul)
# define __lzo_restrict __restrict__
#endif
#endif
#if defined(__lzo_restrict)
# ifndef __lzo_HAVE_restrict
# define __lzo_HAVE_restrict 1
# endif
#else
# define __lzo_restrict /*empty*/
#endif
#if !defined(__lzo_alignof)
#if (LZO_CC_ARMCC || LZO_CC_CILLY || LZO_CC_CLANG || LZO_CC_GNUC || LZO_CC_LLVM || LZO_CC_PATHSCALE || LZO_CC_PGI)
# define __lzo_alignof(e) __alignof__(e)
#elif (LZO_CC_GHS) && !defined(__cplusplus)
# define __lzo_alignof(e) __alignof__(e)
#elif (LZO_CC_IBMC >= 600)
# define __lzo_alignof(e) (__lzo_gnuc_extension__ __alignof__(e))
#elif (LZO_CC_INTELC && (__INTEL_COMPILER >= 700))
# define __lzo_alignof(e) __alignof__(e)
#elif (LZO_CC_MSC && (_MSC_VER >= 1300))
# define __lzo_alignof(e) __alignof(e)
#elif (LZO_CC_SUNPROC >= 0x5100)
# define __lzo_alignof(e) __alignof__(e)
#endif
#endif
#if defined(__lzo_alignof)
# ifndef __lzo_HAVE_alignof
# define __lzo_HAVE_alignof 1
# endif
#endif
#if !defined(__lzo_struct_packed)
#if (LZO_CC_CLANG && (LZO_CC_CLANG < 0x020800ul)) && defined(__cplusplus)
#elif (LZO_CC_GNUC && (LZO_CC_GNUC < 0x020700ul))
#elif (LZO_CC_GNUC && (LZO_CC_GNUC < 0x020800ul)) && defined(__cplusplus)
#elif (LZO_CC_PCC && (LZO_CC_PCC < 0x010100ul))
#elif (LZO_CC_SUNPROC && (LZO_CC_SUNPROC < 0x5110)) && !defined(__cplusplus)
#elif (LZO_CC_GNUC >= 0x030400ul) && !(LZO_CC_PCC_GNUC) && (LZO_ARCH_AMD64 || LZO_ARCH_I386)
# define __lzo_struct_packed(s) struct s {
# define __lzo_struct_packed_end() } __attribute__((__gcc_struct__,__packed__));
# define __lzo_struct_packed_ma_end() } __lzo_may_alias __attribute__((__gcc_struct__,__packed__));
#elif (LZO_CC_ARMCC || LZO_CC_CLANG || LZO_CC_GNUC || LZO_CC_INTELC_GNUC || LZO_CC_LLVM || LZO_CC_PATHSCALE || (LZO_CC_PGI >= 0x0d0a00ul) || (LZO_CC_SUNPROC >= 0x5100))
# define __lzo_struct_packed(s) struct s {
# define __lzo_struct_packed_end() } __attribute__((__packed__));
# define __lzo_struct_packed_ma_end() } __lzo_may_alias __attribute__((__packed__));
#elif (LZO_CC_IBMC >= 700)
# define __lzo_struct_packed(s) __lzo_gnuc_extension__ struct s {
# define __lzo_struct_packed_end() } __attribute__((__packed__));
# define __lzo_struct_packed_ma_end() } __lzo_may_alias __attribute__((__packed__));
#elif (LZO_CC_INTELC_MSC) || (LZO_CC_MSC && (_MSC_VER >= 1300))
# define __lzo_struct_packed(s) __pragma(pack(push,1)) struct s {
# define __lzo_struct_packed_end() } __pragma(pack(pop));
#elif (LZO_CC_WATCOMC && (__WATCOMC__ >= 900))
# define __lzo_struct_packed(s) _Packed struct s {
# define __lzo_struct_packed_end() };
#endif
#endif
#if defined(__lzo_struct_packed) && !defined(__lzo_struct_packed_ma)
# define __lzo_struct_packed_ma(s) __lzo_struct_packed(s)
#endif
#if defined(__lzo_struct_packed_end) && !defined(__lzo_struct_packed_ma_end)
# define __lzo_struct_packed_ma_end() __lzo_struct_packed_end()
#endif
#if !defined(__lzo_byte_struct)
#if defined(__lzo_struct_packed)
# define __lzo_byte_struct(s,n) __lzo_struct_packed(s) unsigned char a[n]; __lzo_struct_packed_end()
# define __lzo_byte_struct_ma(s,n) __lzo_struct_packed_ma(s) unsigned char a[n]; __lzo_struct_packed_ma_end()
#elif (LZO_CC_CILLY || LZO_CC_CLANG || LZO_CC_PGI || (LZO_CC_SUNPROC >= 0x5100))
# define __lzo_byte_struct(s,n) struct s { unsigned char a[n]; } __attribute__((__packed__));
# define __lzo_byte_struct_ma(s,n) struct s { unsigned char a[n]; } __lzo_may_alias __attribute__((__packed__));
#endif
#endif
#if defined(__lzo_byte_struct) && !defined(__lzo_byte_struct_ma)
# define __lzo_byte_struct_ma(s,n) __lzo_byte_struct(s,n)
#endif
#if !defined(__lzo_struct_align16) && (__lzo_HAVE_alignof)
#if (LZO_CC_GNUC && (LZO_CC_GNUC < 0x030000ul))
#elif (LZO_CC_CLANG && (LZO_CC_CLANG < 0x020800ul)) && defined(__cplusplus)
#elif (LZO_CC_CILLY || LZO_CC_PCC)
#elif (LZO_CC_INTELC_MSC) || (LZO_CC_MSC && (_MSC_VER >= 1300))
# define __lzo_struct_align16(s) struct __declspec(align(16)) s {
# define __lzo_struct_align16_end() };
# define __lzo_struct_align32(s) struct __declspec(align(32)) s {
# define __lzo_struct_align32_end() };
# define __lzo_struct_align64(s) struct __declspec(align(64)) s {
# define __lzo_struct_align64_end() };
#elif (LZO_CC_ARMCC || LZO_CC_CLANG || LZO_CC_GNUC || (LZO_CC_IBMC >= 700) || LZO_CC_INTELC_GNUC || LZO_CC_LLVM || LZO_CC_PATHSCALE)
# define __lzo_struct_align16(s) struct s {
# define __lzo_struct_align16_end() } __attribute__((__aligned__(16)));
# define __lzo_struct_align32(s) struct s {
# define __lzo_struct_align32_end() } __attribute__((__aligned__(32)));
# define __lzo_struct_align64(s) struct s {
# define __lzo_struct_align64_end() } __attribute__((__aligned__(64)));
#endif
#endif
#if !defined(__lzo_union_um)
#if (LZO_CC_CLANG && (LZO_CC_CLANG < 0x020800ul)) && defined(__cplusplus)
#elif (LZO_CC_GNUC && (LZO_CC_GNUC < 0x020700ul))
#elif (LZO_CC_GNUC && (LZO_CC_GNUC < 0x020800ul)) && defined(__cplusplus)
#elif (LZO_CC_INTELC_GNUC && (__INTEL_COMPILER < 810))
#elif (LZO_CC_PCC && (LZO_CC_PCC < 0x010100ul))
#elif (LZO_CC_SUNPROC && (LZO_CC_SUNPROC < 0x5110)) && !defined(__cplusplus)
#elif (LZO_CC_ARMCC || LZO_CC_CLANG || LZO_CC_GNUC || LZO_CC_INTELC_GNUC || LZO_CC_LLVM || LZO_CC_PATHSCALE || (LZO_CC_PGI >= 0x0d0a00ul) || (LZO_CC_SUNPROC >= 0x5100))
# define __lzo_union_am(s) union s {
# define __lzo_union_am_end() } __lzo_may_alias;
# define __lzo_union_um(s) union s {
# define __lzo_union_um_end() } __lzo_may_alias __attribute__((__packed__));
#elif (LZO_CC_IBMC >= 700)
# define __lzo_union_am(s) __lzo_gnuc_extension__ union s {
# define __lzo_union_am_end() } __lzo_may_alias;
# define __lzo_union_um(s) __lzo_gnuc_extension__ union s {
# define __lzo_union_um_end() } __lzo_may_alias __attribute__((__packed__));
#elif (LZO_CC_INTELC_MSC) || (LZO_CC_MSC && (_MSC_VER >= 1300))
# define __lzo_union_um(s) __pragma(pack(push,1)) union s {
# define __lzo_union_um_end() } __pragma(pack(pop));
#elif (LZO_CC_WATCOMC && (__WATCOMC__ >= 900))
# define __lzo_union_um(s) _Packed union s {
# define __lzo_union_um_end() };
#endif
#endif
#if !defined(__lzo_union_am)
# define __lzo_union_am(s) union s {
# define __lzo_union_am_end() };
#endif
#if !defined(__lzo_constructor)
#if (LZO_CC_GNUC >= 0x030400ul)
# define __lzo_constructor __attribute__((__constructor__,__used__))
#elif (LZO_CC_GNUC >= 0x020700ul)
# define __lzo_constructor __attribute__((__constructor__))
#elif (LZO_CC_INTELC_GNUC && (__INTEL_COMPILER >= 800))
# define __lzo_constructor __attribute__((__constructor__,__used__))
#elif (LZO_CC_ARMCC_GNUC || LZO_CC_CLANG || LZO_CC_LLVM || LZO_CC_PATHSCALE)
# define __lzo_constructor __attribute__((__constructor__))
#endif
#endif
#if defined(__lzo_constructor)
# ifndef __lzo_HAVE_constructor
# define __lzo_HAVE_constructor 1
# endif
#endif
#if !defined(__lzo_destructor)
#if (LZO_CC_GNUC >= 0x030400ul)
# define __lzo_destructor __attribute__((__destructor__,__used__))
#elif (LZO_CC_GNUC >= 0x020700ul)
# define __lzo_destructor __attribute__((__destructor__))
#elif (LZO_CC_INTELC_GNUC && (__INTEL_COMPILER >= 800))
# define __lzo_destructor __attribute__((__destructor__,__used__))
#elif (LZO_CC_ARMCC_GNUC || LZO_CC_CLANG || LZO_CC_LLVM || LZO_CC_PATHSCALE)
# define __lzo_destructor __attribute__((__destructor__))
#endif
#endif
#if defined(__lzo_destructor)
# ifndef __lzo_HAVE_destructor
# define __lzo_HAVE_destructor 1
# endif
#endif
#if (__lzo_HAVE_destructor) && !(__lzo_HAVE_constructor)
# error "unexpected configuration - check your compiler defines"
#endif
#if !defined(__lzo_likely) && !defined(__lzo_unlikely)
#if (LZO_CC_GNUC >= 0x030200ul)
# define __lzo_likely(e) (__builtin_expect(!!(e),1))
# define __lzo_unlikely(e) (__builtin_expect(!!(e),0))
#elif (LZO_CC_IBMC >= 1010)
# define __lzo_likely(e) (__builtin_expect(!!(e),1))
# define __lzo_unlikely(e) (__builtin_expect(!!(e),0))
#elif (LZO_CC_INTELC && (__INTEL_COMPILER >= 800))
# define __lzo_likely(e) (__builtin_expect(!!(e),1))
# define __lzo_unlikely(e) (__builtin_expect(!!(e),0))
#elif (LZO_CC_ARMCC_GNUC || LZO_CC_CLANG || LZO_CC_LLVM || LZO_CC_PATHSCALE)
# define __lzo_likely(e) (__builtin_expect(!!(e),1))
# define __lzo_unlikely(e) (__builtin_expect(!!(e),0))
#endif
#endif
#if defined(__lzo_likely)
# ifndef __lzo_HAVE_likely
# define __lzo_HAVE_likely 1
# endif
#else
# define __lzo_likely(e) (e)
#endif
#if defined(__lzo_unlikely)
# ifndef __lzo_HAVE_unlikely
# define __lzo_HAVE_unlikely 1
# endif
#else
# define __lzo_unlikely(e) (e)
#endif
#if !defined(__lzo_static_unused_void_func)
# if 1 && (LZO_CC_ARMCC_GNUC || LZO_CC_CLANG || (LZO_CC_GNUC >= 0x020700ul) || LZO_CC_INTELC_GNUC || LZO_CC_LLVM || LZO_CC_PATHSCALE || LZO_CC_PGI)
# define __lzo_static_unused_void_func(f) static void __attribute__((__unused__)) f(void)
# else
# define __lzo_static_unused_void_func(f) static __lzo_inline void f(void)
# endif
#endif
#if !defined(__lzo_loop_forever)
# if (LZO_CC_IBMC)
# define __lzo_loop_forever() LZO_BLOCK_BEGIN for (;;) { ; } LZO_BLOCK_END
# else
# define __lzo_loop_forever() do { ; } while __lzo_cte(1)
# endif
#endif
#if !defined(__lzo_unreachable)
#if (LZO_CC_CLANG && (LZO_CC_CLANG >= 0x020800ul))
# define __lzo_unreachable() __builtin_unreachable();
#elif (LZO_CC_GNUC >= 0x040500ul)
# define __lzo_unreachable() __builtin_unreachable();
#elif (LZO_CC_INTELC_GNUC && (__INTEL_COMPILER >= 1300)) && 1
# define __lzo_unreachable() __builtin_unreachable();
#endif
#endif
#if defined(__lzo_unreachable)
# ifndef __lzo_HAVE_unreachable
# define __lzo_HAVE_unreachable 1
# endif
#else
# if 0
# define __lzo_unreachable() ((void)0);
# else
# define __lzo_unreachable() __lzo_loop_forever();
# endif
#endif
#ifndef __LZO_CTA_NAME
#if (LZO_CFG_USE_COUNTER)
# define __LZO_CTA_NAME(a) LZO_PP_ECONCAT2(a,__COUNTER__)
#else
# define __LZO_CTA_NAME(a) LZO_PP_ECONCAT2(a,__LINE__)
#endif
#endif
#if !defined(LZO_COMPILE_TIME_ASSERT_HEADER)
# if (LZO_CC_AZTECC || LZO_CC_ZORTECHC)
# define LZO_COMPILE_TIME_ASSERT_HEADER(e) LZO_EXTERN_C_BEGIN extern int __LZO_CTA_NAME(lzo_cta__)[1-!(e)]; LZO_EXTERN_C_END
# elif (LZO_CC_DMC || LZO_CC_SYMANTECC)
# define LZO_COMPILE_TIME_ASSERT_HEADER(e) LZO_EXTERN_C_BEGIN extern int __LZO_CTA_NAME(lzo_cta__)[1u-2*!(e)]; LZO_EXTERN_C_END
# elif (LZO_CC_TURBOC && (__TURBOC__ == 0x0295))
# define LZO_COMPILE_TIME_ASSERT_HEADER(e) LZO_EXTERN_C_BEGIN extern int __LZO_CTA_NAME(lzo_cta__)[1-!(e)]; LZO_EXTERN_C_END
# elif (LZO_CC_CLANG && (LZO_CC_CLANG < 0x020900ul)) && defined(__cplusplus)
# define LZO_COMPILE_TIME_ASSERT_HEADER(e) LZO_EXTERN_C_BEGIN int __LZO_CTA_NAME(lzo_cta_f__)(int [1-2*!(e)]); LZO_EXTERN_C_END
# elif (LZO_CC_GNUC) && defined(__CHECKER__) && defined(__SPARSE_CHECKER__)
# define LZO_COMPILE_TIME_ASSERT_HEADER(e) LZO_EXTERN_C_BEGIN enum {__LZO_CTA_NAME(lzo_cta_e__)=1/!!(e)} __attribute__((__unused__)); LZO_EXTERN_C_END
# else
# define LZO_COMPILE_TIME_ASSERT_HEADER(e) LZO_EXTERN_C_BEGIN extern int __LZO_CTA_NAME(lzo_cta__)[1-2*!(e)]; LZO_EXTERN_C_END
# endif
#endif
#if !defined(LZO_COMPILE_TIME_ASSERT)
# if (LZO_CC_AZTECC)
# define LZO_COMPILE_TIME_ASSERT(e) {typedef int __LZO_CTA_NAME(lzo_cta_t__)[1-!(e)];}
# elif (LZO_CC_DMC || LZO_CC_PACIFICC || LZO_CC_SYMANTECC || LZO_CC_ZORTECHC)
# define LZO_COMPILE_TIME_ASSERT(e) switch(0) case 1:case !(e):break;
# elif (LZO_CC_GNUC) && defined(__CHECKER__) && defined(__SPARSE_CHECKER__)
# define LZO_COMPILE_TIME_ASSERT(e) {(void) (0/!!(e));}
# elif (LZO_CC_GNUC >= 0x040700ul) && (LZO_CFG_USE_COUNTER) && defined(__cplusplus)
# define LZO_COMPILE_TIME_ASSERT(e) {enum {__LZO_CTA_NAME(lzo_cta_e__)=1/!!(e)} __attribute__((__unused__));}
# elif (LZO_CC_GNUC >= 0x040700ul)
# define LZO_COMPILE_TIME_ASSERT(e) {typedef int __LZO_CTA_NAME(lzo_cta_t__)[1-2*!(e)] __attribute__((__unused__));}
# elif (LZO_CC_MSC && (_MSC_VER < 900))
# define LZO_COMPILE_TIME_ASSERT(e) switch(0) case 1:case !(e):break;
# elif (LZO_CC_TURBOC && (__TURBOC__ == 0x0295))
# define LZO_COMPILE_TIME_ASSERT(e) switch(0) case 1:case !(e):break;
# else
# define LZO_COMPILE_TIME_ASSERT(e) {typedef int __LZO_CTA_NAME(lzo_cta_t__)[1-2*!(e)];}
# endif
#endif
LZO_COMPILE_TIME_ASSERT_HEADER(1 == 1)
#if defined(__cplusplus)
extern "C" { LZO_COMPILE_TIME_ASSERT_HEADER(2 == 2) }
#endif
LZO_COMPILE_TIME_ASSERT_HEADER(3 == 3)
#if (LZO_ARCH_I086 || LZO_ARCH_I386) && (LZO_OS_DOS16 || LZO_OS_DOS32 || LZO_OS_OS2 || LZO_OS_OS216 || LZO_OS_WIN16 || LZO_OS_WIN32 || LZO_OS_WIN64)
# if (LZO_CC_GNUC || LZO_CC_HIGHC || LZO_CC_NDPC || LZO_CC_PACIFICC)
# elif (LZO_CC_DMC || LZO_CC_SYMANTECC || LZO_CC_ZORTECHC)
# define __lzo_cdecl __cdecl
# define __lzo_cdecl_atexit /*empty*/
# define __lzo_cdecl_main __cdecl
# if (LZO_OS_OS2 && (LZO_CC_DMC || LZO_CC_SYMANTECC))
# define __lzo_cdecl_qsort __pascal
# elif (LZO_OS_OS2 && (LZO_CC_ZORTECHC))
# define __lzo_cdecl_qsort _stdcall
# else
# define __lzo_cdecl_qsort __cdecl
# endif
# elif (LZO_CC_WATCOMC)
# define __lzo_cdecl __cdecl
# else
# define __lzo_cdecl __cdecl
# define __lzo_cdecl_atexit __cdecl
# define __lzo_cdecl_main __cdecl
# define __lzo_cdecl_qsort __cdecl
# endif
# if (LZO_CC_GNUC || LZO_CC_HIGHC || LZO_CC_NDPC || LZO_CC_PACIFICC || LZO_CC_WATCOMC)
# elif (LZO_OS_OS2 && (LZO_CC_DMC || LZO_CC_SYMANTECC))
# define __lzo_cdecl_sighandler __pascal
# elif (LZO_OS_OS2 && (LZO_CC_ZORTECHC))
# define __lzo_cdecl_sighandler _stdcall
# elif (LZO_CC_MSC && (_MSC_VER >= 1400)) && defined(_M_CEE_PURE)
# define __lzo_cdecl_sighandler __clrcall
# elif (LZO_CC_MSC && (_MSC_VER >= 600 && _MSC_VER < 700))
# if defined(_DLL)
# define __lzo_cdecl_sighandler _far _cdecl _loadds
# elif defined(_MT)
# define __lzo_cdecl_sighandler _far _cdecl
# else
# define __lzo_cdecl_sighandler _cdecl
# endif
# else
# define __lzo_cdecl_sighandler __cdecl
# endif
#elif (LZO_ARCH_I386) && (LZO_CC_WATCOMC)
# define __lzo_cdecl __cdecl
#elif (LZO_ARCH_M68K && LZO_OS_TOS && (LZO_CC_PUREC || LZO_CC_TURBOC))
# define __lzo_cdecl cdecl
#endif
#if !defined(__lzo_cdecl)
# define __lzo_cdecl /*empty*/
#endif
#if !defined(__lzo_cdecl_atexit)
# define __lzo_cdecl_atexit /*empty*/
#endif
#if !defined(__lzo_cdecl_main)
# define __lzo_cdecl_main /*empty*/
#endif
#if !defined(__lzo_cdecl_qsort)
# define __lzo_cdecl_qsort /*empty*/
#endif
#if !defined(__lzo_cdecl_sighandler)
# define __lzo_cdecl_sighandler /*empty*/
#endif
#if !defined(__lzo_cdecl_va)
# define __lzo_cdecl_va __lzo_cdecl
#endif
#if !(LZO_CFG_NO_WINDOWS_H)
#if !defined(LZO_HAVE_WINDOWS_H)
#if (LZO_OS_CYGWIN || (LZO_OS_EMX && defined(__RSXNT__)) || LZO_OS_WIN32 || LZO_OS_WIN64)
# if (LZO_CC_WATCOMC && (__WATCOMC__ < 1000))
# elif (LZO_OS_WIN32 && LZO_CC_GNUC) && defined(__PW32__)
# elif ((LZO_OS_CYGWIN || defined(__MINGW32__)) && (LZO_CC_GNUC && (LZO_CC_GNUC < 0x025f00ul)))
# else
# define LZO_HAVE_WINDOWS_H 1
# endif
#endif
#endif
#endif
#ifndef LZO_SIZEOF_SHORT
#if defined(SIZEOF_SHORT)
# define LZO_SIZEOF_SHORT (SIZEOF_SHORT)
#elif defined(__SIZEOF_SHORT__)
# define LZO_SIZEOF_SHORT (__SIZEOF_SHORT__)
#endif
#endif
#ifndef LZO_SIZEOF_INT
#if defined(SIZEOF_INT)
# define LZO_SIZEOF_INT (SIZEOF_INT)
#elif defined(__SIZEOF_INT__)
# define LZO_SIZEOF_INT (__SIZEOF_INT__)
#endif
#endif
#ifndef LZO_SIZEOF_LONG
#if defined(SIZEOF_LONG)
# define LZO_SIZEOF_LONG (SIZEOF_LONG)
#elif defined(__SIZEOF_LONG__)
# define LZO_SIZEOF_LONG (__SIZEOF_LONG__)
#endif
#endif
#ifndef LZO_SIZEOF_LONG_LONG
#if defined(SIZEOF_LONG_LONG)
# define LZO_SIZEOF_LONG_LONG (SIZEOF_LONG_LONG)
#elif defined(__SIZEOF_LONG_LONG__)
# define LZO_SIZEOF_LONG_LONG (__SIZEOF_LONG_LONG__)
#endif
#endif
#ifndef LZO_SIZEOF___INT16
#if defined(SIZEOF___INT16)
# define LZO_SIZEOF___INT16 (SIZEOF___INT16)
#endif
#endif
#ifndef LZO_SIZEOF___INT32
#if defined(SIZEOF___INT32)
# define LZO_SIZEOF___INT32 (SIZEOF___INT32)
#endif
#endif
#ifndef LZO_SIZEOF___INT64
#if defined(SIZEOF___INT64)
# define LZO_SIZEOF___INT64 (SIZEOF___INT64)
#endif
#endif
#ifndef LZO_SIZEOF_VOID_P
#if defined(SIZEOF_VOID_P)
# define LZO_SIZEOF_VOID_P (SIZEOF_VOID_P)
#elif defined(__SIZEOF_POINTER__)
# define LZO_SIZEOF_VOID_P (__SIZEOF_POINTER__)
#endif
#endif
#ifndef LZO_SIZEOF_SIZE_T
#if defined(SIZEOF_SIZE_T)
# define LZO_SIZEOF_SIZE_T (SIZEOF_SIZE_T)
#elif defined(__SIZEOF_SIZE_T__)
# define LZO_SIZEOF_SIZE_T (__SIZEOF_SIZE_T__)
#endif
#endif
#ifndef LZO_SIZEOF_PTRDIFF_T
#if defined(SIZEOF_PTRDIFF_T)
# define LZO_SIZEOF_PTRDIFF_T (SIZEOF_PTRDIFF_T)
#elif defined(__SIZEOF_PTRDIFF_T__)
# define LZO_SIZEOF_PTRDIFF_T (__SIZEOF_PTRDIFF_T__)
#endif
#endif
#define __LZO_LSR(x,b) (((x)+0ul) >> (b))
#if !defined(LZO_SIZEOF_SHORT)
# if (LZO_ARCH_CRAY_PVP)
# define LZO_SIZEOF_SHORT 8
# elif (USHRT_MAX == LZO_0xffffL)
# define LZO_SIZEOF_SHORT 2
# elif (__LZO_LSR(USHRT_MAX,7) == 1)
# define LZO_SIZEOF_SHORT 1
# elif (__LZO_LSR(USHRT_MAX,15) == 1)
# define LZO_SIZEOF_SHORT 2
# elif (__LZO_LSR(USHRT_MAX,31) == 1)
# define LZO_SIZEOF_SHORT 4
# elif (__LZO_LSR(USHRT_MAX,63) == 1)
# define LZO_SIZEOF_SHORT 8
# elif (__LZO_LSR(USHRT_MAX,127) == 1)
# define LZO_SIZEOF_SHORT 16
# else
# error "LZO_SIZEOF_SHORT"
# endif
#endif
LZO_COMPILE_TIME_ASSERT_HEADER(LZO_SIZEOF_SHORT == sizeof(short))
#if !defined(LZO_SIZEOF_INT)
# if (LZO_ARCH_CRAY_PVP)
# define LZO_SIZEOF_INT 8
# elif (UINT_MAX == LZO_0xffffL)
# define LZO_SIZEOF_INT 2
# elif (UINT_MAX == LZO_0xffffffffL)
# define LZO_SIZEOF_INT 4
# elif (__LZO_LSR(UINT_MAX,7) == 1)
# define LZO_SIZEOF_INT 1
# elif (__LZO_LSR(UINT_MAX,15) == 1)
# define LZO_SIZEOF_INT 2
# elif (__LZO_LSR(UINT_MAX,31) == 1)
# define LZO_SIZEOF_INT 4
# elif (__LZO_LSR(UINT_MAX,63) == 1)
# define LZO_SIZEOF_INT 8
# elif (__LZO_LSR(UINT_MAX,127) == 1)
# define LZO_SIZEOF_INT 16
# else
# error "LZO_SIZEOF_INT"
# endif
#endif
LZO_COMPILE_TIME_ASSERT_HEADER(LZO_SIZEOF_INT == sizeof(int))
#if !defined(LZO_SIZEOF_LONG)
# if (ULONG_MAX == LZO_0xffffffffL)
# define LZO_SIZEOF_LONG 4
# elif (__LZO_LSR(ULONG_MAX,7) == 1)
# define LZO_SIZEOF_LONG 1
# elif (__LZO_LSR(ULONG_MAX,15) == 1)
# define LZO_SIZEOF_LONG 2
# elif (__LZO_LSR(ULONG_MAX,31) == 1)
# define LZO_SIZEOF_LONG 4
# elif (__LZO_LSR(ULONG_MAX,39) == 1)
# define LZO_SIZEOF_LONG 5
# elif (__LZO_LSR(ULONG_MAX,63) == 1)
# define LZO_SIZEOF_LONG 8
# elif (__LZO_LSR(ULONG_MAX,127) == 1)
# define LZO_SIZEOF_LONG 16
# else
# error "LZO_SIZEOF_LONG"
# endif
#endif
LZO_COMPILE_TIME_ASSERT_HEADER(LZO_SIZEOF_LONG == sizeof(long))
#if !defined(LZO_SIZEOF_LONG_LONG) && !defined(LZO_SIZEOF___INT64)
#if (LZO_SIZEOF_LONG > 0 && LZO_SIZEOF_LONG < 8)
# if defined(__LONG_MAX__) && defined(__LONG_LONG_MAX__)
# if (LZO_CC_GNUC >= 0x030300ul)
# if ((__LONG_MAX__-0) == (__LONG_LONG_MAX__-0))
# define LZO_SIZEOF_LONG_LONG LZO_SIZEOF_LONG
# elif (__LZO_LSR(__LONG_LONG_MAX__,30) == 1)
# define LZO_SIZEOF_LONG_LONG 4
# endif
# endif
# endif
#endif
#endif
#if !defined(LZO_SIZEOF_LONG_LONG) && !defined(LZO_SIZEOF___INT64)
#if (LZO_SIZEOF_LONG > 0 && LZO_SIZEOF_LONG < 8)
#if (LZO_ARCH_I086 && LZO_CC_DMC)
#elif (LZO_CC_CILLY) && defined(__GNUC__)
# define LZO_SIZEOF_LONG_LONG 8
#elif (LZO_CC_ARMCC_GNUC || LZO_CC_CLANG || LZO_CC_GNUC || LZO_CC_LLVM || LZO_CC_PATHSCALE)
# define LZO_SIZEOF_LONG_LONG 8
#elif ((LZO_OS_WIN32 || LZO_OS_WIN64 || defined(_WIN32)) && LZO_CC_MSC && (_MSC_VER >= 1400))
# define LZO_SIZEOF_LONG_LONG 8
#elif (LZO_OS_WIN64 || defined(_WIN64))
# define LZO_SIZEOF___INT64 8
#elif (LZO_ARCH_I386 && (LZO_CC_DMC))
# define LZO_SIZEOF_LONG_LONG 8
#elif (LZO_ARCH_I386 && (LZO_CC_SYMANTECC && (__SC__ >= 0x700)))
# define LZO_SIZEOF_LONG_LONG 8
#elif (LZO_ARCH_I386 && (LZO_CC_INTELC && defined(__linux__)))
# define LZO_SIZEOF_LONG_LONG 8
#elif (LZO_ARCH_I386 && (LZO_CC_MWERKS || LZO_CC_PELLESC || LZO_CC_PGI || LZO_CC_SUNPROC))
# define LZO_SIZEOF_LONG_LONG 8
#elif (LZO_ARCH_I386 && (LZO_CC_INTELC || LZO_CC_MSC))
# define LZO_SIZEOF___INT64 8
#elif ((LZO_OS_WIN32 || defined(_WIN32)) && (LZO_CC_MSC))
# define LZO_SIZEOF___INT64 8
#elif (LZO_ARCH_I386 && (LZO_CC_BORLANDC && (__BORLANDC__ >= 0x0520)))
# define LZO_SIZEOF___INT64 8
#elif (LZO_ARCH_I386 && (LZO_CC_WATCOMC && (__WATCOMC__ >= 1100)))
# define LZO_SIZEOF___INT64 8
#elif (LZO_CC_GHS && defined(__LLONG_BIT) && ((__LLONG_BIT-0) == 64))
# define LZO_SIZEOF_LONG_LONG 8
#elif (LZO_CC_WATCOMC && defined(_INTEGRAL_MAX_BITS) && ((_INTEGRAL_MAX_BITS-0) == 64))
# define LZO_SIZEOF___INT64 8
#elif (LZO_OS_OS400 || defined(__OS400__)) && defined(__LLP64_IFC__)
# define LZO_SIZEOF_LONG_LONG 8
#elif (defined(__vms) || defined(__VMS)) && ((__INITIAL_POINTER_SIZE-0) == 64)
# define LZO_SIZEOF_LONG_LONG 8
#elif (LZO_CC_SDCC) && (LZO_SIZEOF_INT == 2)
#elif 1 && defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
# define LZO_SIZEOF_LONG_LONG 8
#endif
#endif
#endif
#if defined(__cplusplus) && (LZO_CC_GNUC)
# if (LZO_CC_GNUC < 0x020800ul)
# undef LZO_SIZEOF_LONG_LONG
# endif
#endif
#if (LZO_CFG_NO_LONG_LONG)
# undef LZO_SIZEOF_LONG_LONG
#elif defined(__NO_LONG_LONG)
# undef LZO_SIZEOF_LONG_LONG
#elif defined(_NO_LONGLONG)
# undef LZO_SIZEOF_LONG_LONG
#endif
#if !defined(LZO_WORDSIZE)
#if (LZO_ARCH_ALPHA)
# define LZO_WORDSIZE 8
#elif (LZO_ARCH_AMD64)
# define LZO_WORDSIZE 8
#elif (LZO_ARCH_AVR)
# define LZO_WORDSIZE 1
#elif (LZO_ARCH_H8300)
# if defined(__NORMAL_MODE__)
# define LZO_WORDSIZE 4
# elif defined(__H8300H__) || defined(__H8300S__) || defined(__H8300SX__)
# define LZO_WORDSIZE 4
# else
# define LZO_WORDSIZE 2
# endif
#elif (LZO_ARCH_I086)
# define LZO_WORDSIZE 2
#elif (LZO_ARCH_IA64)
# define LZO_WORDSIZE 8
#elif (LZO_ARCH_M16C)
# define LZO_WORDSIZE 2
#elif (LZO_ARCH_SPU)
# define LZO_WORDSIZE 4
#elif (LZO_ARCH_Z80)
# define LZO_WORDSIZE 1
#elif (LZO_SIZEOF_LONG == 8) && ((defined(__mips__) && defined(__R5900__)) || defined(__MIPS_PSX2__))
# define LZO_WORDSIZE 8
#elif (LZO_OS_OS400 || defined(__OS400__))
# define LZO_WORDSIZE 8
#elif (defined(__vms) || defined(__VMS)) && (__INITIAL_POINTER_SIZE+0 == 64)
# define LZO_WORDSIZE 8
#endif
#endif
#if !defined(LZO_SIZEOF_VOID_P)
#if defined(__ILP32__) || defined(__ILP32) || defined(_ILP32)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(int) == 4)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(long) == 4)
# define LZO_SIZEOF_VOID_P 4
#elif defined(__ILP64__) || defined(__ILP64) || defined(_ILP64)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(int) == 8)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(long) == 8)
# define LZO_SIZEOF_VOID_P 8
#elif defined(__LLP64__) || defined(__LLP64) || defined(_LLP64) || defined(_WIN64)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(long) == 4)
# define LZO_SIZEOF_VOID_P 8
#elif defined(__LP64__) || defined(__LP64) || defined(_LP64)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(long) == 8)
# define LZO_SIZEOF_VOID_P 8
#elif (LZO_ARCH_AVR)
# define LZO_SIZEOF_VOID_P 2
#elif (LZO_ARCH_C166 || LZO_ARCH_MCS51 || LZO_ARCH_MCS251 || LZO_ARCH_MSP430)
# define LZO_SIZEOF_VOID_P 2
#elif (LZO_ARCH_H8300)
# if defined(__NORMAL_MODE__)
# define LZO_SIZEOF_VOID_P 2
# elif defined(__H8300H__) || defined(__H8300S__) || defined(__H8300SX__)
# define LZO_SIZEOF_VOID_P 4
# else
# define LZO_SIZEOF_VOID_P 2
# endif
# if (LZO_CC_GNUC && (LZO_CC_GNUC < 0x040000ul)) && (LZO_SIZEOF_INT == 4)
# define LZO_SIZEOF_SIZE_T LZO_SIZEOF_INT
# define LZO_SIZEOF_PTRDIFF_T LZO_SIZEOF_INT
# endif
#elif (LZO_ARCH_I086)
# if (LZO_MM_TINY || LZO_MM_SMALL || LZO_MM_MEDIUM)
# define LZO_SIZEOF_VOID_P 2
# elif (LZO_MM_COMPACT || LZO_MM_LARGE || LZO_MM_HUGE)
# define LZO_SIZEOF_VOID_P 4
# else
# error "invalid LZO_ARCH_I086 memory model"
# endif
#elif (LZO_ARCH_M16C)
# if defined(__m32c_cpu__) || defined(__m32cm_cpu__)
# define LZO_SIZEOF_VOID_P 4
# else
# define LZO_SIZEOF_VOID_P 2
# endif
#elif (LZO_ARCH_SPU)
# define LZO_SIZEOF_VOID_P 4
#elif (LZO_ARCH_Z80)
# define LZO_SIZEOF_VOID_P 2
#elif (LZO_SIZEOF_LONG == 8) && ((defined(__mips__) && defined(__R5900__)) || defined(__MIPS_PSX2__))
# define LZO_SIZEOF_VOID_P 4
#elif (LZO_OS_OS400 || defined(__OS400__))
# if defined(__LLP64_IFC__)
# define LZO_SIZEOF_VOID_P 8
# define LZO_SIZEOF_SIZE_T LZO_SIZEOF_LONG
# define LZO_SIZEOF_PTRDIFF_T LZO_SIZEOF_LONG
# else
# define LZO_SIZEOF_VOID_P 16
# define LZO_SIZEOF_SIZE_T LZO_SIZEOF_LONG
# define LZO_SIZEOF_PTRDIFF_T LZO_SIZEOF_LONG
# endif
#elif (defined(__vms) || defined(__VMS)) && (__INITIAL_POINTER_SIZE+0 == 64)
# define LZO_SIZEOF_VOID_P 8
# define LZO_SIZEOF_SIZE_T LZO_SIZEOF_LONG
# define LZO_SIZEOF_PTRDIFF_T LZO_SIZEOF_LONG
#endif
#endif
#if !defined(LZO_SIZEOF_VOID_P)
# define LZO_SIZEOF_VOID_P LZO_SIZEOF_LONG
#endif
LZO_COMPILE_TIME_ASSERT_HEADER(LZO_SIZEOF_VOID_P == sizeof(void *))
#if !defined(LZO_SIZEOF_SIZE_T)
#if (LZO_ARCH_I086 || LZO_ARCH_M16C)
# define LZO_SIZEOF_SIZE_T 2
#endif
#endif
#if !defined(LZO_SIZEOF_SIZE_T)
# define LZO_SIZEOF_SIZE_T LZO_SIZEOF_VOID_P
#endif
#if defined(offsetof)
LZO_COMPILE_TIME_ASSERT_HEADER(LZO_SIZEOF_SIZE_T == sizeof(size_t))
#endif
#if !defined(LZO_SIZEOF_PTRDIFF_T)
#if (LZO_ARCH_I086)
# if (LZO_MM_TINY || LZO_MM_SMALL || LZO_MM_MEDIUM || LZO_MM_HUGE)
# define LZO_SIZEOF_PTRDIFF_T LZO_SIZEOF_VOID_P
# elif (LZO_MM_COMPACT || LZO_MM_LARGE)
# if (LZO_CC_BORLANDC || LZO_CC_TURBOC)
# define LZO_SIZEOF_PTRDIFF_T 4
# else
# define LZO_SIZEOF_PTRDIFF_T 2
# endif
# else
# error "invalid LZO_ARCH_I086 memory model"
# endif
#endif
#endif
#if !defined(LZO_SIZEOF_PTRDIFF_T)
# define LZO_SIZEOF_PTRDIFF_T LZO_SIZEOF_SIZE_T
#endif
#if defined(offsetof)
LZO_COMPILE_TIME_ASSERT_HEADER(LZO_SIZEOF_PTRDIFF_T == sizeof(ptrdiff_t))
#endif
#if !defined(LZO_WORDSIZE)
# define LZO_WORDSIZE LZO_SIZEOF_VOID_P
#endif
#if (LZO_ABI_NEUTRAL_ENDIAN)
# undef LZO_ABI_BIG_ENDIAN
# undef LZO_ABI_LITTLE_ENDIAN
#elif !(LZO_ABI_BIG_ENDIAN) && !(LZO_ABI_LITTLE_ENDIAN)
#if (LZO_ARCH_ALPHA) && (LZO_ARCH_CRAY_MPP)
# define LZO_ABI_BIG_ENDIAN 1
#elif (LZO_ARCH_IA64) && (LZO_OS_POSIX_LINUX || LZO_OS_WIN64)
# define LZO_ABI_LITTLE_ENDIAN 1
#elif (LZO_ARCH_ALPHA || LZO_ARCH_AMD64 || LZO_ARCH_BLACKFIN || LZO_ARCH_CRIS || LZO_ARCH_I086 || LZO_ARCH_I386 || LZO_ARCH_MSP430)
# define LZO_ABI_LITTLE_ENDIAN 1
#elif (LZO_ARCH_AVR32 || LZO_ARCH_M68K || LZO_ARCH_S390 || LZO_ARCH_SPU)
# define LZO_ABI_BIG_ENDIAN 1
#elif 1 && defined(__IAR_SYSTEMS_ICC__) && defined(__LITTLE_ENDIAN__)
# if (__LITTLE_ENDIAN__ == 1)
# define LZO_ABI_LITTLE_ENDIAN 1
# else
# define LZO_ABI_BIG_ENDIAN 1
# endif
#elif 1 && defined(__BIG_ENDIAN__) && !defined(__LITTLE_ENDIAN__)
# define LZO_ABI_BIG_ENDIAN 1
#elif 1 && defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)
# define LZO_ABI_LITTLE_ENDIAN 1
#elif 1 && (LZO_ARCH_ARM) && defined(__ARMEB__) && !defined(__ARMEL__)
# define LZO_ABI_BIG_ENDIAN 1
#elif 1 && (LZO_ARCH_ARM) && defined(__ARMEL__) && !defined(__ARMEB__)
# define LZO_ABI_LITTLE_ENDIAN 1
#elif 1 && (LZO_ARCH_ARM && LZO_CC_ARMCC_ARMCC)
# if defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN)
# error "unexpected configuration - check your compiler defines"
# elif defined(__BIG_ENDIAN)
# define LZO_ABI_BIG_ENDIAN 1
# else
# define LZO_ABI_LITTLE_ENDIAN 1
# endif
#elif 1 && (LZO_ARCH_ARM64) && defined(__AARCH64EB__) && !defined(__AARCH64EL__)
# define LZO_ABI_BIG_ENDIAN 1
#elif 1 && (LZO_ARCH_ARM64) && defined(__AARCH64EL__) && !defined(__AARCH64EB__)
# define LZO_ABI_LITTLE_ENDIAN 1
#elif 1 && (LZO_ARCH_MIPS) && defined(__MIPSEB__) && !defined(__MIPSEL__)
# define LZO_ABI_BIG_ENDIAN 1
#elif 1 && (LZO_ARCH_MIPS) && defined(__MIPSEL__) && !defined(__MIPSEB__)
# define LZO_ABI_LITTLE_ENDIAN 1
#endif
#endif
#if (LZO_ABI_BIG_ENDIAN) && (LZO_ABI_LITTLE_ENDIAN)
# error "unexpected configuration - check your compiler defines"
#endif
#if (LZO_ABI_BIG_ENDIAN)
# define LZO_INFO_ABI_ENDIAN "be"
#elif (LZO_ABI_LITTLE_ENDIAN)
# define LZO_INFO_ABI_ENDIAN "le"
#elif (LZO_ABI_NEUTRAL_ENDIAN)
# define LZO_INFO_ABI_ENDIAN "neutral"
#endif
#if (LZO_SIZEOF_INT == 1 && LZO_SIZEOF_LONG == 2 && LZO_SIZEOF_VOID_P == 2)
# define LZO_ABI_I8LP16 1
# define LZO_INFO_ABI_PM "i8lp16"
#elif (LZO_SIZEOF_INT == 2 && LZO_SIZEOF_LONG == 2 && LZO_SIZEOF_VOID_P == 2)
# define LZO_ABI_ILP16 1
# define LZO_INFO_ABI_PM "ilp16"
#elif (LZO_SIZEOF_INT == 2 && LZO_SIZEOF_LONG == 4 && LZO_SIZEOF_VOID_P == 4)
# define LZO_ABI_LP32 1
# define LZO_INFO_ABI_PM "lp32"
#elif (LZO_SIZEOF_INT == 4 && LZO_SIZEOF_LONG == 4 && LZO_SIZEOF_VOID_P == 4)
# define LZO_ABI_ILP32 1
# define LZO_INFO_ABI_PM "ilp32"
#elif (LZO_SIZEOF_INT == 4 && LZO_SIZEOF_LONG == 4 && LZO_SIZEOF_VOID_P == 8 && LZO_SIZEOF_SIZE_T == 8)
# define LZO_ABI_LLP64 1
# define LZO_INFO_ABI_PM "llp64"
#elif (LZO_SIZEOF_INT == 4 && LZO_SIZEOF_LONG == 8 && LZO_SIZEOF_VOID_P == 8)
# define LZO_ABI_LP64 1
# define LZO_INFO_ABI_PM "lp64"
#elif (LZO_SIZEOF_INT == 8 && LZO_SIZEOF_LONG == 8 && LZO_SIZEOF_VOID_P == 8)
# define LZO_ABI_ILP64 1
# define LZO_INFO_ABI_PM "ilp64"
#elif (LZO_SIZEOF_INT == 4 && LZO_SIZEOF_LONG == 8 && LZO_SIZEOF_VOID_P == 4)
# define LZO_ABI_IP32L64 1
# define LZO_INFO_ABI_PM "ip32l64"
#endif
#if 0
#elif !defined(__LZO_LIBC_OVERRIDE)
#if (LZO_LIBC_NAKED)
# define LZO_INFO_LIBC "naked"
#elif (LZO_LIBC_FREESTANDING)
# define LZO_INFO_LIBC "freestanding"
#elif (LZO_LIBC_MOSTLY_FREESTANDING)
# define LZO_INFO_LIBC "mfreestanding"
#elif (LZO_LIBC_ISOC90)
# define LZO_INFO_LIBC "isoc90"
#elif (LZO_LIBC_ISOC99)
# define LZO_INFO_LIBC "isoc99"
#elif (LZO_CC_ARMCC_ARMCC) && defined(__ARMCLIB_VERSION)
# define LZO_LIBC_ISOC90 1
# define LZO_INFO_LIBC "isoc90"
#elif defined(__dietlibc__)
# define LZO_LIBC_DIETLIBC 1
# define LZO_INFO_LIBC "dietlibc"
#elif defined(_NEWLIB_VERSION)
# define LZO_LIBC_NEWLIB 1
# define LZO_INFO_LIBC "newlib"
#elif defined(__UCLIBC__) && defined(__UCLIBC_MAJOR__) && defined(__UCLIBC_MINOR__)
# if defined(__UCLIBC_SUBLEVEL__)
# define LZO_LIBC_UCLIBC (__UCLIBC_MAJOR__ * 0x10000L + (__UCLIBC_MINOR__-0) * 0x100 + (__UCLIBC_SUBLEVEL__-0))
# else
# define LZO_LIBC_UCLIBC 0x00090bL
# endif
# define LZO_INFO_LIBC "uc" "libc"
#elif defined(__GLIBC__) && defined(__GLIBC_MINOR__)
# define LZO_LIBC_GLIBC (__GLIBC__ * 0x10000L + (__GLIBC_MINOR__-0) * 0x100)
# define LZO_INFO_LIBC "glibc"
#elif (LZO_CC_MWERKS) && defined(__MSL__)
# define LZO_LIBC_MSL __MSL__
# define LZO_INFO_LIBC "msl"
#elif 1 && defined(__IAR_SYSTEMS_ICC__)
# define LZO_LIBC_ISOC90 1
# define LZO_INFO_LIBC "isoc90"
#else
# define LZO_LIBC_DEFAULT 1
# define LZO_INFO_LIBC "default"
#endif
#endif
#if (LZO_ARCH_I386 && (LZO_OS_DOS32 || LZO_OS_WIN32) && (LZO_CC_DMC || LZO_CC_INTELC || LZO_CC_MSC || LZO_CC_PELLESC))
# define LZO_ASM_SYNTAX_MSC 1
#elif (LZO_OS_WIN64 && (LZO_CC_DMC || LZO_CC_INTELC || LZO_CC_MSC || LZO_CC_PELLESC))
#elif (LZO_ARCH_I386 && LZO_CC_GNUC && (LZO_CC_GNUC == 0x011f00ul))
#elif (LZO_ARCH_I386 && (LZO_CC_CLANG || LZO_CC_GNUC || LZO_CC_INTELC || LZO_CC_PATHSCALE))
# define LZO_ASM_SYNTAX_GNUC 1
#elif (LZO_ARCH_AMD64 && (LZO_CC_CLANG || LZO_CC_GNUC || LZO_CC_INTELC || LZO_CC_PATHSCALE))
# define LZO_ASM_SYNTAX_GNUC 1
#elif (LZO_CC_GNUC)
# define LZO_ASM_SYNTAX_GNUC 1
#endif
#if (LZO_ASM_SYNTAX_GNUC)
#if (LZO_ARCH_I386 && LZO_CC_GNUC && (LZO_CC_GNUC < 0x020000ul))
# define __LZO_ASM_CLOBBER "ax"
# define __LZO_ASM_CLOBBER_LIST_CC /*empty*/
# define __LZO_ASM_CLOBBER_LIST_CC_MEMORY /*empty*/
# define __LZO_ASM_CLOBBER_LIST_EMPTY /*empty*/
#elif (LZO_CC_INTELC && (__INTEL_COMPILER < 1000))
# define __LZO_ASM_CLOBBER "memory"
# define __LZO_ASM_CLOBBER_LIST_CC /*empty*/
# define __LZO_ASM_CLOBBER_LIST_CC_MEMORY : "memory"
# define __LZO_ASM_CLOBBER_LIST_EMPTY /*empty*/
#else
# define __LZO_ASM_CLOBBER "cc", "memory"
# define __LZO_ASM_CLOBBER_LIST_CC : "cc"
# define __LZO_ASM_CLOBBER_LIST_CC_MEMORY : "cc", "memory"
# define __LZO_ASM_CLOBBER_LIST_EMPTY /*empty*/
#endif
#endif
#if (LZO_ARCH_ALPHA)
# define LZO_OPT_AVOID_UINT_INDEX 1
#elif (LZO_ARCH_AMD64)
# define LZO_OPT_AVOID_INT_INDEX 1
# define LZO_OPT_AVOID_UINT_INDEX 1
# ifndef LZO_OPT_UNALIGNED16
# define LZO_OPT_UNALIGNED16 1
# endif
# ifndef LZO_OPT_UNALIGNED32
# define LZO_OPT_UNALIGNED32 1
# endif
# ifndef LZO_OPT_UNALIGNED64
# define LZO_OPT_UNALIGNED64 1
# endif
#elif (LZO_ARCH_ARM)
# if defined(__ARM_FEATURE_UNALIGNED)
# ifndef LZO_OPT_UNALIGNED16
# define LZO_OPT_UNALIGNED16 1
# endif
# ifndef LZO_OPT_UNALIGNED32
# define LZO_OPT_UNALIGNED32 1
# endif
# elif defined(__TARGET_ARCH_ARM) && ((__TARGET_ARCH_ARM+0) >= 7)
# ifndef LZO_OPT_UNALIGNED16
# define LZO_OPT_UNALIGNED16 1
# endif
# ifndef LZO_OPT_UNALIGNED32
# define LZO_OPT_UNALIGNED32 1
# endif
# elif defined(__TARGET_ARCH_ARM) && ((__TARGET_ARCH_ARM+0) >= 6) && !defined(__TARGET_PROFILE_M)
# ifndef LZO_OPT_UNALIGNED16
# define LZO_OPT_UNALIGNED16 1
# endif
# ifndef LZO_OPT_UNALIGNED32
# define LZO_OPT_UNALIGNED32 1
# endif
# endif
#elif (LZO_ARCH_ARM64)
# ifndef LZO_OPT_UNALIGNED16
# define LZO_OPT_UNALIGNED16 1
# endif
# ifndef LZO_OPT_UNALIGNED32
# define LZO_OPT_UNALIGNED32 1
# endif
# ifndef LZO_OPT_UNALIGNED64
# define LZO_OPT_UNALIGNED64 1
# endif
#elif (LZO_ARCH_CRIS)
# ifndef LZO_OPT_UNALIGNED16
# define LZO_OPT_UNALIGNED16 1
# endif
# ifndef LZO_OPT_UNALIGNED32
# define LZO_OPT_UNALIGNED32 1
# endif
#elif (LZO_ARCH_I386)
# ifndef LZO_OPT_UNALIGNED16
# define LZO_OPT_UNALIGNED16 1
# endif
# ifndef LZO_OPT_UNALIGNED32
# define LZO_OPT_UNALIGNED32 1
# endif
#elif (LZO_ARCH_IA64)
# define LZO_OPT_AVOID_INT_INDEX 1
# define LZO_OPT_AVOID_UINT_INDEX 1
# define LZO_OPT_PREFER_POSTINC 1
#elif (LZO_ARCH_M68K)
# define LZO_OPT_PREFER_POSTINC 1
# define LZO_OPT_PREFER_PREDEC 1
# if defined(__mc68020__) && !defined(__mcoldfire__)
# ifndef LZO_OPT_UNALIGNED16
# define LZO_OPT_UNALIGNED16 1
# endif
# ifndef LZO_OPT_UNALIGNED32
# define LZO_OPT_UNALIGNED32 1
# endif
# endif
#elif (LZO_ARCH_MIPS)
# define LZO_OPT_AVOID_UINT_INDEX 1
#elif (LZO_ARCH_POWERPC)
# define LZO_OPT_PREFER_PREINC 1
# define LZO_OPT_PREFER_PREDEC 1
# if (LZO_ABI_BIG_ENDIAN)
# ifndef LZO_OPT_UNALIGNED16
# define LZO_OPT_UNALIGNED16 1
# endif
# ifndef LZO_OPT_UNALIGNED32
# define LZO_OPT_UNALIGNED32 1
# endif
# if (LZO_WORDSIZE == 8)
# ifndef LZO_OPT_UNALIGNED64
# define LZO_OPT_UNALIGNED64 1
# endif
# endif
# endif
#elif (LZO_ARCH_S390)
# ifndef LZO_OPT_UNALIGNED16
# define LZO_OPT_UNALIGNED16 1
# endif
# ifndef LZO_OPT_UNALIGNED32
# define LZO_OPT_UNALIGNED32 1
# endif
# if (LZO_WORDSIZE == 8)
# ifndef LZO_OPT_UNALIGNED64
# define LZO_OPT_UNALIGNED64 1
# endif
# endif
#elif (LZO_ARCH_SH)
# define LZO_OPT_PREFER_POSTINC 1
# define LZO_OPT_PREFER_PREDEC 1
#endif
#ifndef LZO_CFG_NO_INLINE_ASM
#if (LZO_ABI_NEUTRAL_ENDIAN) || (LZO_ARCH_GENERIC)
# define LZO_CFG_NO_INLINE_ASM 1
#elif (LZO_CC_LLVM)
# define LZO_CFG_NO_INLINE_ASM 1
#endif
#endif
#if (LZO_CFG_NO_INLINE_ASM)
# undef LZO_ASM_SYNTAX_MSC
# undef LZO_ASM_SYNTAX_GNUC
# undef __LZO_ASM_CLOBBER
# undef __LZO_ASM_CLOBBER_LIST_CC
# undef __LZO_ASM_CLOBBER_LIST_CC_MEMORY
# undef __LZO_ASM_CLOBBER_LIST_EMPTY
#endif
#ifndef LZO_CFG_NO_UNALIGNED
#if (LZO_ABI_NEUTRAL_ENDIAN) || (LZO_ARCH_GENERIC)
# define LZO_CFG_NO_UNALIGNED 1
#endif
#endif
#if (LZO_CFG_NO_UNALIGNED)
# undef LZO_OPT_UNALIGNED16
# undef LZO_OPT_UNALIGNED32
# undef LZO_OPT_UNALIGNED64
#endif
#if defined(__LZO_INFOSTR_MM)
#elif (LZO_MM_FLAT) && (defined(__LZO_INFOSTR_PM) || defined(LZO_INFO_ABI_PM))
# define __LZO_INFOSTR_MM ""
#elif defined(LZO_INFO_MM)
# define __LZO_INFOSTR_MM "." LZO_INFO_MM
#else
# define __LZO_INFOSTR_MM ""
#endif
#if defined(__LZO_INFOSTR_PM)
#elif defined(LZO_INFO_ABI_PM)
# define __LZO_INFOSTR_PM "." LZO_INFO_ABI_PM
#else
# define __LZO_INFOSTR_PM ""
#endif
#if defined(__LZO_INFOSTR_ENDIAN)
#elif defined(LZO_INFO_ABI_ENDIAN)
# define __LZO_INFOSTR_ENDIAN "." LZO_INFO_ABI_ENDIAN
#else
# define __LZO_INFOSTR_ENDIAN ""
#endif
#if defined(__LZO_INFOSTR_OSNAME)
#elif defined(LZO_INFO_OS_CONSOLE)
# define __LZO_INFOSTR_OSNAME LZO_INFO_OS "." LZO_INFO_OS_CONSOLE
#elif defined(LZO_INFO_OS_POSIX)
# define __LZO_INFOSTR_OSNAME LZO_INFO_OS "." LZO_INFO_OS_POSIX
#else
# define __LZO_INFOSTR_OSNAME LZO_INFO_OS
#endif
#if defined(__LZO_INFOSTR_LIBC)
#elif defined(LZO_INFO_LIBC)
# define __LZO_INFOSTR_LIBC "." LZO_INFO_LIBC
#else
# define __LZO_INFOSTR_LIBC ""
#endif
#if defined(__LZO_INFOSTR_CCVER)
#elif defined(LZO_INFO_CCVER)
# define __LZO_INFOSTR_CCVER " " LZO_INFO_CCVER
#else
# define __LZO_INFOSTR_CCVER ""
#endif
#define LZO_INFO_STRING \
LZO_INFO_ARCH __LZO_INFOSTR_MM __LZO_INFOSTR_PM __LZO_INFOSTR_ENDIAN \
" " __LZO_INFOSTR_OSNAME __LZO_INFOSTR_LIBC " " LZO_INFO_CC __LZO_INFOSTR_CCVER
#if !(LZO_CFG_SKIP_LZO_TYPES)
#if (!(LZO_SIZEOF_SHORT+0 > 0 && LZO_SIZEOF_INT+0 > 0 && LZO_SIZEOF_LONG+0 > 0))
# error "missing defines for sizes"
#endif
#if (!(LZO_SIZEOF_PTRDIFF_T+0 > 0 && LZO_SIZEOF_SIZE_T+0 > 0 && LZO_SIZEOF_VOID_P+0 > 0))
# error "missing defines for sizes"
#endif
#if !defined(lzo_llong_t)
#if (LZO_SIZEOF_LONG_LONG+0 > 0)
__lzo_gnuc_extension__ typedef long long lzo_llong_t__;
__lzo_gnuc_extension__ typedef unsigned long long lzo_ullong_t__;
# define lzo_llong_t lzo_llong_t__
# define lzo_ullong_t lzo_ullong_t__
#endif
#endif
#if !defined(lzo_int16e_t)
#if (LZO_SIZEOF_LONG == 2)
# define lzo_int16e_t long
# define lzo_uint16e_t unsigned long
#elif (LZO_SIZEOF_INT == 2)
# define lzo_int16e_t int
# define lzo_uint16e_t unsigned int
#elif (LZO_SIZEOF_SHORT == 2)
# define lzo_int16e_t short int
# define lzo_uint16e_t unsigned short int
#elif 1 && !(LZO_CFG_TYPE_NO_MODE_HI) && (LZO_CC_CLANG || (LZO_CC_GNUC >= 0x025f00ul) || LZO_CC_LLVM)
typedef int lzo_int16e_hi_t__ __attribute__((__mode__(__HI__)));
typedef unsigned int lzo_uint16e_hi_t__ __attribute__((__mode__(__HI__)));
# define lzo_int16e_t lzo_int16e_hi_t__
# define lzo_uint16e_t lzo_uint16e_hi_t__
#elif (LZO_SIZEOF___INT16 == 2)
# define lzo_int16e_t __int16
# define lzo_uint16e_t unsigned __int16
#else
#endif
#endif
#if defined(lzo_int16e_t)
# define LZO_SIZEOF_LZO_INT16E_T 2
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int16e_t) == 2)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int16e_t) == LZO_SIZEOF_LZO_INT16E_T)
#endif
#if !defined(lzo_int32e_t)
#if (LZO_SIZEOF_LONG == 4)
# define lzo_int32e_t long int
# define lzo_uint32e_t unsigned long int
#elif (LZO_SIZEOF_INT == 4)
# define lzo_int32e_t int
# define lzo_uint32e_t unsigned int
#elif (LZO_SIZEOF_SHORT == 4)
# define lzo_int32e_t short int
# define lzo_uint32e_t unsigned short int
#elif (LZO_SIZEOF_LONG_LONG == 4)
# define lzo_int32e_t lzo_llong_t
# define lzo_uint32e_t lzo_ullong_t
#elif 1 && !(LZO_CFG_TYPE_NO_MODE_SI) && (LZO_CC_CLANG || (LZO_CC_GNUC >= 0x025f00ul) || LZO_CC_LLVM) && (__INT_MAX__+0 > 2147483647L)
typedef int lzo_int32e_si_t__ __attribute__((__mode__(__SI__)));
typedef unsigned int lzo_uint32e_si_t__ __attribute__((__mode__(__SI__)));
# define lzo_int32e_t lzo_int32e_si_t__
# define lzo_uint32e_t lzo_uint32e_si_t__
#elif 1 && !(LZO_CFG_TYPE_NO_MODE_SI) && (LZO_CC_GNUC >= 0x025f00ul) && defined(__AVR__) && (__LONG_MAX__+0 == 32767L)
typedef int lzo_int32e_si_t__ __attribute__((__mode__(__SI__)));
typedef unsigned int lzo_uint32e_si_t__ __attribute__((__mode__(__SI__)));
# define lzo_int32e_t lzo_int32e_si_t__
# define lzo_uint32e_t lzo_uint32e_si_t__
# define LZO_INT32_C(c) (c##LL)
# define LZO_UINT32_C(c) (c##ULL)
#elif (LZO_SIZEOF___INT32 == 4)
# define lzo_int32e_t __int32
# define lzo_uint32e_t unsigned __int32
#else
#endif
#endif
#if defined(lzo_int32e_t)
# define LZO_SIZEOF_LZO_INT32E_T 4
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int32e_t) == 4)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int32e_t) == LZO_SIZEOF_LZO_INT32E_T)
#endif
#if !defined(lzo_int64e_t)
#if (LZO_SIZEOF___INT64 == 8)
# if (LZO_CC_BORLANDC) && !(LZO_CFG_TYPE_PREFER___INT64)
# define LZO_CFG_TYPE_PREFER___INT64 1
# endif
#endif
#if (LZO_SIZEOF_INT == 8) && (LZO_SIZEOF_INT < LZO_SIZEOF_LONG)
# define lzo_int64e_t int
# define lzo_uint64e_t unsigned int
# define LZO_SIZEOF_LZO_INT64E_T LZO_SIZEOF_INT
#elif (LZO_SIZEOF_LONG == 8)
# define lzo_int64e_t long int
# define lzo_uint64e_t unsigned long int
# define LZO_SIZEOF_LZO_INT64E_T LZO_SIZEOF_LONG
#elif (LZO_SIZEOF_LONG_LONG == 8) && !(LZO_CFG_TYPE_PREFER___INT64)
# define lzo_int64e_t lzo_llong_t
# define lzo_uint64e_t lzo_ullong_t
# if (LZO_CC_BORLANDC)
# define LZO_INT64_C(c) ((c) + 0ll)
# define LZO_UINT64_C(c) ((c) + 0ull)
# elif 0
# define LZO_INT64_C(c) (__lzo_gnuc_extension__ (c##LL))
# define LZO_UINT64_C(c) (__lzo_gnuc_extension__ (c##ULL))
# else
# define LZO_INT64_C(c) (c##LL)
# define LZO_UINT64_C(c) (c##ULL)
# endif
# define LZO_SIZEOF_LZO_INT64E_T LZO_SIZEOF_LONG_LONG
#elif (LZO_SIZEOF___INT64 == 8)
# define lzo_int64e_t __int64
# define lzo_uint64e_t unsigned __int64
# if (LZO_CC_BORLANDC)
# define LZO_INT64_C(c) ((c) + 0i64)
# define LZO_UINT64_C(c) ((c) + 0ui64)
# else
# define LZO_INT64_C(c) (c##i64)
# define LZO_UINT64_C(c) (c##ui64)
# endif
# define LZO_SIZEOF_LZO_INT64E_T LZO_SIZEOF___INT64
#else
#endif
#endif
#if defined(lzo_int64e_t)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int64e_t) == 8)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int64e_t) == LZO_SIZEOF_LZO_INT64E_T)
#endif
#if !defined(lzo_int32l_t)
#if defined(lzo_int32e_t)
# define lzo_int32l_t lzo_int32e_t
# define lzo_uint32l_t lzo_uint32e_t
# define LZO_SIZEOF_LZO_INT32L_T LZO_SIZEOF_LZO_INT32E_T
#elif (LZO_SIZEOF_INT >= 4) && (LZO_SIZEOF_INT < LZO_SIZEOF_LONG)
# define lzo_int32l_t int
# define lzo_uint32l_t unsigned int
# define LZO_SIZEOF_LZO_INT32L_T LZO_SIZEOF_INT
#elif (LZO_SIZEOF_LONG >= 4)
# define lzo_int32l_t long int
# define lzo_uint32l_t unsigned long int
# define LZO_SIZEOF_LZO_INT32L_T LZO_SIZEOF_LONG
#else
# error "lzo_int32l_t"
#endif
#endif
#if 1
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int32l_t) >= 4)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int32l_t) == LZO_SIZEOF_LZO_INT32L_T)
#endif
#if !defined(lzo_int64l_t)
#if defined(lzo_int64e_t)
# define lzo_int64l_t lzo_int64e_t
# define lzo_uint64l_t lzo_uint64e_t
# define LZO_SIZEOF_LZO_INT64L_T LZO_SIZEOF_LZO_INT64E_T
#else
#endif
#endif
#if defined(lzo_int64l_t)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int64l_t) >= 8)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int64l_t) == LZO_SIZEOF_LZO_INT64L_T)
#endif
#if !defined(lzo_int32f_t)
#if (LZO_SIZEOF_SIZE_T >= 8)
# define lzo_int32f_t lzo_int64l_t
# define lzo_uint32f_t lzo_uint64l_t
# define LZO_SIZEOF_LZO_INT32F_T LZO_SIZEOF_LZO_INT64L_T
#else
# define lzo_int32f_t lzo_int32l_t
# define lzo_uint32f_t lzo_uint32l_t
# define LZO_SIZEOF_LZO_INT32F_T LZO_SIZEOF_LZO_INT32L_T
#endif
#endif
#if 1
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int32f_t) >= 4)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int32f_t) == LZO_SIZEOF_LZO_INT32F_T)
#endif
#if !defined(lzo_int64f_t)
#if defined(lzo_int64l_t)
# define lzo_int64f_t lzo_int64l_t
# define lzo_uint64f_t lzo_uint64l_t
# define LZO_SIZEOF_LZO_INT64F_T LZO_SIZEOF_LZO_INT64L_T
#else
#endif
#endif
#if defined(lzo_int64f_t)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int64f_t) >= 8)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int64f_t) == LZO_SIZEOF_LZO_INT64F_T)
#endif
#if !defined(lzo_intptr_t)
#if 1 && (LZO_OS_OS400 && (LZO_SIZEOF_VOID_P == 16))
# define __LZO_INTPTR_T_IS_POINTER 1
typedef char* lzo_intptr_t;
typedef char* lzo_uintptr_t;
# define lzo_intptr_t lzo_intptr_t
# define lzo_uintptr_t lzo_uintptr_t
# define LZO_SIZEOF_LZO_INTPTR_T LZO_SIZEOF_VOID_P
#elif (LZO_CC_MSC && (_MSC_VER >= 1300) && (LZO_SIZEOF_VOID_P == 4) && (LZO_SIZEOF_INT == 4))
typedef __w64 int lzo_intptr_t;
typedef __w64 unsigned int lzo_uintptr_t;
# define lzo_intptr_t lzo_intptr_t
# define lzo_uintptr_t lzo_uintptr_t
# define LZO_SIZEOF_LZO_INTPTR_T LZO_SIZEOF_INT
#elif (LZO_SIZEOF_SHORT == LZO_SIZEOF_VOID_P) && (LZO_SIZEOF_INT > LZO_SIZEOF_VOID_P)
# define lzo_intptr_t short
# define lzo_uintptr_t unsigned short
# define LZO_SIZEOF_LZO_INTPTR_T LZO_SIZEOF_SHORT
#elif (LZO_SIZEOF_INT >= LZO_SIZEOF_VOID_P) && (LZO_SIZEOF_INT < LZO_SIZEOF_LONG)
# define lzo_intptr_t int
# define lzo_uintptr_t unsigned int
# define LZO_SIZEOF_LZO_INTPTR_T LZO_SIZEOF_INT
#elif (LZO_SIZEOF_LONG >= LZO_SIZEOF_VOID_P)
# define lzo_intptr_t long
# define lzo_uintptr_t unsigned long
# define LZO_SIZEOF_LZO_INTPTR_T LZO_SIZEOF_LONG
#elif (LZO_SIZEOF_LZO_INT64L_T >= LZO_SIZEOF_VOID_P)
# define lzo_intptr_t lzo_int64l_t
# define lzo_uintptr_t lzo_uint64l_t
# define LZO_SIZEOF_LZO_INTPTR_T LZO_SIZEOF_LZO_INT64L_T
#else
# error "lzo_intptr_t"
#endif
#endif
#if 1
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_intptr_t) >= sizeof(void *))
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_intptr_t) == sizeof(lzo_uintptr_t))
#endif
#if !defined(lzo_word_t)
#if defined(LZO_WORDSIZE) && (LZO_WORDSIZE+0 > 0)
#if (LZO_WORDSIZE == LZO_SIZEOF_LZO_INTPTR_T) && !(__LZO_INTPTR_T_IS_POINTER)
# define lzo_word_t lzo_uintptr_t
# define lzo_sword_t lzo_intptr_t
# define LZO_SIZEOF_LZO_WORD_T LZO_SIZEOF_LZO_INTPTR_T
#elif (LZO_WORDSIZE == LZO_SIZEOF_LONG)
# define lzo_word_t unsigned long
# define lzo_sword_t long
# define LZO_SIZEOF_LZO_WORD_T LZO_SIZEOF_LONG
#elif (LZO_WORDSIZE == LZO_SIZEOF_INT)
# define lzo_word_t unsigned int
# define lzo_sword_t int
# define LZO_SIZEOF_LZO_WORD_T LZO_SIZEOF_INT
#elif (LZO_WORDSIZE == LZO_SIZEOF_SHORT)
# define lzo_word_t unsigned short
# define lzo_sword_t short
# define LZO_SIZEOF_LZO_WORD_T LZO_SIZEOF_SHORT
#elif (LZO_WORDSIZE == 1)
# define lzo_word_t unsigned char
# define lzo_sword_t signed char
# define LZO_SIZEOF_LZO_WORD_T 1
#elif (LZO_WORDSIZE == LZO_SIZEOF_LZO_INT64L_T)
# define lzo_word_t lzo_uint64l_t
# define lzo_sword_t lzo_int64l_t
# define LZO_SIZEOF_LZO_WORD_T LZO_SIZEOF_LZO_INT64L_T
#elif (LZO_ARCH_SPU) && (LZO_CC_GNUC)
#if 0
typedef unsigned lzo_word_t __attribute__((__mode__(__V16QI__)));
typedef int lzo_sword_t __attribute__((__mode__(__V16QI__)));
# define lzo_word_t lzo_word_t
# define lzo_sword_t lzo_sword_t
# define LZO_SIZEOF_LZO_WORD_T 16
#endif
#else
# error "lzo_word_t"
#endif
#endif
#endif
#if 1 && defined(lzo_word_t)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_word_t) == LZO_WORDSIZE)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_sword_t) == LZO_WORDSIZE)
#endif
#if 1
#define lzo_int8_t signed char
#define lzo_uint8_t unsigned char
#define LZO_SIZEOF_LZO_INT8_T 1
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int8_t) == 1)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int8_t) == sizeof(lzo_uint8_t))
#endif
#if defined(lzo_int16e_t)
#define lzo_int16_t lzo_int16e_t
#define lzo_uint16_t lzo_uint16e_t
#define LZO_SIZEOF_LZO_INT16_T LZO_SIZEOF_LZO_INT16E_T
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int16_t) == 2)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int16_t) == sizeof(lzo_uint16_t))
#endif
#if defined(lzo_int32e_t)
#define lzo_int32_t lzo_int32e_t
#define lzo_uint32_t lzo_uint32e_t
#define LZO_SIZEOF_LZO_INT32_T LZO_SIZEOF_LZO_INT32E_T
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int32_t) == 4)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int32_t) == sizeof(lzo_uint32_t))
#endif
#if defined(lzo_int64e_t)
#define lzo_int64_t lzo_int64e_t
#define lzo_uint64_t lzo_uint64e_t
#define LZO_SIZEOF_LZO_INT64_T LZO_SIZEOF_LZO_INT64E_T
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int64_t) == 8)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int64_t) == sizeof(lzo_uint64_t))
#endif
#if 1
#define lzo_int_least32_t lzo_int32l_t
#define lzo_uint_least32_t lzo_uint32l_t
#define LZO_SIZEOF_LZO_INT_LEAST32_T LZO_SIZEOF_LZO_INT32L_T
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int_least32_t) >= 4)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int_least32_t) == sizeof(lzo_uint_least32_t))
#endif
#if defined(lzo_int64l_t)
#define lzo_int_least64_t lzo_int64l_t
#define lzo_uint_least64_t lzo_uint64l_t
#define LZO_SIZEOF_LZO_INT_LEAST64_T LZO_SIZEOF_LZO_INT64L_T
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int_least64_t) >= 8)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int_least64_t) == sizeof(lzo_uint_least64_t))
#endif
#if 1
#define lzo_int_fast32_t lzo_int32f_t
#define lzo_uint_fast32_t lzo_uint32f_t
#define LZO_SIZEOF_LZO_INT_FAST32_T LZO_SIZEOF_LZO_INT32F_T
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int_fast32_t) >= 4)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int_fast32_t) == sizeof(lzo_uint_fast32_t))
#endif
#if defined(lzo_int64f_t)
#define lzo_int_fast64_t lzo_int64f_t
#define lzo_uint_fast64_t lzo_uint64f_t
#define LZO_SIZEOF_LZO_INT_FAST64_T LZO_SIZEOF_LZO_INT64F_T
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int_fast64_t) >= 8)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int_fast64_t) == sizeof(lzo_uint_fast64_t))
#endif
#if !defined(LZO_INT16_C)
# if (LZO_BROKEN_INTEGRAL_CONSTANTS) && (LZO_SIZEOF_INT >= 2)
# define LZO_INT16_C(c) ((c) + 0)
# define LZO_UINT16_C(c) ((c) + 0U)
# elif (LZO_BROKEN_INTEGRAL_CONSTANTS) && (LZO_SIZEOF_LONG >= 2)
# define LZO_INT16_C(c) ((c) + 0L)
# define LZO_UINT16_C(c) ((c) + 0UL)
# elif (LZO_SIZEOF_INT >= 2)
# define LZO_INT16_C(c) (c)
# define LZO_UINT16_C(c) (c##U)
# elif (LZO_SIZEOF_LONG >= 2)
# define LZO_INT16_C(c) (c##L)
# define LZO_UINT16_C(c) (c##UL)
# else
# error "LZO_INT16_C"
# endif
#endif
#if !defined(LZO_INT32_C)
# if (LZO_BROKEN_INTEGRAL_CONSTANTS) && (LZO_SIZEOF_INT >= 4)
# define LZO_INT32_C(c) ((c) + 0)
# define LZO_UINT32_C(c) ((c) + 0U)
# elif (LZO_BROKEN_INTEGRAL_CONSTANTS) && (LZO_SIZEOF_LONG >= 4)
# define LZO_INT32_C(c) ((c) + 0L)
# define LZO_UINT32_C(c) ((c) + 0UL)
# elif (LZO_SIZEOF_INT >= 4)
# define LZO_INT32_C(c) (c)
# define LZO_UINT32_C(c) (c##U)
# elif (LZO_SIZEOF_LONG >= 4)
# define LZO_INT32_C(c) (c##L)
# define LZO_UINT32_C(c) (c##UL)
# elif (LZO_SIZEOF_LONG_LONG >= 4)
# define LZO_INT32_C(c) (c##LL)
# define LZO_UINT32_C(c) (c##ULL)
# else
# error "LZO_INT32_C"
# endif
#endif
#if !defined(LZO_INT64_C) && defined(lzo_int64l_t)
# if (LZO_BROKEN_INTEGRAL_CONSTANTS) && (LZO_SIZEOF_INT >= 8)
# define LZO_INT64_C(c) ((c) + 0)
# define LZO_UINT64_C(c) ((c) + 0U)
# elif (LZO_BROKEN_INTEGRAL_CONSTANTS) && (LZO_SIZEOF_LONG >= 8)
# define LZO_INT64_C(c) ((c) + 0L)
# define LZO_UINT64_C(c) ((c) + 0UL)
# elif (LZO_SIZEOF_INT >= 8)
# define LZO_INT64_C(c) (c)
# define LZO_UINT64_C(c) (c##U)
# elif (LZO_SIZEOF_LONG >= 8)
# define LZO_INT64_C(c) (c##L)
# define LZO_UINT64_C(c) (c##UL)
# else
# error "LZO_INT64_C"
# endif
#endif
#endif
#endif
#endif
#undef LZO_HAVE_CONFIG_H
#include "minilzo.h"
#if !defined(MINILZO_VERSION) || (MINILZO_VERSION != 0x2080)
# error "version mismatch in miniLZO source files"
#endif
#ifdef MINILZO_HAVE_CONFIG_H
# define LZO_HAVE_CONFIG_H 1
#endif
#ifndef __LZO_CONF_H
#define __LZO_CONF_H 1
#if !defined(__LZO_IN_MINILZO)
#if defined(LZO_CFG_FREESTANDING) && (LZO_CFG_FREESTANDING)
# define LZO_LIBC_FREESTANDING 1
# define LZO_OS_FREESTANDING 1
#endif
#if defined(LZO_CFG_EXTRA_CONFIG_HEADER)
# include LZO_CFG_EXTRA_CONFIG_HEADER
#endif
#if defined(__LZOCONF_H) || defined(__LZOCONF_H_INCLUDED)
# error "include this file first"
#endif
#include "lzo/lzoconf.h"
#if defined(LZO_CFG_EXTRA_CONFIG_HEADER2)
# include LZO_CFG_EXTRA_CONFIG_HEADER2
#endif
#endif
#if (LZO_VERSION < 0x2080) || !defined(__LZOCONF_H_INCLUDED)
# error "version mismatch"
#endif
#if (LZO_CC_MSC && (_MSC_VER >= 1000 && _MSC_VER < 1100))
# pragma warning(disable: 4702)
#endif
#if (LZO_CC_MSC && (_MSC_VER >= 1000))
# pragma warning(disable: 4127 4701)
# pragma warning(disable: 4514 4710 4711)
#endif
#if (LZO_CC_MSC && (_MSC_VER >= 1300))
# pragma warning(disable: 4820)
#endif
#if (LZO_CC_MSC && (_MSC_VER >= 1800))
# pragma warning(disable: 4746)
#endif
#if (LZO_CC_SUNPROC)
#if !defined(__cplusplus)
# pragma error_messages(off,E_END_OF_LOOP_CODE_NOT_REACHED)
# pragma error_messages(off,E_LOOP_NOT_ENTERED_AT_TOP)
# pragma error_messages(off,E_STATEMENT_NOT_REACHED)
#endif
#endif
#if defined(__LZO_IN_MINILZO) || (LZO_CFG_FREESTANDING)
#elif 1
# include <string.h>
#else
# define LZO_WANT_ACC_INCD_H 1
#endif
#if defined(LZO_HAVE_CONFIG_H)
# define LZO_CFG_NO_CONFIG_HEADER 1
#endif
#if 1 && !defined(LZO_CFG_FREESTANDING)
#if 1 && !defined(HAVE_STRING_H)
#define HAVE_STRING_H 1
#endif
#if 1 && !defined(HAVE_MEMCMP)
#define HAVE_MEMCMP 1
#endif
#if 1 && !defined(HAVE_MEMCPY)
#define HAVE_MEMCPY 1
#endif
#if 1 && !defined(HAVE_MEMMOVE)
#define HAVE_MEMMOVE 1
#endif
#if 1 && !defined(HAVE_MEMSET)
#define HAVE_MEMSET 1
#endif
#endif
#if 1 && defined(HAVE_STRING_H)
#include <string.h>
#endif
#if 1 || defined(lzo_int8_t) || defined(lzo_uint8_t)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int8_t) == 1)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_uint8_t) == 1)
#endif
#if 1 || defined(lzo_int16_t) || defined(lzo_uint16_t)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int16_t) == 2)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_uint16_t) == 2)
#endif
#if 1 || defined(lzo_int32_t) || defined(lzo_uint32_t)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int32_t) == 4)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_uint32_t) == 4)
#endif
#if defined(lzo_int64_t) || defined(lzo_uint64_t)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_int64_t) == 8)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_uint64_t) == 8)
#endif
#if (LZO_CFG_FREESTANDING)
# undef HAVE_MEMCMP
# undef HAVE_MEMCPY
# undef HAVE_MEMMOVE
# undef HAVE_MEMSET
#endif
#if !(HAVE_MEMCMP)
# undef memcmp
# define memcmp(a,b,c) lzo_memcmp(a,b,c)
#else
# undef lzo_memcmp
# define lzo_memcmp(a,b,c) memcmp(a,b,c)
#endif
#if !(HAVE_MEMCPY)
# undef memcpy
# define memcpy(a,b,c) lzo_memcpy(a,b,c)
#else
# undef lzo_memcpy
# define lzo_memcpy(a,b,c) memcpy(a,b,c)
#endif
#if !(HAVE_MEMMOVE)
# undef memmove
# define memmove(a,b,c) lzo_memmove(a,b,c)
#else
# undef lzo_memmove
# define lzo_memmove(a,b,c) memmove(a,b,c)
#endif
#if !(HAVE_MEMSET)
# undef memset
# define memset(a,b,c) lzo_memset(a,b,c)
#else
# undef lzo_memset
# define lzo_memset(a,b,c) memset(a,b,c)
#endif
#undef NDEBUG
#if (LZO_CFG_FREESTANDING)
# undef LZO_DEBUG
# define NDEBUG 1
# undef assert
# define assert(e) ((void)0)
#else
# if !defined(LZO_DEBUG)
# define NDEBUG 1
# endif
# include <assert.h>
#endif
#if 0 && defined(__BOUNDS_CHECKING_ON)
# include <unchecked.h>
#else
# define BOUNDS_CHECKING_OFF_DURING(stmt) stmt
# define BOUNDS_CHECKING_OFF_IN_EXPR(expr) (expr)
#endif
#if (LZO_CFG_PGO)
# undef __lzo_likely
# undef __lzo_unlikely
# define __lzo_likely(e) (e)
# define __lzo_unlikely(e) (e)
#endif
#undef _
#undef __
#undef ___
#undef ____
#undef _p0
#undef _p1
#undef _p2
#undef _p3
#undef _p4
#undef _s0
#undef _s1
#undef _s2
#undef _s3
#undef _s4
#undef _ww
#if 1
# define LZO_BYTE(x) ((unsigned char) (x))
#else
# define LZO_BYTE(x) ((unsigned char) ((x) & 0xff))
#endif
#define LZO_MAX(a,b) ((a) >= (b) ? (a) : (b))
#define LZO_MIN(a,b) ((a) <= (b) ? (a) : (b))
#define LZO_MAX3(a,b,c) ((a) >= (b) ? LZO_MAX(a,c) : LZO_MAX(b,c))
#define LZO_MIN3(a,b,c) ((a) <= (b) ? LZO_MIN(a,c) : LZO_MIN(b,c))
#define lzo_sizeof(type) ((lzo_uint) (sizeof(type)))
#define LZO_HIGH(array) ((lzo_uint) (sizeof(array)/sizeof(*(array))))
#define LZO_SIZE(bits) (1u << (bits))
#define LZO_MASK(bits) (LZO_SIZE(bits) - 1)
#define LZO_USIZE(bits) ((lzo_uint) 1 << (bits))
#define LZO_UMASK(bits) (LZO_USIZE(bits) - 1)
#if !defined(DMUL)
#if 0
# define DMUL(a,b) ((lzo_xint) ((lzo_uint32_t)(a) * (lzo_uint32_t)(b)))
#else
# define DMUL(a,b) ((lzo_xint) ((a) * (b)))
#endif
#endif
#ifndef __LZO_FUNC_H
#define __LZO_FUNC_H 1
#if !defined(LZO_BITOPS_USE_ASM_BITSCAN) && !defined(LZO_BITOPS_USE_GNUC_BITSCAN) && !defined(LZO_BITOPS_USE_MSC_BITSCAN)
#if 1 && (LZO_ARCH_AMD64) && (LZO_CC_GNUC && (LZO_CC_GNUC < 0x040000ul)) && (LZO_ASM_SYNTAX_GNUC)
#define LZO_BITOPS_USE_ASM_BITSCAN 1
#elif (LZO_CC_CLANG || (LZO_CC_GNUC >= 0x030400ul) || (LZO_CC_INTELC_GNUC && (__INTEL_COMPILER >= 1000)) || (LZO_CC_LLVM && (!defined(__llvm_tools_version__) || (__llvm_tools_version__+0 >= 0x010500ul))))
#define LZO_BITOPS_USE_GNUC_BITSCAN 1
#elif (LZO_OS_WIN32 || LZO_OS_WIN64) && ((LZO_CC_INTELC_MSC && (__INTEL_COMPILER >= 1010)) || (LZO_CC_MSC && (_MSC_VER >= 1400)))
#define LZO_BITOPS_USE_MSC_BITSCAN 1
#if (LZO_CC_MSC) && (LZO_ARCH_AMD64 || LZO_ARCH_I386)
#include <intrin.h>
#endif
#if (LZO_CC_MSC) && (LZO_ARCH_AMD64 || LZO_ARCH_I386)
#pragma intrinsic(_BitScanReverse)
#pragma intrinsic(_BitScanForward)
#endif
#if (LZO_CC_MSC) && (LZO_ARCH_AMD64)
#pragma intrinsic(_BitScanReverse64)
#pragma intrinsic(_BitScanForward64)
#endif
#endif
#endif
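/* Bit-scan helpers: count the leading/trailing zero bits of 32/64-bit
 * values via MSC intrinsics, inline BSR/BSF assembly or GCC builtins as
 * selected above; the portable fallback simply returns 0.
 */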
__lzo_static_forceinline unsigned lzo_bitops_ctlz32_func(lzo_uint32_t v)
{
#if (LZO_BITOPS_USE_MSC_BITSCAN) && (LZO_ARCH_AMD64 || LZO_ARCH_I386)
unsigned long r; (void) _BitScanReverse(&r, v); return (unsigned) r ^ 31;
#define lzo_bitops_ctlz32(v) lzo_bitops_ctlz32_func(v)
#elif (LZO_BITOPS_USE_ASM_BITSCAN) && (LZO_ARCH_AMD64 || LZO_ARCH_I386) && (LZO_ASM_SYNTAX_GNUC)
lzo_uint32_t r;
__asm__("bsr %1,%0" : "=r" (r) : "rm" (v) __LZO_ASM_CLOBBER_LIST_CC);
return (unsigned) r ^ 31;
#define lzo_bitops_ctlz32(v) lzo_bitops_ctlz32_func(v)
#elif (LZO_BITOPS_USE_GNUC_BITSCAN) && (LZO_SIZEOF_INT == 4)
unsigned r; r = (unsigned) __builtin_clz(v); return r;
#define lzo_bitops_ctlz32(v) ((unsigned) __builtin_clz(v))
#else
LZO_UNUSED(v); return 0;
#endif
}
#if defined(lzo_uint64_t)
__lzo_static_forceinline unsigned lzo_bitops_ctlz64_func(lzo_uint64_t v)
{
#if (LZO_BITOPS_USE_MSC_BITSCAN) && (LZO_ARCH_AMD64)
unsigned long r; (void) _BitScanReverse64(&r, v); return (unsigned) r ^ 63;
#define lzo_bitops_ctlz64(v) lzo_bitops_ctlz64_func(v)
#elif (LZO_BITOPS_USE_ASM_BITSCAN) && (LZO_ARCH_AMD64) && (LZO_ASM_SYNTAX_GNUC)
lzo_uint64_t r;
__asm__("bsr %1,%0" : "=r" (r) : "rm" (v) __LZO_ASM_CLOBBER_LIST_CC);
return (unsigned) r ^ 63;
#define lzo_bitops_ctlz64(v) lzo_bitops_ctlz64_func(v)
#elif (LZO_BITOPS_USE_GNUC_BITSCAN) && (LZO_SIZEOF_LONG == 8) && (LZO_WORDSIZE >= 8)
unsigned r; r = (unsigned) __builtin_clzl(v); return r;
#define lzo_bitops_ctlz64(v) ((unsigned) __builtin_clzl(v))
#elif (LZO_BITOPS_USE_GNUC_BITSCAN) && (LZO_SIZEOF_LONG_LONG == 8) && (LZO_WORDSIZE >= 8)
unsigned r; r = (unsigned) __builtin_clzll(v); return r;
#define lzo_bitops_ctlz64(v) ((unsigned) __builtin_clzll(v))
#else
LZO_UNUSED(v); return 0;
#endif
}
#endif
__lzo_static_forceinline unsigned lzo_bitops_cttz32_func(lzo_uint32_t v)
{
#if (LZO_BITOPS_USE_MSC_BITSCAN) && (LZO_ARCH_AMD64 || LZO_ARCH_I386)
unsigned long r; (void) _BitScanForward(&r, v); return (unsigned) r;
#define lzo_bitops_cttz32(v) lzo_bitops_cttz32_func(v)
#elif (LZO_BITOPS_USE_ASM_BITSCAN) && (LZO_ARCH_AMD64 || LZO_ARCH_I386) && (LZO_ASM_SYNTAX_GNUC)
lzo_uint32_t r;
__asm__("bsf %1,%0" : "=r" (r) : "rm" (v) __LZO_ASM_CLOBBER_LIST_CC);
return (unsigned) r;
#define lzo_bitops_cttz32(v) lzo_bitops_cttz32_func(v)
#elif (LZO_BITOPS_USE_GNUC_BITSCAN) && (LZO_SIZEOF_INT >= 4)
unsigned r; r = (unsigned) __builtin_ctz(v); return r;
#define lzo_bitops_cttz32(v) ((unsigned) __builtin_ctz(v))
#else
LZO_UNUSED(v); return 0;
#endif
}
#if defined(lzo_uint64_t)
__lzo_static_forceinline unsigned lzo_bitops_cttz64_func(lzo_uint64_t v)
{
#if (LZO_BITOPS_USE_MSC_BITSCAN) && (LZO_ARCH_AMD64)
unsigned long r; (void) _BitScanForward64(&r, v); return (unsigned) r;
#define lzo_bitops_cttz64(v) lzo_bitops_cttz64_func(v)
#elif (LZO_BITOPS_USE_ASM_BITSCAN) && (LZO_ARCH_AMD64) && (LZO_ASM_SYNTAX_GNUC)
lzo_uint64_t r;
__asm__("bsf %1,%0" : "=r" (r) : "rm" (v) __LZO_ASM_CLOBBER_LIST_CC);
return (unsigned) r;
#define lzo_bitops_cttz64(v) lzo_bitops_cttz64_func(v)
#elif (LZO_BITOPS_USE_GNUC_BITSCAN) && (LZO_SIZEOF_LONG >= 8) && (LZO_WORDSIZE >= 8)
unsigned r; r = (unsigned) __builtin_ctzl(v); return r;
#define lzo_bitops_cttz64(v) ((unsigned) __builtin_ctzl(v))
#elif (LZO_BITOPS_USE_GNUC_BITSCAN) && (LZO_SIZEOF_LONG_LONG >= 8) && (LZO_WORDSIZE >= 8)
unsigned r; r = (unsigned) __builtin_ctzll(v); return r;
#define lzo_bitops_cttz64(v) ((unsigned) __builtin_ctzll(v))
#else
LZO_UNUSED(v); return 0;
#endif
}
#endif
#if 1 && (LZO_CC_ARMCC_GNUC || LZO_CC_CLANG || (LZO_CC_GNUC >= 0x020700ul) || LZO_CC_INTELC_GNUC || LZO_CC_LLVM || LZO_CC_PATHSCALE || LZO_CC_PGI)
static void __attribute__((__unused__))
#else
__lzo_static_forceinline void
#endif
lzo_bitops_unused_funcs(void)
{
LZO_UNUSED_FUNC(lzo_bitops_ctlz32_func);
LZO_UNUSED_FUNC(lzo_bitops_cttz32_func);
#if defined(lzo_uint64_t)
LZO_UNUSED_FUNC(lzo_bitops_ctlz64_func);
LZO_UNUSED_FUNC(lzo_bitops_cttz64_func);
#endif
LZO_UNUSED_FUNC(lzo_bitops_unused_funcs);
}
#if defined(__lzo_alignof) && !(LZO_CFG_NO_UNALIGNED)
#ifndef __lzo_memops_tcheck
#define __lzo_memops_tcheck(t,a,b) ((void)0, sizeof(t) == (a) && __lzo_alignof(t) == (b))
#endif
#endif
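/* The lzo_memops_* types and macros implement small fixed-size set/move/
 * copy operations and endian-aware loads/stores that tolerate unaligned
 * addresses, falling back to byte-wise access on targets without
 * unaligned memory access.
 */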
#ifndef lzo_memops_TU0p
#define lzo_memops_TU0p void __LZO_MMODEL *
#endif
#ifndef lzo_memops_TU1p
#define lzo_memops_TU1p unsigned char __LZO_MMODEL *
#endif
#ifndef lzo_memops_TU2p
#if (LZO_OPT_UNALIGNED16)
typedef lzo_uint16_t __lzo_may_alias lzo_memops_TU2;
#define lzo_memops_TU2p volatile lzo_memops_TU2 *
#elif defined(__lzo_byte_struct)
__lzo_byte_struct(lzo_memops_TU2_struct,2)
typedef struct lzo_memops_TU2_struct lzo_memops_TU2;
#else
struct lzo_memops_TU2_struct { unsigned char a[2]; } __lzo_may_alias;
typedef struct lzo_memops_TU2_struct lzo_memops_TU2;
#endif
#ifndef lzo_memops_TU2p
#define lzo_memops_TU2p lzo_memops_TU2 *
#endif
#endif
#ifndef lzo_memops_TU4p
#if (LZO_OPT_UNALIGNED32)
typedef lzo_uint32_t __lzo_may_alias lzo_memops_TU4;
#define lzo_memops_TU4p volatile lzo_memops_TU4 __LZO_MMODEL *
#elif defined(__lzo_byte_struct)
__lzo_byte_struct(lzo_memops_TU4_struct,4)
typedef struct lzo_memops_TU4_struct lzo_memops_TU4;
#else
struct lzo_memops_TU4_struct { unsigned char a[4]; } __lzo_may_alias;
typedef struct lzo_memops_TU4_struct lzo_memops_TU4;
#endif
#ifndef lzo_memops_TU4p
#define lzo_memops_TU4p lzo_memops_TU4 __LZO_MMODEL *
#endif
#endif
#ifndef lzo_memops_TU8p
#if (LZO_OPT_UNALIGNED64)
typedef lzo_uint64_t __lzo_may_alias lzo_memops_TU8;
#define lzo_memops_TU8p volatile lzo_memops_TU8 __LZO_MMODEL *
#elif defined(__lzo_byte_struct)
__lzo_byte_struct(lzo_memops_TU8_struct,8)
typedef struct lzo_memops_TU8_struct lzo_memops_TU8;
#else
struct lzo_memops_TU8_struct { unsigned char a[8]; } __lzo_may_alias;
typedef struct lzo_memops_TU8_struct lzo_memops_TU8;
#endif
#ifndef lzo_memops_TU8p
#define lzo_memops_TU8p lzo_memops_TU8 __LZO_MMODEL *
#endif
#endif
#ifndef lzo_memops_set_TU1p
#define lzo_memops_set_TU1p volatile lzo_memops_TU1p
#endif
#ifndef lzo_memops_move_TU1p
#define lzo_memops_move_TU1p lzo_memops_TU1p
#endif
#define LZO_MEMOPS_SET1(dd,cc) \
LZO_BLOCK_BEGIN \
lzo_memops_set_TU1p d__1 = (lzo_memops_set_TU1p) (lzo_memops_TU0p) (dd); \
d__1[0] = LZO_BYTE(cc); \
LZO_BLOCK_END
#define LZO_MEMOPS_SET2(dd,cc) \
LZO_BLOCK_BEGIN \
lzo_memops_set_TU1p d__2 = (lzo_memops_set_TU1p) (lzo_memops_TU0p) (dd); \
d__2[0] = LZO_BYTE(cc); d__2[1] = LZO_BYTE(cc); \
LZO_BLOCK_END
#define LZO_MEMOPS_SET3(dd,cc) \
LZO_BLOCK_BEGIN \
lzo_memops_set_TU1p d__3 = (lzo_memops_set_TU1p) (lzo_memops_TU0p) (dd); \
d__3[0] = LZO_BYTE(cc); d__3[1] = LZO_BYTE(cc); d__3[2] = LZO_BYTE(cc); \
LZO_BLOCK_END
#define LZO_MEMOPS_SET4(dd,cc) \
LZO_BLOCK_BEGIN \
lzo_memops_set_TU1p d__4 = (lzo_memops_set_TU1p) (lzo_memops_TU0p) (dd); \
d__4[0] = LZO_BYTE(cc); d__4[1] = LZO_BYTE(cc); d__4[2] = LZO_BYTE(cc); d__4[3] = LZO_BYTE(cc); \
LZO_BLOCK_END
#define LZO_MEMOPS_MOVE1(dd,ss) \
LZO_BLOCK_BEGIN \
lzo_memops_move_TU1p d__1 = (lzo_memops_move_TU1p) (lzo_memops_TU0p) (dd); \
const lzo_memops_move_TU1p s__1 = (const lzo_memops_move_TU1p) (const lzo_memops_TU0p) (ss); \
d__1[0] = s__1[0]; \
LZO_BLOCK_END
#define LZO_MEMOPS_MOVE2(dd,ss) \
LZO_BLOCK_BEGIN \
lzo_memops_move_TU1p d__2 = (lzo_memops_move_TU1p) (lzo_memops_TU0p) (dd); \
const lzo_memops_move_TU1p s__2 = (const lzo_memops_move_TU1p) (const lzo_memops_TU0p) (ss); \
d__2[0] = s__2[0]; d__2[1] = s__2[1]; \
LZO_BLOCK_END
#define LZO_MEMOPS_MOVE3(dd,ss) \
LZO_BLOCK_BEGIN \
lzo_memops_move_TU1p d__3 = (lzo_memops_move_TU1p) (lzo_memops_TU0p) (dd); \
const lzo_memops_move_TU1p s__3 = (const lzo_memops_move_TU1p) (const lzo_memops_TU0p) (ss); \
d__3[0] = s__3[0]; d__3[1] = s__3[1]; d__3[2] = s__3[2]; \
LZO_BLOCK_END
#define LZO_MEMOPS_MOVE4(dd,ss) \
LZO_BLOCK_BEGIN \
lzo_memops_move_TU1p d__4 = (lzo_memops_move_TU1p) (lzo_memops_TU0p) (dd); \
const lzo_memops_move_TU1p s__4 = (const lzo_memops_move_TU1p) (const lzo_memops_TU0p) (ss); \
d__4[0] = s__4[0]; d__4[1] = s__4[1]; d__4[2] = s__4[2]; d__4[3] = s__4[3]; \
LZO_BLOCK_END
#define LZO_MEMOPS_MOVE8(dd,ss) \
LZO_BLOCK_BEGIN \
lzo_memops_move_TU1p d__8 = (lzo_memops_move_TU1p) (lzo_memops_TU0p) (dd); \
const lzo_memops_move_TU1p s__8 = (const lzo_memops_move_TU1p) (const lzo_memops_TU0p) (ss); \
d__8[0] = s__8[0]; d__8[1] = s__8[1]; d__8[2] = s__8[2]; d__8[3] = s__8[3]; \
d__8[4] = s__8[4]; d__8[5] = s__8[5]; d__8[6] = s__8[6]; d__8[7] = s__8[7]; \
LZO_BLOCK_END
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(*(lzo_memops_TU1p)0)==1)
#define LZO_MEMOPS_COPY1(dd,ss) LZO_MEMOPS_MOVE1(dd,ss)
#if (LZO_OPT_UNALIGNED16)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(*(lzo_memops_TU2p)0)==2)
#define LZO_MEMOPS_COPY2(dd,ss) \
* (lzo_memops_TU2p) (lzo_memops_TU0p) (dd) = * (const lzo_memops_TU2p) (const lzo_memops_TU0p) (ss)
#elif defined(__lzo_memops_tcheck)
#define LZO_MEMOPS_COPY2(dd,ss) \
LZO_BLOCK_BEGIN if (__lzo_memops_tcheck(lzo_memops_TU2,2,1)) { \
* (lzo_memops_TU2p) (lzo_memops_TU0p) (dd) = * (const lzo_memops_TU2p) (const lzo_memops_TU0p) (ss); \
} else { LZO_MEMOPS_MOVE2(dd,ss); } LZO_BLOCK_END
#else
#define LZO_MEMOPS_COPY2(dd,ss) LZO_MEMOPS_MOVE2(dd,ss)
#endif
#if (LZO_OPT_UNALIGNED32)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(*(lzo_memops_TU4p)0)==4)
#define LZO_MEMOPS_COPY4(dd,ss) \
* (lzo_memops_TU4p) (lzo_memops_TU0p) (dd) = * (const lzo_memops_TU4p) (const lzo_memops_TU0p) (ss)
#elif defined(__lzo_memops_tcheck)
#define LZO_MEMOPS_COPY4(dd,ss) \
LZO_BLOCK_BEGIN if (__lzo_memops_tcheck(lzo_memops_TU4,4,1)) { \
* (lzo_memops_TU4p) (lzo_memops_TU0p) (dd) = * (const lzo_memops_TU4p) (const lzo_memops_TU0p) (ss); \
} else { LZO_MEMOPS_MOVE4(dd,ss); } LZO_BLOCK_END
#else
#define LZO_MEMOPS_COPY4(dd,ss) LZO_MEMOPS_MOVE4(dd,ss)
#endif
#if (LZO_WORDSIZE != 8)
#define LZO_MEMOPS_COPY8(dd,ss) \
LZO_BLOCK_BEGIN LZO_MEMOPS_COPY4(dd,ss); LZO_MEMOPS_COPY4((lzo_memops_TU1p)(lzo_memops_TU0p)(dd)+4,(const lzo_memops_TU1p)(const lzo_memops_TU0p)(ss)+4); LZO_BLOCK_END
#else
#if (LZO_OPT_UNALIGNED64)
LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(*(lzo_memops_TU8p)0)==8)
#define LZO_MEMOPS_COPY8(dd,ss) \
* (lzo_memops_TU8p) (lzo_memops_TU0p) (dd) = * (const lzo_memops_TU8p) (const lzo_memops_TU0p) (ss)
#elif (LZO_OPT_UNALIGNED32)
#define LZO_MEMOPS_COPY8(dd,ss) \
LZO_BLOCK_BEGIN LZO_MEMOPS_COPY4(dd,ss); LZO_MEMOPS_COPY4((lzo_memops_TU1p)(lzo_memops_TU0p)(dd)+4,(const lzo_memops_TU1p)(const lzo_memops_TU0p)(ss)+4); LZO_BLOCK_END
#elif defined(__lzo_memops_tcheck)
#define LZO_MEMOPS_COPY8(dd,ss) \
LZO_BLOCK_BEGIN if (__lzo_memops_tcheck(lzo_memops_TU8,8,1)) { \
* (lzo_memops_TU8p) (lzo_memops_TU0p) (dd) = * (const lzo_memops_TU8p) (const lzo_memops_TU0p) (ss); \
} else { LZO_MEMOPS_MOVE8(dd,ss); } LZO_BLOCK_END
#else
#define LZO_MEMOPS_COPY8(dd,ss) LZO_MEMOPS_MOVE8(dd,ss)
#endif
#endif
#define LZO_MEMOPS_COPYN(dd,ss,nn) \
LZO_BLOCK_BEGIN \
lzo_memops_TU1p d__n = (lzo_memops_TU1p) (lzo_memops_TU0p) (dd); \
const lzo_memops_TU1p s__n = (const lzo_memops_TU1p) (const lzo_memops_TU0p) (ss); \
lzo_uint n__n = (nn); \
while ((void)0, n__n >= 8) { LZO_MEMOPS_COPY8(d__n, s__n); d__n += 8; s__n += 8; n__n -= 8; } \
if ((void)0, n__n >= 4) { LZO_MEMOPS_COPY4(d__n, s__n); d__n += 4; s__n += 4; n__n -= 4; } \
if ((void)0, n__n > 0) do { *d__n++ = *s__n++; } while (--n__n > 0); \
LZO_BLOCK_END
__lzo_static_forceinline lzo_uint16_t lzo_memops_get_le16(const lzo_voidp ss)
{
lzo_uint16_t v;
#if (LZO_ABI_LITTLE_ENDIAN)
LZO_MEMOPS_COPY2(&v, ss);
#elif (LZO_OPT_UNALIGNED16 && LZO_ARCH_POWERPC && LZO_ABI_BIG_ENDIAN) && (LZO_ASM_SYNTAX_GNUC)
const lzo_memops_TU2p s = (const lzo_memops_TU2p) ss;
unsigned long vv;
__asm__("lhbrx %0,0,%1" : "=r" (vv) : "r" (s), "m" (*s));
v = (lzo_uint16_t) vv;
#else
const lzo_memops_TU1p s = (const lzo_memops_TU1p) ss;
v = (lzo_uint16_t) (((lzo_uint16_t)s[0]) | ((lzo_uint16_t)s[1] << 8));
#endif
return v;
}
#if (LZO_OPT_UNALIGNED16) && (LZO_ABI_LITTLE_ENDIAN)
#define LZO_MEMOPS_GET_LE16(ss) * (const lzo_memops_TU2p) (const lzo_memops_TU0p) (ss)
#else
#define LZO_MEMOPS_GET_LE16(ss) lzo_memops_get_le16(ss)
#endif
__lzo_static_forceinline lzo_uint32_t lzo_memops_get_le32(const lzo_voidp ss)
{
lzo_uint32_t v;
#if (LZO_ABI_LITTLE_ENDIAN)
LZO_MEMOPS_COPY4(&v, ss);
#elif (LZO_OPT_UNALIGNED32 && LZO_ARCH_POWERPC && LZO_ABI_BIG_ENDIAN) && (LZO_ASM_SYNTAX_GNUC)
const lzo_memops_TU4p s = (const lzo_memops_TU4p) ss;
unsigned long vv;
__asm__("lwbrx %0,0,%1" : "=r" (vv) : "r" (s), "m" (*s));
v = (lzo_uint32_t) vv;
#else
const lzo_memops_TU1p s = (const lzo_memops_TU1p) ss;
v = (lzo_uint32_t) (((lzo_uint32_t)s[0]) | ((lzo_uint32_t)s[1] << 8) | ((lzo_uint32_t)s[2] << 16) | ((lzo_uint32_t)s[3] << 24));
#endif
return v;
}
#if (LZO_OPT_UNALIGNED32) && (LZO_ABI_LITTLE_ENDIAN)
#define LZO_MEMOPS_GET_LE32(ss) * (const lzo_memops_TU4p) (const lzo_memops_TU0p) (ss)
#else
#define LZO_MEMOPS_GET_LE32(ss) lzo_memops_get_le32(ss)
#endif
#if (LZO_OPT_UNALIGNED64) && (LZO_ABI_LITTLE_ENDIAN)
#define LZO_MEMOPS_GET_LE64(ss) * (const lzo_memops_TU8p) (const lzo_memops_TU0p) (ss)
#endif
__lzo_static_forceinline lzo_uint16_t lzo_memops_get_ne16(const lzo_voidp ss)
{
lzo_uint16_t v;
LZO_MEMOPS_COPY2(&v, ss);
return v;
}
#if (LZO_OPT_UNALIGNED16)
#define LZO_MEMOPS_GET_NE16(ss) * (const lzo_memops_TU2p) (const lzo_memops_TU0p) (ss)
#else
#define LZO_MEMOPS_GET_NE16(ss) lzo_memops_get_ne16(ss)
#endif
__lzo_static_forceinline lzo_uint32_t lzo_memops_get_ne32(const lzo_voidp ss)
{
lzo_uint32_t v;
LZO_MEMOPS_COPY4(&v, ss);
return v;
}
#if (LZO_OPT_UNALIGNED32)
#define LZO_MEMOPS_GET_NE32(ss) * (const lzo_memops_TU4p) (const lzo_memops_TU0p) (ss)
#else
#define LZO_MEMOPS_GET_NE32(ss) lzo_memops_get_ne32(ss)
#endif
#if (LZO_OPT_UNALIGNED64)
#define LZO_MEMOPS_GET_NE64(ss) * (const lzo_memops_TU8p) (const lzo_memops_TU0p) (ss)
#endif
__lzo_static_forceinline void lzo_memops_put_le16(lzo_voidp dd, lzo_uint16_t vv)
{
#if (LZO_ABI_LITTLE_ENDIAN)
LZO_MEMOPS_COPY2(dd, &vv);
#elif (LZO_OPT_UNALIGNED16 && LZO_ARCH_POWERPC && LZO_ABI_BIG_ENDIAN) && (LZO_ASM_SYNTAX_GNUC)
lzo_memops_TU2p d = (lzo_memops_TU2p) dd;
unsigned long v = vv;
__asm__("sthbrx %2,0,%1" : "=m" (*d) : "r" (d), "r" (v));
#else
lzo_memops_TU1p d = (lzo_memops_TU1p) dd;
d[0] = LZO_BYTE((vv ) & 0xff);
d[1] = LZO_BYTE((vv >> 8) & 0xff);
#endif
}
#if (LZO_OPT_UNALIGNED16) && (LZO_ABI_LITTLE_ENDIAN)
#define LZO_MEMOPS_PUT_LE16(dd,vv) (* (lzo_memops_TU2p) (lzo_memops_TU0p) (dd) = (vv))
#else
#define LZO_MEMOPS_PUT_LE16(dd,vv) lzo_memops_put_le16(dd,vv)
#endif
__lzo_static_forceinline void lzo_memops_put_le32(lzo_voidp dd, lzo_uint32_t vv)
{
#if (LZO_ABI_LITTLE_ENDIAN)
LZO_MEMOPS_COPY4(dd, &vv);
#elif (LZO_OPT_UNALIGNED32 && LZO_ARCH_POWERPC && LZO_ABI_BIG_ENDIAN) && (LZO_ASM_SYNTAX_GNUC)
lzo_memops_TU4p d = (lzo_memops_TU4p) dd;
unsigned long v = vv;
__asm__("stwbrx %2,0,%1" : "=m" (*d) : "r" (d), "r" (v));
#else
lzo_memops_TU1p d = (lzo_memops_TU1p) dd;
d[0] = LZO_BYTE((vv ) & 0xff);
d[1] = LZO_BYTE((vv >> 8) & 0xff);
d[2] = LZO_BYTE((vv >> 16) & 0xff);
d[3] = LZO_BYTE((vv >> 24) & 0xff);
#endif
}
#if (LZO_OPT_UNALIGNED32) && (LZO_ABI_LITTLE_ENDIAN)
#define LZO_MEMOPS_PUT_LE32(dd,vv) (* (lzo_memops_TU4p) (lzo_memops_TU0p) (dd) = (vv))
#else
#define LZO_MEMOPS_PUT_LE32(dd,vv) lzo_memops_put_le32(dd,vv)
#endif
__lzo_static_forceinline void lzo_memops_put_ne16(lzo_voidp dd, lzo_uint16_t vv)
{
LZO_MEMOPS_COPY2(dd, &vv);
}
#if (LZO_OPT_UNALIGNED16)
#define LZO_MEMOPS_PUT_NE16(dd,vv) (* (lzo_memops_TU2p) (lzo_memops_TU0p) (dd) = (vv))
#else
#define LZO_MEMOPS_PUT_NE16(dd,vv) lzo_memops_put_ne16(dd,vv)
#endif
__lzo_static_forceinline void lzo_memops_put_ne32(lzo_voidp dd, lzo_uint32_t vv)
{
LZO_MEMOPS_COPY4(dd, &vv);
}
#if (LZO_OPT_UNALIGNED32)
#define LZO_MEMOPS_PUT_NE32(dd,vv) (* (lzo_memops_TU4p) (lzo_memops_TU0p) (dd) = (vv))
#else
#define LZO_MEMOPS_PUT_NE32(dd,vv) lzo_memops_put_ne32(dd,vv)
#endif
#if 1 && (LZO_CC_ARMCC_GNUC || LZO_CC_CLANG || (LZO_CC_GNUC >= 0x020700ul) || LZO_CC_INTELC_GNUC || LZO_CC_LLVM || LZO_CC_PATHSCALE || LZO_CC_PGI)
static void __attribute__((__unused__))
#else
__lzo_static_forceinline void
#endif
lzo_memops_unused_funcs(void)
{
LZO_UNUSED_FUNC(lzo_memops_get_le16);
LZO_UNUSED_FUNC(lzo_memops_get_le32);
LZO_UNUSED_FUNC(lzo_memops_get_ne16);
LZO_UNUSED_FUNC(lzo_memops_get_ne32);
LZO_UNUSED_FUNC(lzo_memops_put_le16);
LZO_UNUSED_FUNC(lzo_memops_put_le32);
LZO_UNUSED_FUNC(lzo_memops_put_ne16);
LZO_UNUSED_FUNC(lzo_memops_put_ne32);
LZO_UNUSED_FUNC(lzo_memops_unused_funcs);
}
#endif
#ifndef UA_SET1
#define UA_SET1 LZO_MEMOPS_SET1
#endif
#ifndef UA_SET2
#define UA_SET2 LZO_MEMOPS_SET2
#endif
#ifndef UA_SET3
#define UA_SET3 LZO_MEMOPS_SET3
#endif
#ifndef UA_SET4
#define UA_SET4 LZO_MEMOPS_SET4
#endif
#ifndef UA_MOVE1
#define UA_MOVE1 LZO_MEMOPS_MOVE1
#endif
#ifndef UA_MOVE2
#define UA_MOVE2 LZO_MEMOPS_MOVE2
#endif
#ifndef UA_MOVE3
#define UA_MOVE3 LZO_MEMOPS_MOVE3
#endif
#ifndef UA_MOVE4
#define UA_MOVE4 LZO_MEMOPS_MOVE4
#endif
#ifndef UA_MOVE8
#define UA_MOVE8 LZO_MEMOPS_MOVE8
#endif
#ifndef UA_COPY1
#define UA_COPY1 LZO_MEMOPS_COPY1
#endif
#ifndef UA_COPY2
#define UA_COPY2 LZO_MEMOPS_COPY2
#endif
#ifndef UA_COPY3
#define UA_COPY3 LZO_MEMOPS_COPY3
#endif
#ifndef UA_COPY4
#define UA_COPY4 LZO_MEMOPS_COPY4
#endif
#ifndef UA_COPY8
#define UA_COPY8 LZO_MEMOPS_COPY8
#endif
#ifndef UA_COPYN
#define UA_COPYN LZO_MEMOPS_COPYN
#endif
#ifndef UA_COPYN_X
#define UA_COPYN_X LZO_MEMOPS_COPYN
#endif
#ifndef UA_GET_LE16
#define UA_GET_LE16 LZO_MEMOPS_GET_LE16
#endif
#ifndef UA_GET_LE32
#define UA_GET_LE32 LZO_MEMOPS_GET_LE32
#endif
#ifdef LZO_MEMOPS_GET_LE64
#ifndef UA_GET_LE64
#define UA_GET_LE64 LZO_MEMOPS_GET_LE64
#endif
#endif
#ifndef UA_GET_NE16
#define UA_GET_NE16 LZO_MEMOPS_GET_NE16
#endif
#ifndef UA_GET_NE32
#define UA_GET_NE32 LZO_MEMOPS_GET_NE32
#endif
#ifdef LZO_MEMOPS_GET_NE64
#ifndef UA_GET_NE64
#define UA_GET_NE64 LZO_MEMOPS_GET_NE64
#endif
#endif
#ifndef UA_PUT_LE16
#define UA_PUT_LE16 LZO_MEMOPS_PUT_LE16
#endif
#ifndef UA_PUT_LE32
#define UA_PUT_LE32 LZO_MEMOPS_PUT_LE32
#endif
#ifndef UA_PUT_NE16
#define UA_PUT_NE16 LZO_MEMOPS_PUT_NE16
#endif
#ifndef UA_PUT_NE32
#define UA_PUT_NE32 LZO_MEMOPS_PUT_NE32
#endif
#define MEMCPY8_DS(dest,src,len) \
lzo_memcpy(dest,src,len); dest += len; src += len
#define BZERO8_PTR(s,l,n) \
lzo_memset((lzo_voidp)(s),0,(lzo_uint)(l)*(n))
#define MEMCPY_DS(dest,src,len) \
do *dest++ = *src++; while (--len > 0)
LZO_EXTERN(const lzo_bytep) lzo_copyright(void);
#ifndef __LZO_PTR_H
#define __LZO_PTR_H 1
#ifdef __cplusplus
extern "C" {
#endif
#if (LZO_ARCH_I086)
#error "LZO_ARCH_I086 is unsupported"
#elif (LZO_MM_PVP)
#error "LZO_MM_PVP is unsupported"
#else
#define PTR(a) ((lzo_uintptr_t) (a))
#define PTR_LINEAR(a) PTR(a)
#define PTR_ALIGNED_4(a) ((PTR_LINEAR(a) & 3) == 0)
#define PTR_ALIGNED_8(a) ((PTR_LINEAR(a) & 7) == 0)
#define PTR_ALIGNED2_4(a,b) (((PTR_LINEAR(a) | PTR_LINEAR(b)) & 3) == 0)
#define PTR_ALIGNED2_8(a,b) (((PTR_LINEAR(a) | PTR_LINEAR(b)) & 7) == 0)
#endif
#define PTR_LT(a,b) (PTR(a) < PTR(b))
#define PTR_GE(a,b) (PTR(a) >= PTR(b))
#define PTR_DIFF(a,b) (PTR(a) - PTR(b))
#define pd(a,b) ((lzo_uint) ((a)-(b)))
LZO_EXTERN(lzo_uintptr_t)
__lzo_ptr_linear(const lzo_voidp ptr);
typedef union
{
char a_char;
unsigned char a_uchar;
short a_short;
unsigned short a_ushort;
int a_int;
unsigned int a_uint;
long a_long;
unsigned long a_ulong;
lzo_int a_lzo_int;
lzo_uint a_lzo_uint;
lzo_xint a_lzo_xint;
lzo_int16_t a_lzo_int16_t;
lzo_uint16_t a_lzo_uint16_t;
lzo_int32_t a_lzo_int32_t;
lzo_uint32_t a_lzo_uint32_t;
#if defined(lzo_uint64_t)
lzo_int64_t a_lzo_int64_t;
lzo_uint64_t a_lzo_uint64_t;
#endif
size_t a_size_t;
ptrdiff_t a_ptrdiff_t;
lzo_uintptr_t a_lzo_uintptr_t;
void * a_void_p;
char * a_char_p;
unsigned char * a_uchar_p;
const void * a_c_void_p;
const char * a_c_char_p;
const unsigned char * a_c_uchar_p;
lzo_voidp a_lzo_voidp;
lzo_bytep a_lzo_bytep;
const lzo_voidp a_c_lzo_voidp;
const lzo_bytep a_c_lzo_bytep;
}
lzo_full_align_t;
#ifdef __cplusplus
}
#endif
#endif
#ifndef LZO_DETERMINISTIC
#define LZO_DETERMINISTIC 1
#endif
#ifndef LZO_DICT_USE_PTR
#define LZO_DICT_USE_PTR 1
#endif
#if (LZO_DICT_USE_PTR)
# define lzo_dict_t const lzo_bytep
# define lzo_dict_p lzo_dict_t *
#else
# define lzo_dict_t lzo_uint
# define lzo_dict_p lzo_dict_t *
#endif
#endif
#if !defined(MINILZO_CFG_SKIP_LZO_PTR)
LZO_PUBLIC(lzo_uintptr_t)
__lzo_ptr_linear(const lzo_voidp ptr)
{
lzo_uintptr_t p;
#if (LZO_ARCH_I086)
#error "LZO_ARCH_I086 is unsupported"
#elif (LZO_MM_PVP)
#error "LZO_MM_PVP is unsupported"
#else
p = (lzo_uintptr_t) PTR_LINEAR(ptr);
#endif
return p;
}
LZO_PUBLIC(unsigned)
__lzo_align_gap(const lzo_voidp ptr, lzo_uint size)
{
#if (__LZO_UINTPTR_T_IS_POINTER)
#error "__LZO_UINTPTR_T_IS_POINTER is unsupported"
#else
lzo_uintptr_t p, n;
p = __lzo_ptr_linear(ptr);
n = (((p + size - 1) / size) * size) - p;
#endif
assert(size > 0);
assert((long)n >= 0);
assert(n <= size);
return (unsigned)n;
}
#endif
#if !defined(MINILZO_CFG_SKIP_LZO_UTIL)
/* If you use the LZO library in a product, I would appreciate that you
* keep this copyright string in the executable of your product.
*/
static const char __lzo_copyright[] =
#if !defined(__LZO_IN_MINILZO)
LZO_VERSION_STRING;
#else
"\r\n\n"
"LZO data compression library.\n"
"$Copyright: LZO Copyright (C) 1996-2014 Markus Franz Xaver Johannes Oberhumer\n"
"<markus@oberhumer.com>\n"
"http://www.oberhumer.com $\n\n"
"$Id: LZO version: v" LZO_VERSION_STRING ", " LZO_VERSION_DATE " $\n"
"$Info: " LZO_INFO_STRING " $\n";
#endif
LZO_PUBLIC(const lzo_bytep)
lzo_copyright(void)
{
return (const lzo_bytep) __lzo_copyright;
}
LZO_PUBLIC(unsigned)
lzo_version(void)
{
return LZO_VERSION;
}
LZO_PUBLIC(const char *)
lzo_version_string(void)
{
return LZO_VERSION_STRING;
}
LZO_PUBLIC(const char *)
lzo_version_date(void)
{
return LZO_VERSION_DATE;
}
LZO_PUBLIC(const lzo_charp)
_lzo_version_string(void)
{
return LZO_VERSION_STRING;
}
LZO_PUBLIC(const lzo_charp)
_lzo_version_date(void)
{
return LZO_VERSION_DATE;
}
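/* Adler-32 checksum: LZO_BASE is the Adler-32 modulus (65521, the largest
 * prime below 2^16) and LZO_NMAX is the largest block length for which
 * the running sums cannot overflow an unsigned 32-bit accumulator.
 */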
#define LZO_BASE 65521u
#define LZO_NMAX 5552
#define LZO_DO1(buf,i) s1 += buf[i]; s2 += s1
#define LZO_DO2(buf,i) LZO_DO1(buf,i); LZO_DO1(buf,i+1)
#define LZO_DO4(buf,i) LZO_DO2(buf,i); LZO_DO2(buf,i+2)
#define LZO_DO8(buf,i) LZO_DO4(buf,i); LZO_DO4(buf,i+4)
#define LZO_DO16(buf,i) LZO_DO8(buf,i); LZO_DO8(buf,i+8)
LZO_PUBLIC(lzo_uint32_t)
lzo_adler32(lzo_uint32_t adler, const lzo_bytep buf, lzo_uint len)
{
lzo_uint32_t s1 = adler & 0xffff;
lzo_uint32_t s2 = (adler >> 16) & 0xffff;
unsigned k;
if (buf == NULL)
return 1;
while (len > 0)
{
k = len < LZO_NMAX ? (unsigned) len : LZO_NMAX;
len -= k;
if (k >= 16) do
{
LZO_DO16(buf,0);
buf += 16;
k -= 16;
} while (k >= 16);
if (k != 0) do
{
s1 += *buf++;
s2 += s1;
} while (--k > 0);
s1 %= LZO_BASE;
s2 %= LZO_BASE;
}
return (s2 << 16) | s1;
}
#undef LZO_DO1
#undef LZO_DO2
#undef LZO_DO4
#undef LZO_DO8
#undef LZO_DO16
#endif
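/* Freestanding replacements for memcmp/memcpy/memmove/memset, used when
 * the C library versions are unavailable or huge-pointer memory models
 * are in effect; otherwise they forward to the <string.h> functions.
 */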
#if !defined(MINILZO_CFG_SKIP_LZO_STRING)
#undef lzo_memcmp
#undef lzo_memcpy
#undef lzo_memmove
#undef lzo_memset
#if !defined(__LZO_MMODEL_HUGE)
# undef LZO_HAVE_MM_HUGE_PTR
#endif
#define lzo_hsize_t lzo_uint
#define lzo_hvoid_p lzo_voidp
#define lzo_hbyte_p lzo_bytep
#define LZOLIB_PUBLIC(r,f) LZO_PUBLIC(r) f
#define lzo_hmemcmp lzo_memcmp
#define lzo_hmemcpy lzo_memcpy
#define lzo_hmemmove lzo_memmove
#define lzo_hmemset lzo_memset
#define __LZOLIB_HMEMCPY_CH_INCLUDED 1
#if !defined(LZOLIB_PUBLIC)
# define LZOLIB_PUBLIC(r,f) r __LZOLIB_FUNCNAME(f)
#endif
LZOLIB_PUBLIC(int, lzo_hmemcmp) (const lzo_hvoid_p s1, const lzo_hvoid_p s2, lzo_hsize_t len)
{
#if (LZO_HAVE_MM_HUGE_PTR) || !(HAVE_MEMCMP)
const lzo_hbyte_p p1 = LZO_STATIC_CAST(const lzo_hbyte_p, s1);
const lzo_hbyte_p p2 = LZO_STATIC_CAST(const lzo_hbyte_p, s2);
if __lzo_likely(len > 0) do
{
int d = *p1 - *p2;
if (d != 0)
return d;
p1++; p2++;
} while __lzo_likely(--len > 0);
return 0;
#else
return memcmp(s1, s2, len);
#endif
}
LZOLIB_PUBLIC(lzo_hvoid_p, lzo_hmemcpy) (lzo_hvoid_p dest, const lzo_hvoid_p src, lzo_hsize_t len)
{
#if (LZO_HAVE_MM_HUGE_PTR) || !(HAVE_MEMCPY)
lzo_hbyte_p p1 = LZO_STATIC_CAST(lzo_hbyte_p, dest);
const lzo_hbyte_p p2 = LZO_STATIC_CAST(const lzo_hbyte_p, src);
if (!(len > 0) || p1 == p2)
return dest;
do
*p1++ = *p2++;
while __lzo_likely(--len > 0);
return dest;
#else
return memcpy(dest, src, len);
#endif
}
LZOLIB_PUBLIC(lzo_hvoid_p, lzo_hmemmove) (lzo_hvoid_p dest, const lzo_hvoid_p src, lzo_hsize_t len)
{
#if (LZO_HAVE_MM_HUGE_PTR) || !(HAVE_MEMMOVE)
lzo_hbyte_p p1 = LZO_STATIC_CAST(lzo_hbyte_p, dest);
const lzo_hbyte_p p2 = LZO_STATIC_CAST(const lzo_hbyte_p, src);
if (!(len > 0) || p1 == p2)
return dest;
if (p1 < p2)
{
do
*p1++ = *p2++;
while __lzo_likely(--len > 0);
}
else
{
p1 += len;
p2 += len;
do
*--p1 = *--p2;
while __lzo_likely(--len > 0);
}
return dest;
#else
return memmove(dest, src, len);
#endif
}
LZOLIB_PUBLIC(lzo_hvoid_p, lzo_hmemset) (lzo_hvoid_p s, int cc, lzo_hsize_t len)
{
#if (LZO_HAVE_MM_HUGE_PTR) || !(HAVE_MEMSET)
lzo_hbyte_p p = LZO_STATIC_CAST(lzo_hbyte_p, s);
unsigned char c = LZO_ITRUNC(unsigned char, cc);
if __lzo_likely(len > 0) do
*p++ = c;
while __lzo_likely(--len > 0);
return s;
#else
return memset(s, cc, len);
#endif
}
#undef LZOLIB_PUBLIC
#endif
#if !defined(MINILZO_CFG_SKIP_LZO_INIT)
#if !defined(__LZO_IN_MINILZO)
#define LZO_WANT_ACC_CHK_CH 1
#undef LZOCHK_ASSERT
LZOCHK_ASSERT((LZO_UINT32_C(1) << (int)(8*sizeof(LZO_UINT32_C(1))-1)) > 0)
LZOCHK_ASSERT_IS_SIGNED_T(lzo_int)
LZOCHK_ASSERT_IS_UNSIGNED_T(lzo_uint)
#if !(__LZO_UINTPTR_T_IS_POINTER)
LZOCHK_ASSERT_IS_UNSIGNED_T(lzo_uintptr_t)
#endif
LZOCHK_ASSERT(sizeof(lzo_uintptr_t) >= sizeof(lzo_voidp))
LZOCHK_ASSERT_IS_UNSIGNED_T(lzo_xint)
#endif
#undef LZOCHK_ASSERT
union lzo_config_check_union {
lzo_uint a[2];
unsigned char b[2*LZO_MAX(8,sizeof(lzo_uint))];
#if defined(lzo_uint64_t)
lzo_uint64_t c[2];
#endif
};
#if 0
#define u2p(ptr,off) ((lzo_voidp) (((lzo_bytep)(lzo_voidp)(ptr)) + (off)))
#else
static __lzo_noinline lzo_voidp u2p(lzo_voidp ptr, lzo_uint off)
{
return (lzo_voidp) ((lzo_bytep) ptr + off);
}
#endif
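/* Runtime sanity check: verifies that the compile-time assumptions about
 * endianness, unaligned access and the bit-scan helpers hold on the
 * running machine; __lzo_init_v2() reports LZO_E_ERROR if they do not.
 */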
LZO_PUBLIC(int)
_lzo_config_check(void)
{
#if (LZO_CC_CLANG && (LZO_CC_CLANG >= 0x030100ul && LZO_CC_CLANG < 0x030300ul))
# if 0
volatile
# endif
#endif
union lzo_config_check_union u;
lzo_voidp p;
unsigned r = 1;
u.a[0] = u.a[1] = 0;
p = u2p(&u, 0);
r &= ((* (lzo_bytep) p) == 0);
#if !(LZO_CFG_NO_CONFIG_CHECK)
#if (LZO_ABI_BIG_ENDIAN)
u.a[0] = u.a[1] = 0; u.b[sizeof(lzo_uint) - 1] = 128;
p = u2p(&u, 0);
r &= ((* (lzo_uintp) p) == 128);
#endif
#if (LZO_ABI_LITTLE_ENDIAN)
u.a[0] = u.a[1] = 0; u.b[0] = 128;
p = u2p(&u, 0);
r &= ((* (lzo_uintp) p) == 128);
#endif
u.a[0] = u.a[1] = 0;
u.b[0] = 1; u.b[3] = 2;
p = u2p(&u, 1);
r &= UA_GET_NE16(p) == 0;
r &= UA_GET_LE16(p) == 0;
u.b[1] = 128;
r &= UA_GET_LE16(p) == 128;
u.b[2] = 129;
r &= UA_GET_LE16(p) == LZO_UINT16_C(0x8180);
#if (LZO_ABI_BIG_ENDIAN)
r &= UA_GET_NE16(p) == LZO_UINT16_C(0x8081);
#endif
#if (LZO_ABI_LITTLE_ENDIAN)
r &= UA_GET_NE16(p) == LZO_UINT16_C(0x8180);
#endif
u.a[0] = u.a[1] = 0;
u.b[0] = 3; u.b[5] = 4;
p = u2p(&u, 1);
r &= UA_GET_NE32(p) == 0;
r &= UA_GET_LE32(p) == 0;
u.b[1] = 128;
r &= UA_GET_LE32(p) == 128;
u.b[2] = 129; u.b[3] = 130; u.b[4] = 131;
r &= UA_GET_LE32(p) == LZO_UINT32_C(0x83828180);
#if (LZO_ABI_BIG_ENDIAN)
r &= UA_GET_NE32(p) == LZO_UINT32_C(0x80818283);
#endif
#if (LZO_ABI_LITTLE_ENDIAN)
r &= UA_GET_NE32(p) == LZO_UINT32_C(0x83828180);
#endif
#if defined(UA_GET_NE64)
u.c[0] = u.c[1] = 0;
u.b[0] = 5; u.b[9] = 6;
p = u2p(&u, 1);
u.c[0] = u.c[1] = 0;
r &= UA_GET_NE64(p) == 0;
#if defined(UA_GET_LE64)
r &= UA_GET_LE64(p) == 0;
u.b[1] = 128;
r &= UA_GET_LE64(p) == 128;
#endif
#endif
#if defined(lzo_bitops_ctlz32)
{ unsigned i = 0; lzo_uint32_t v;
for (v = 1; v != 0 && r == 1; v <<= 1, i++) {
r &= lzo_bitops_ctlz32(v) == 31 - i;
r &= lzo_bitops_ctlz32_func(v) == 31 - i;
}}
#endif
#if defined(lzo_bitops_ctlz64)
{ unsigned i = 0; lzo_uint64_t v;
for (v = 1; v != 0 && r == 1; v <<= 1, i++) {
r &= lzo_bitops_ctlz64(v) == 63 - i;
r &= lzo_bitops_ctlz64_func(v) == 63 - i;
}}
#endif
#if defined(lzo_bitops_cttz32)
{ unsigned i = 0; lzo_uint32_t v;
for (v = 1; v != 0 && r == 1; v <<= 1, i++) {
r &= lzo_bitops_cttz32(v) == i;
r &= lzo_bitops_cttz32_func(v) == i;
}}
#endif
#if defined(lzo_bitops_cttz64)
{ unsigned i = 0; lzo_uint64_t v;
for (v = 1; v != 0 && r == 1; v <<= 1, i++) {
r &= lzo_bitops_cttz64(v) == i;
r &= lzo_bitops_cttz64_func(v) == i;
}}
#endif
#endif
LZO_UNUSED_FUNC(lzo_bitops_unused_funcs);
return r == 1 ? LZO_E_OK : LZO_E_ERROR;
}
LZO_PUBLIC(int)
__lzo_init_v2(unsigned v, int s1, int s2, int s3, int s4, int s5,
int s6, int s7, int s8, int s9)
{
int r;
#if defined(__LZO_IN_MINILZO)
#elif (LZO_CC_MSC && ((_MSC_VER) < 700))
#else
#define LZO_WANT_ACC_CHK_CH 1
#undef LZOCHK_ASSERT
#define LZOCHK_ASSERT(expr) LZO_COMPILE_TIME_ASSERT(expr)
#endif
#undef LZOCHK_ASSERT
if (v == 0)
return LZO_E_ERROR;
r = (s1 == -1 || s1 == (int) sizeof(short)) &&
(s2 == -1 || s2 == (int) sizeof(int)) &&
(s3 == -1 || s3 == (int) sizeof(long)) &&
(s4 == -1 || s4 == (int) sizeof(lzo_uint32_t)) &&
(s5 == -1 || s5 == (int) sizeof(lzo_uint)) &&
(s6 == -1 || s6 == (int) lzo_sizeof_dict_t) &&
(s7 == -1 || s7 == (int) sizeof(char *)) &&
(s8 == -1 || s8 == (int) sizeof(lzo_voidp)) &&
(s9 == -1 || s9 == (int) sizeof(lzo_callback_t));
if (!r)
return LZO_E_ERROR;
r = _lzo_config_check();
if (r != LZO_E_OK)
return r;
return r;
}
#if !defined(__LZO_IN_MINILZO)
#if (LZO_OS_WIN16 && LZO_CC_WATCOMC) && defined(__SW_BD)
#if 0
BOOL FAR PASCAL LibMain ( HANDLE hInstance, WORD wDataSegment,
WORD wHeapSize, LPSTR lpszCmdLine )
#else
int __far __pascal LibMain ( int a, short b, short c, long d )
#endif
{
LZO_UNUSED(a); LZO_UNUSED(b); LZO_UNUSED(c); LZO_UNUSED(d);
return 1;
}
#endif
#endif
#endif
#define LZO1X 1
#define LZO_EOF_CODE 1
#define M2_MAX_OFFSET 0x0800
#if !defined(MINILZO_CFG_SKIP_LZO1X_1_COMPRESS)
#if 1 && defined(UA_GET_LE32)
#undef LZO_DICT_USE_PTR
#define LZO_DICT_USE_PTR 0
#undef lzo_dict_t
#define lzo_dict_t lzo_uint16_t
#endif
#define LZO_NEED_DICT_H 1
#ifndef D_BITS
#define D_BITS 14
#endif
#define D_INDEX1(d,p) d = DM(DMUL(0x21,DX3(p,5,5,6)) >> 5)
#define D_INDEX2(d,p) d = (d & (D_MASK & 0x7ff)) ^ (D_HIGH | 0x1f)
#if 1
#define DINDEX(dv,p) DM(((DMUL(0x1824429d,dv)) >> (32-D_BITS)))
#else
#define DINDEX(dv,p) DM((dv) + ((dv) >> (32-D_BITS)))
#endif
#ifndef __LZO_CONFIG1X_H
#define __LZO_CONFIG1X_H 1
#if !defined(LZO1X) && !defined(LZO1Y) && !defined(LZO1Z)
# define LZO1X 1
#endif
#if !defined(__LZO_IN_MINILZO)
#include "lzo/lzo1x.h"
#endif
#ifndef LZO_EOF_CODE
#define LZO_EOF_CODE 1
#endif
#undef LZO_DETERMINISTIC
#define M1_MAX_OFFSET 0x0400
#ifndef M2_MAX_OFFSET
#define M2_MAX_OFFSET 0x0800
#endif
#define M3_MAX_OFFSET 0x4000
#define M4_MAX_OFFSET 0xbfff
#define MX_MAX_OFFSET (M1_MAX_OFFSET + M2_MAX_OFFSET)
#define M1_MIN_LEN 2
#define M1_MAX_LEN 2
#define M2_MIN_LEN 3
#ifndef M2_MAX_LEN
#define M2_MAX_LEN 8
#endif
#define M3_MIN_LEN 3
#define M3_MAX_LEN 33
#define M4_MIN_LEN 3
#define M4_MAX_LEN 9
#define M1_MARKER 0
#define M2_MARKER 64
#define M3_MARKER 32
#define M4_MARKER 16
#ifndef MIN_LOOKAHEAD
#define MIN_LOOKAHEAD (M2_MAX_LEN + 1)
#endif
#if defined(LZO_NEED_DICT_H)
#ifndef LZO_HASH
#define LZO_HASH LZO_HASH_LZO_INCREMENTAL_B
#endif
#define DL_MIN_LEN M2_MIN_LEN
#ifndef __LZO_DICT_H
#define __LZO_DICT_H 1
#ifdef __cplusplus
extern "C" {
#endif
#if !defined(D_BITS) && defined(DBITS)
# define D_BITS DBITS
#endif
#if !defined(D_BITS)
# error "D_BITS is not defined"
#endif
#if (D_BITS < 16)
# define D_SIZE LZO_SIZE(D_BITS)
# define D_MASK LZO_MASK(D_BITS)
#else
# define D_SIZE LZO_USIZE(D_BITS)
# define D_MASK LZO_UMASK(D_BITS)
#endif
#define D_HIGH ((D_MASK >> 1) + 1)
#if !defined(DD_BITS)
# define DD_BITS 0
#endif
#define DD_SIZE LZO_SIZE(DD_BITS)
#define DD_MASK LZO_MASK(DD_BITS)
#if !defined(DL_BITS)
# define DL_BITS (D_BITS - DD_BITS)
#endif
#if (DL_BITS < 16)
# define DL_SIZE LZO_SIZE(DL_BITS)
# define DL_MASK LZO_MASK(DL_BITS)
#else
# define DL_SIZE LZO_USIZE(DL_BITS)
# define DL_MASK LZO_UMASK(DL_BITS)
#endif
#if (D_BITS != DL_BITS + DD_BITS)
# error "D_BITS does not match"
#endif
#if (D_BITS < 6 || D_BITS > 18)
# error "invalid D_BITS"
#endif
#if (DL_BITS < 6 || DL_BITS > 20)
# error "invalid DL_BITS"
#endif
#if (DD_BITS < 0 || DD_BITS > 6)
# error "invalid DD_BITS"
#endif
#if !defined(DL_MIN_LEN)
# define DL_MIN_LEN 3
#endif
#if !defined(DL_SHIFT)
# define DL_SHIFT ((DL_BITS + (DL_MIN_LEN - 1)) / DL_MIN_LEN)
#endif
#define LZO_HASH_GZIP 1
#define LZO_HASH_GZIP_INCREMENTAL 2
#define LZO_HASH_LZO_INCREMENTAL_A 3
#define LZO_HASH_LZO_INCREMENTAL_B 4
#if !defined(LZO_HASH)
# error "choose a hashing strategy"
#endif
#undef DM
#undef DX
#if (DL_MIN_LEN == 3)
# define _DV2_A(p,shift1,shift2) \
(((( (lzo_xint)((p)[0]) << shift1) ^ (p)[1]) << shift2) ^ (p)[2])
# define _DV2_B(p,shift1,shift2) \
(((( (lzo_xint)((p)[2]) << shift1) ^ (p)[1]) << shift2) ^ (p)[0])
# define _DV3_B(p,shift1,shift2,shift3) \
((_DV2_B((p)+1,shift1,shift2) << (shift3)) ^ (p)[0])
#elif (DL_MIN_LEN == 2)
# define _DV2_A(p,shift1,shift2) \
(( (lzo_xint)(p[0]) << shift1) ^ p[1])
# define _DV2_B(p,shift1,shift2) \
(( (lzo_xint)(p[1]) << shift1) ^ p[2])
#else
# error "invalid DL_MIN_LEN"
#endif
#define _DV_A(p,shift) _DV2_A(p,shift,shift)
#define _DV_B(p,shift) _DV2_B(p,shift,shift)
#define DA2(p,s1,s2) \
(((((lzo_xint)((p)[2]) << (s2)) + (p)[1]) << (s1)) + (p)[0])
#define DS2(p,s1,s2) \
(((((lzo_xint)((p)[2]) << (s2)) - (p)[1]) << (s1)) - (p)[0])
#define DX2(p,s1,s2) \
(((((lzo_xint)((p)[2]) << (s2)) ^ (p)[1]) << (s1)) ^ (p)[0])
#define DA3(p,s1,s2,s3) ((DA2((p)+1,s2,s3) << (s1)) + (p)[0])
#define DS3(p,s1,s2,s3) ((DS2((p)+1,s2,s3) << (s1)) - (p)[0])
#define DX3(p,s1,s2,s3) ((DX2((p)+1,s2,s3) << (s1)) ^ (p)[0])
#define DMS(v,s) ((lzo_uint) (((v) & (D_MASK >> (s))) << (s)))
#define DM(v) DMS(v,0)
#if (LZO_HASH == LZO_HASH_GZIP)
# define _DINDEX(dv,p) (_DV_A((p),DL_SHIFT))
#elif (LZO_HASH == LZO_HASH_GZIP_INCREMENTAL)
# define __LZO_HASH_INCREMENTAL 1
# define DVAL_FIRST(dv,p) dv = _DV_A((p),DL_SHIFT)
# define DVAL_NEXT(dv,p) dv = (((dv) << DL_SHIFT) ^ p[2])
# define _DINDEX(dv,p) (dv)
# define DVAL_LOOKAHEAD DL_MIN_LEN
#elif (LZO_HASH == LZO_HASH_LZO_INCREMENTAL_A)
# define __LZO_HASH_INCREMENTAL 1
# define DVAL_FIRST(dv,p) dv = _DV_A((p),5)
# define DVAL_NEXT(dv,p) \
dv ^= (lzo_xint)(p[-1]) << (2*5); dv = (((dv) << 5) ^ p[2])
# define _DINDEX(dv,p) ((DMUL(0x9f5f,dv)) >> 5)
# define DVAL_LOOKAHEAD DL_MIN_LEN
#elif (LZO_HASH == LZO_HASH_LZO_INCREMENTAL_B)
# define __LZO_HASH_INCREMENTAL 1
# define DVAL_FIRST(dv,p) dv = _DV_B((p),5)
# define DVAL_NEXT(dv,p) \
dv ^= p[-1]; dv = (((dv) >> 5) ^ ((lzo_xint)(p[2]) << (2*5)))
# define _DINDEX(dv,p) ((DMUL(0x9f5f,dv)) >> 5)
# define DVAL_LOOKAHEAD DL_MIN_LEN
#else
# error "choose a hashing strategy"
#endif
#ifndef DINDEX
#define DINDEX(dv,p) ((lzo_uint)((_DINDEX(dv,p)) & DL_MASK) << DD_BITS)
#endif
#if !defined(DINDEX1) && defined(D_INDEX1)
#define DINDEX1 D_INDEX1
#endif
#if !defined(DINDEX2) && defined(D_INDEX2)
#define DINDEX2 D_INDEX2
#endif
#if !defined(__LZO_HASH_INCREMENTAL)
# define DVAL_FIRST(dv,p) ((void) 0)
# define DVAL_NEXT(dv,p) ((void) 0)
# define DVAL_LOOKAHEAD 0
#endif
#if !defined(DVAL_ASSERT)
#if defined(__LZO_HASH_INCREMENTAL) && !defined(NDEBUG)
#if (LZO_CC_CLANG || (LZO_CC_GNUC >= 0x020700ul) || LZO_CC_LLVM)
static void __attribute__((__unused__))
#else
static void
#endif
DVAL_ASSERT(lzo_xint dv, const lzo_bytep p)
{
lzo_xint df;
DVAL_FIRST(df,(p));
assert(DINDEX(dv,p) == DINDEX(df,p));
}
#else
# define DVAL_ASSERT(dv,p) ((void) 0)
#endif
#endif
#if (LZO_DICT_USE_PTR)
# define DENTRY(p,in) (p)
# define GINDEX(m_pos,m_off,dict,dindex,in) m_pos = dict[dindex]
#else
# define DENTRY(p,in) ((lzo_dict_t) pd(p, in))
# define GINDEX(m_pos,m_off,dict,dindex,in) m_off = dict[dindex]
#endif
#if (DD_BITS == 0)
# define UPDATE_D(dict,drun,dv,p,in) dict[ DINDEX(dv,p) ] = DENTRY(p,in)
# define UPDATE_I(dict,drun,index,p,in) dict[index] = DENTRY(p,in)
# define UPDATE_P(ptr,drun,p,in) (ptr)[0] = DENTRY(p,in)
#else
# define UPDATE_D(dict,drun,dv,p,in) \
dict[ DINDEX(dv,p) + drun++ ] = DENTRY(p,in); drun &= DD_MASK
# define UPDATE_I(dict,drun,index,p,in) \
dict[ (index) + drun++ ] = DENTRY(p,in); drun &= DD_MASK
# define UPDATE_P(ptr,drun,p,in) \
(ptr) [ drun++ ] = DENTRY(p,in); drun &= DD_MASK
#endif
#if (LZO_DICT_USE_PTR)
#define LZO_CHECK_MPOS_DET(m_pos,m_off,in,ip,max_offset) \
(m_pos == NULL || (m_off = pd(ip, m_pos)) > max_offset)
#define LZO_CHECK_MPOS_NON_DET(m_pos,m_off,in,ip,max_offset) \
(BOUNDS_CHECKING_OFF_IN_EXPR(( \
m_pos = ip - (lzo_uint) PTR_DIFF(ip,m_pos), \
PTR_LT(m_pos,in) || \
(m_off = (lzo_uint) PTR_DIFF(ip,m_pos)) == 0 || \
m_off > max_offset )))
#else
#define LZO_CHECK_MPOS_DET(m_pos,m_off,in,ip,max_offset) \
(m_off == 0 || \
((m_off = pd(ip, in) - m_off) > max_offset) || \
(m_pos = (ip) - (m_off), 0) )
#define LZO_CHECK_MPOS_NON_DET(m_pos,m_off,in,ip,max_offset) \
(pd(ip, in) <= m_off || \
((m_off = pd(ip, in) - m_off) > max_offset) || \
(m_pos = (ip) - (m_off), 0) )
#endif
#if (LZO_DETERMINISTIC)
# define LZO_CHECK_MPOS LZO_CHECK_MPOS_DET
#else
# define LZO_CHECK_MPOS LZO_CHECK_MPOS_NON_DET
#endif
#ifdef __cplusplus
}
#endif
#endif
#endif
#endif
#define LZO_DETERMINISTIC !(LZO_DICT_USE_PTR)
#ifndef DO_COMPRESS
#define DO_COMPRESS lzo1x_1_compress
#endif
#if 1 && defined(DO_COMPRESS) && !defined(do_compress)
# define do_compress LZO_PP_ECONCAT2(DO_COMPRESS,_core)
#endif
static __lzo_noinline lzo_uint
do_compress ( const lzo_bytep in , lzo_uint in_len,
lzo_bytep out, lzo_uintp out_len,
lzo_uint ti, lzo_voidp wrkmem)
{
const lzo_bytep ip;
lzo_bytep op;
const lzo_bytep const in_end = in + in_len;
const lzo_bytep const ip_end = in + in_len - 20;
const lzo_bytep ii;
lzo_dict_p const dict = (lzo_dict_p) wrkmem;
op = out;
ip = in;
ii = ip;
ip += ti < 4 ? 4 - ti : 0;
for (;;)
{
const lzo_bytep m_pos;
#if !(LZO_DETERMINISTIC)
LZO_DEFINE_UNINITIALIZED_VAR(lzo_uint, m_off, 0);
lzo_uint m_len;
lzo_uint dindex;
next:
if __lzo_unlikely(ip >= ip_end)
break;
DINDEX1(dindex,ip);
GINDEX(m_pos,m_off,dict,dindex,in);
if (LZO_CHECK_MPOS_NON_DET(m_pos,m_off,in,ip,M4_MAX_OFFSET))
goto literal;
#if 1
if (m_off <= M2_MAX_OFFSET || m_pos[3] == ip[3])
goto try_match;
DINDEX2(dindex,ip);
#endif
GINDEX(m_pos,m_off,dict,dindex,in);
if (LZO_CHECK_MPOS_NON_DET(m_pos,m_off,in,ip,M4_MAX_OFFSET))
goto literal;
if (m_off <= M2_MAX_OFFSET || m_pos[3] == ip[3])
goto try_match;
goto literal;
try_match:
#if (LZO_OPT_UNALIGNED32)
if (UA_GET_NE32(m_pos) != UA_GET_NE32(ip))
#else
if (m_pos[0] != ip[0] || m_pos[1] != ip[1] || m_pos[2] != ip[2] || m_pos[3] != ip[3])
#endif
{
literal:
UPDATE_I(dict,0,dindex,ip,in);
ip += 1 + ((ip - ii) >> 5);
continue;
}
UPDATE_I(dict,0,dindex,ip,in);
#else
lzo_uint m_off;
lzo_uint m_len;
{
lzo_uint32_t dv;
lzo_uint dindex;
literal:
ip += 1 + ((ip - ii) >> 5);
next:
if __lzo_unlikely(ip >= ip_end)
break;
dv = UA_GET_LE32(ip);
dindex = DINDEX(dv,ip);
GINDEX(m_off,m_pos,in+dict,dindex,in);
UPDATE_I(dict,0,dindex,ip,in);
if __lzo_unlikely(dv != UA_GET_LE32(m_pos))
goto literal;
}
#endif
ii -= ti; ti = 0;
{
lzo_uint t = pd(ip,ii);
if (t != 0)
{
if (t <= 3)
{
op[-2] = LZO_BYTE(op[-2] | t);
#if (LZO_OPT_UNALIGNED32)
UA_COPY4(op, ii);
op += t;
#else
{ do *op++ = *ii++; while (--t > 0); }
#endif
}
#if (LZO_OPT_UNALIGNED32) || (LZO_OPT_UNALIGNED64)
else if (t <= 16)
{
*op++ = LZO_BYTE(t - 3);
UA_COPY8(op, ii);
UA_COPY8(op+8, ii+8);
op += t;
}
#endif
else
{
if (t <= 18)
*op++ = LZO_BYTE(t - 3);
else
{
lzo_uint tt = t - 18;
*op++ = 0;
while __lzo_unlikely(tt > 255)
{
tt -= 255;
UA_SET1(op, 0);
op++;
}
assert(tt > 0);
*op++ = LZO_BYTE(tt);
}
#if (LZO_OPT_UNALIGNED32) || (LZO_OPT_UNALIGNED64)
do {
UA_COPY8(op, ii);
UA_COPY8(op+8, ii+8);
op += 16; ii += 16; t -= 16;
} while (t >= 16); if (t > 0)
#endif
{ do *op++ = *ii++; while (--t > 0); }
}
}
}
m_len = 4;
{
#if (LZO_OPT_UNALIGNED64)
lzo_uint64_t v;
v = UA_GET_NE64(ip + m_len) ^ UA_GET_NE64(m_pos + m_len);
if __lzo_unlikely(v == 0) {
do {
m_len += 8;
v = UA_GET_NE64(ip + m_len) ^ UA_GET_NE64(m_pos + m_len);
if __lzo_unlikely(ip + m_len >= ip_end)
goto m_len_done;
} while (v == 0);
}
#if (LZO_ABI_BIG_ENDIAN) && defined(lzo_bitops_ctlz64)
m_len += lzo_bitops_ctlz64(v) / CHAR_BIT;
#elif (LZO_ABI_BIG_ENDIAN)
if ((v >> (64 - CHAR_BIT)) == 0) do {
v <<= CHAR_BIT;
m_len += 1;
} while ((v >> (64 - CHAR_BIT)) == 0);
#elif (LZO_ABI_LITTLE_ENDIAN) && defined(lzo_bitops_cttz64)
m_len += lzo_bitops_cttz64(v) / CHAR_BIT;
#elif (LZO_ABI_LITTLE_ENDIAN)
if ((v & UCHAR_MAX) == 0) do {
v >>= CHAR_BIT;
m_len += 1;
} while ((v & UCHAR_MAX) == 0);
#else
if (ip[m_len] == m_pos[m_len]) do {
m_len += 1;
} while (ip[m_len] == m_pos[m_len]);
#endif
#elif (LZO_OPT_UNALIGNED32)
lzo_uint32_t v;
v = UA_GET_NE32(ip + m_len) ^ UA_GET_NE32(m_pos + m_len);
if __lzo_unlikely(v == 0) {
do {
m_len += 4;
v = UA_GET_NE32(ip + m_len) ^ UA_GET_NE32(m_pos + m_len);
if (v != 0)
break;
m_len += 4;
v = UA_GET_NE32(ip + m_len) ^ UA_GET_NE32(m_pos + m_len);
if __lzo_unlikely(ip + m_len >= ip_end)
goto m_len_done;
} while (v == 0);
}
#if (LZO_ABI_BIG_ENDIAN) && defined(lzo_bitops_ctlz32)
m_len += lzo_bitops_ctlz32(v) / CHAR_BIT;
#elif (LZO_ABI_BIG_ENDIAN)
if ((v >> (32 - CHAR_BIT)) == 0) do {
v <<= CHAR_BIT;
m_len += 1;
} while ((v >> (32 - CHAR_BIT)) == 0);
#elif (LZO_ABI_LITTLE_ENDIAN) && defined(lzo_bitops_cttz32)
m_len += lzo_bitops_cttz32(v) / CHAR_BIT;
#elif (LZO_ABI_LITTLE_ENDIAN)
if ((v & UCHAR_MAX) == 0) do {
v >>= CHAR_BIT;
m_len += 1;
} while ((v & UCHAR_MAX) == 0);
#else
if (ip[m_len] == m_pos[m_len]) do {
m_len += 1;
} while (ip[m_len] == m_pos[m_len]);
#endif
#else
if __lzo_unlikely(ip[m_len] == m_pos[m_len]) {
do {
m_len += 1;
if (ip[m_len] != m_pos[m_len])
break;
m_len += 1;
if (ip[m_len] != m_pos[m_len])
break;
m_len += 1;
if (ip[m_len] != m_pos[m_len])
break;
m_len += 1;
if (ip[m_len] != m_pos[m_len])
break;
m_len += 1;
if (ip[m_len] != m_pos[m_len])
break;
m_len += 1;
if (ip[m_len] != m_pos[m_len])
break;
m_len += 1;
if (ip[m_len] != m_pos[m_len])
break;
m_len += 1;
if __lzo_unlikely(ip + m_len >= ip_end)
goto m_len_done;
} while (ip[m_len] == m_pos[m_len]);
}
#endif
}
m_len_done:
m_off = pd(ip,m_pos);
ip += m_len;
ii = ip;
if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET)
{
m_off -= 1;
#if defined(LZO1X)
*op++ = LZO_BYTE(((m_len - 1) << 5) | ((m_off & 7) << 2));
*op++ = LZO_BYTE(m_off >> 3);
#elif defined(LZO1Y)
*op++ = LZO_BYTE(((m_len + 1) << 4) | ((m_off & 3) << 2));
*op++ = LZO_BYTE(m_off >> 2);
#endif
}
else if (m_off <= M3_MAX_OFFSET)
{
m_off -= 1;
if (m_len <= M3_MAX_LEN)
*op++ = LZO_BYTE(M3_MARKER | (m_len - 2));
else
{
m_len -= M3_MAX_LEN;
*op++ = M3_MARKER | 0;
while __lzo_unlikely(m_len > 255)
{
m_len -= 255;
UA_SET1(op, 0);
op++;
}
*op++ = LZO_BYTE(m_len);
}
*op++ = LZO_BYTE(m_off << 2);
*op++ = LZO_BYTE(m_off >> 6);
}
else
{
m_off -= 0x4000;
if (m_len <= M4_MAX_LEN)
*op++ = LZO_BYTE(M4_MARKER | ((m_off >> 11) & 8) | (m_len - 2));
else
{
m_len -= M4_MAX_LEN;
*op++ = LZO_BYTE(M4_MARKER | ((m_off >> 11) & 8));
while __lzo_unlikely(m_len > 255)
{
m_len -= 255;
UA_SET1(op, 0);
op++;
}
*op++ = LZO_BYTE(m_len);
}
*op++ = LZO_BYTE(m_off << 2);
*op++ = LZO_BYTE(m_off >> 6);
}
goto next;
}
*out_len = pd(op, out);
return pd(in_end,ii-ti);
}
LZO_PUBLIC(int)
DO_COMPRESS ( const lzo_bytep in , lzo_uint in_len,
lzo_bytep out, lzo_uintp out_len,
lzo_voidp wrkmem )
{
const lzo_bytep ip = in;
lzo_bytep op = out;
lzo_uint l = in_len;
lzo_uint t = 0;
while (l > 20)
{
lzo_uint ll = l;
lzo_uintptr_t ll_end;
#if 0 || (LZO_DETERMINISTIC)
ll = LZO_MIN(ll, 49152);
#endif
ll_end = (lzo_uintptr_t)ip + ll;
if ((ll_end + ((t + ll) >> 5)) <= ll_end || (const lzo_bytep)(ll_end + ((t + ll) >> 5)) <= ip + ll)
break;
#if (LZO_DETERMINISTIC)
lzo_memset(wrkmem, 0, ((lzo_uint)1 << D_BITS) * sizeof(lzo_dict_t));
#endif
t = do_compress(ip,ll,op,out_len,t,wrkmem);
ip += ll;
op += *out_len;
l -= ll;
}
t += l;
if (t > 0)
{
const lzo_bytep ii = in + in_len - t;
if (op == out && t <= 238)
*op++ = LZO_BYTE(17 + t);
else if (t <= 3)
op[-2] = LZO_BYTE(op[-2] | t);
else if (t <= 18)
*op++ = LZO_BYTE(t - 3);
else
{
lzo_uint tt = t - 18;
*op++ = 0;
while (tt > 255)
{
tt -= 255;
UA_SET1(op, 0);
op++;
}
assert(tt > 0);
*op++ = LZO_BYTE(tt);
}
UA_COPYN(op, ii, t);
op += t;
}
*op++ = M4_MARKER | 1;
*op++ = 0;
*op++ = 0;
*out_len = pd(op, out);
return LZO_E_OK;
}
#endif
#undef do_compress
#undef DO_COMPRESS
#undef LZO_HASH
#undef LZO_TEST_OVERRUN
#undef DO_DECOMPRESS
#define DO_DECOMPRESS lzo1x_decompress
#if !defined(MINILZO_CFG_SKIP_LZO1X_DECOMPRESS)
#if defined(LZO_TEST_OVERRUN)
# if !defined(LZO_TEST_OVERRUN_INPUT)
# define LZO_TEST_OVERRUN_INPUT 2
# endif
# if !defined(LZO_TEST_OVERRUN_OUTPUT)
# define LZO_TEST_OVERRUN_OUTPUT 2
# endif
# if !defined(LZO_TEST_OVERRUN_LOOKBEHIND)
# define LZO_TEST_OVERRUN_LOOKBEHIND 1
# endif
#endif
#undef TEST_IP
#undef TEST_OP
#undef TEST_IP_AND_TEST_OP
#undef TEST_LB
#undef TEST_LBO
#undef NEED_IP
#undef NEED_OP
#undef TEST_IV
#undef TEST_OV
#undef HAVE_TEST_IP
#undef HAVE_TEST_OP
#undef HAVE_NEED_IP
#undef HAVE_NEED_OP
#undef HAVE_ANY_IP
#undef HAVE_ANY_OP
#if defined(LZO_TEST_OVERRUN_INPUT)
# if (LZO_TEST_OVERRUN_INPUT >= 1)
# define TEST_IP (ip < ip_end)
# endif
# if (LZO_TEST_OVERRUN_INPUT >= 2)
# define NEED_IP(x) \
if ((lzo_uint)(ip_end - ip) < (lzo_uint)(x)) goto input_overrun
# define TEST_IV(x) if ((x) > (lzo_uint)0 - (511)) goto input_overrun
# endif
#endif
#if defined(LZO_TEST_OVERRUN_OUTPUT)
# if (LZO_TEST_OVERRUN_OUTPUT >= 1)
# define TEST_OP (op <= op_end)
# endif
# if (LZO_TEST_OVERRUN_OUTPUT >= 2)
# undef TEST_OP
# define NEED_OP(x) \
if ((lzo_uint)(op_end - op) < (lzo_uint)(x)) goto output_overrun
# define TEST_OV(x) if ((x) > (lzo_uint)0 - (511)) goto output_overrun
# endif
#endif
#if defined(LZO_TEST_OVERRUN_LOOKBEHIND)
# define TEST_LB(m_pos) if (PTR_LT(m_pos,out) || PTR_GE(m_pos,op)) goto lookbehind_overrun
# define TEST_LBO(m_pos,o) if (PTR_LT(m_pos,out) || PTR_GE(m_pos,op-(o))) goto lookbehind_overrun
#else
# define TEST_LB(m_pos) ((void) 0)
# define TEST_LBO(m_pos,o) ((void) 0)
#endif
#if !defined(LZO_EOF_CODE) && !defined(TEST_IP)
# define TEST_IP (ip < ip_end)
#endif
#if defined(TEST_IP)
# define HAVE_TEST_IP 1
#else
# define TEST_IP 1
#endif
#if defined(TEST_OP)
# define HAVE_TEST_OP 1
#else
# define TEST_OP 1
#endif
#if defined(HAVE_TEST_IP) && defined(HAVE_TEST_OP)
# define TEST_IP_AND_TEST_OP (TEST_IP && TEST_OP)
#elif defined(HAVE_TEST_IP)
# define TEST_IP_AND_TEST_OP TEST_IP
#elif defined(HAVE_TEST_OP)
# define TEST_IP_AND_TEST_OP TEST_OP
#else
# define TEST_IP_AND_TEST_OP 1
#endif
#if defined(NEED_IP)
# define HAVE_NEED_IP 1
#else
# define NEED_IP(x) ((void) 0)
# define TEST_IV(x) ((void) 0)
#endif
#if defined(NEED_OP)
# define HAVE_NEED_OP 1
#else
# define NEED_OP(x) ((void) 0)
# define TEST_OV(x) ((void) 0)
#endif
#if defined(HAVE_TEST_IP) || defined(HAVE_NEED_IP)
# define HAVE_ANY_IP 1
#endif
#if defined(HAVE_TEST_OP) || defined(HAVE_NEED_OP)
# define HAVE_ANY_OP 1
#endif
#if defined(DO_DECOMPRESS)
LZO_PUBLIC(int)
DO_DECOMPRESS ( const lzo_bytep in , lzo_uint in_len,
lzo_bytep out, lzo_uintp out_len,
lzo_voidp wrkmem )
#endif
{
lzo_bytep op;
const lzo_bytep ip;
lzo_uint t;
#if defined(COPY_DICT)
lzo_uint m_off;
const lzo_bytep dict_end;
#else
const lzo_bytep m_pos;
#endif
const lzo_bytep const ip_end = in + in_len;
#if defined(HAVE_ANY_OP)
lzo_bytep const op_end = out + *out_len;
#endif
#if defined(LZO1Z)
lzo_uint last_m_off = 0;
#endif
LZO_UNUSED(wrkmem);
#if defined(COPY_DICT)
if (dict)
{
if (dict_len > M4_MAX_OFFSET)
{
dict += dict_len - M4_MAX_OFFSET;
dict_len = M4_MAX_OFFSET;
}
dict_end = dict + dict_len;
}
else
{
dict_len = 0;
dict_end = NULL;
}
#endif
*out_len = 0;
op = out;
ip = in;
NEED_IP(1);
if (*ip > 17)
{
t = *ip++ - 17;
if (t < 4)
goto match_next;
assert(t > 0); NEED_OP(t); NEED_IP(t+3);
do *op++ = *ip++; while (--t > 0);
goto first_literal_run;
}
for (;;)
{
NEED_IP(3);
t = *ip++;
if (t >= 16)
goto match;
if (t == 0)
{
while (*ip == 0)
{
t += 255;
ip++;
TEST_IV(t);
NEED_IP(1);
}
t += 15 + *ip++;
}
assert(t > 0); NEED_OP(t+3); NEED_IP(t+6);
#if (LZO_OPT_UNALIGNED64) && (LZO_OPT_UNALIGNED32)
t += 3;
if (t >= 8) do
{
UA_COPY8(op,ip);
op += 8; ip += 8; t -= 8;
} while (t >= 8);
if (t >= 4)
{
UA_COPY4(op,ip);
op += 4; ip += 4; t -= 4;
}
if (t > 0)
{
*op++ = *ip++;
if (t > 1) { *op++ = *ip++; if (t > 2) { *op++ = *ip++; } }
}
#elif (LZO_OPT_UNALIGNED32) || (LZO_ALIGNED_OK_4)
#if !(LZO_OPT_UNALIGNED32)
if (PTR_ALIGNED2_4(op,ip))
{
#endif
UA_COPY4(op,ip);
op += 4; ip += 4;
if (--t > 0)
{
if (t >= 4)
{
do {
UA_COPY4(op,ip);
op += 4; ip += 4; t -= 4;
} while (t >= 4);
if (t > 0) do *op++ = *ip++; while (--t > 0);
}
else
do *op++ = *ip++; while (--t > 0);
}
#if !(LZO_OPT_UNALIGNED32)
}
else
#endif
#endif
#if !(LZO_OPT_UNALIGNED32)
{
*op++ = *ip++; *op++ = *ip++; *op++ = *ip++;
do *op++ = *ip++; while (--t > 0);
}
#endif
first_literal_run:
t = *ip++;
if (t >= 16)
goto match;
#if defined(COPY_DICT)
#if defined(LZO1Z)
m_off = (1 + M2_MAX_OFFSET) + (t << 6) + (*ip++ >> 2);
last_m_off = m_off;
#else
m_off = (1 + M2_MAX_OFFSET) + (t >> 2) + (*ip++ << 2);
#endif
NEED_OP(3);
t = 3; COPY_DICT(t,m_off)
#else
#if defined(LZO1Z)
t = (1 + M2_MAX_OFFSET) + (t << 6) + (*ip++ >> 2);
m_pos = op - t;
last_m_off = t;
#else
m_pos = op - (1 + M2_MAX_OFFSET);
m_pos -= t >> 2;
m_pos -= *ip++ << 2;
#endif
TEST_LB(m_pos); NEED_OP(3);
*op++ = *m_pos++; *op++ = *m_pos++; *op++ = *m_pos;
#endif
goto match_done;
for (;;) {
match:
if (t >= 64)
{
#if defined(COPY_DICT)
#if defined(LZO1X)
m_off = 1 + ((t >> 2) & 7) + (*ip++ << 3);
t = (t >> 5) - 1;
#elif defined(LZO1Y)
m_off = 1 + ((t >> 2) & 3) + (*ip++ << 2);
t = (t >> 4) - 3;
#elif defined(LZO1Z)
m_off = t & 0x1f;
if (m_off >= 0x1c)
m_off = last_m_off;
else
{
m_off = 1 + (m_off << 6) + (*ip++ >> 2);
last_m_off = m_off;
}
t = (t >> 5) - 1;
#endif
#else
#if defined(LZO1X)
m_pos = op - 1;
m_pos -= (t >> 2) & 7;
m_pos -= *ip++ << 3;
t = (t >> 5) - 1;
#elif defined(LZO1Y)
m_pos = op - 1;
m_pos -= (t >> 2) & 3;
m_pos -= *ip++ << 2;
t = (t >> 4) - 3;
#elif defined(LZO1Z)
{
lzo_uint off = t & 0x1f;
m_pos = op;
if (off >= 0x1c)
{
assert(last_m_off > 0);
m_pos -= last_m_off;
}
else
{
off = 1 + (off << 6) + (*ip++ >> 2);
m_pos -= off;
last_m_off = off;
}
}
t = (t >> 5) - 1;
#endif
TEST_LB(m_pos); assert(t > 0); NEED_OP(t+3-1);
goto copy_match;
#endif
}
else if (t >= 32)
{
t &= 31;
if (t == 0)
{
while (*ip == 0)
{
t += 255;
ip++;
TEST_OV(t);
NEED_IP(1);
}
t += 31 + *ip++;
NEED_IP(2);
}
#if defined(COPY_DICT)
#if defined(LZO1Z)
m_off = 1 + (ip[0] << 6) + (ip[1] >> 2);
last_m_off = m_off;
#else
m_off = 1 + (ip[0] >> 2) + (ip[1] << 6);
#endif
#else
#if defined(LZO1Z)
{
lzo_uint off = 1 + (ip[0] << 6) + (ip[1] >> 2);
m_pos = op - off;
last_m_off = off;
}
#elif (LZO_OPT_UNALIGNED16) && (LZO_ABI_LITTLE_ENDIAN)
m_pos = op - 1;
m_pos -= UA_GET_LE16(ip) >> 2;
#else
m_pos = op - 1;
m_pos -= (ip[0] >> 2) + (ip[1] << 6);
#endif
#endif
ip += 2;
}
else if (t >= 16)
{
#if defined(COPY_DICT)
m_off = (t & 8) << 11;
#else
m_pos = op;
m_pos -= (t & 8) << 11;
#endif
t &= 7;
if (t == 0)
{
while (*ip == 0)
{
t += 255;
ip++;
TEST_OV(t);
NEED_IP(1);
}
t += 7 + *ip++;
NEED_IP(2);
}
#if defined(COPY_DICT)
#if defined(LZO1Z)
m_off += (ip[0] << 6) + (ip[1] >> 2);
#else
m_off += (ip[0] >> 2) + (ip[1] << 6);
#endif
ip += 2;
if (m_off == 0)
goto eof_found;
m_off += 0x4000;
#if defined(LZO1Z)
last_m_off = m_off;
#endif
#else
#if defined(LZO1Z)
m_pos -= (ip[0] << 6) + (ip[1] >> 2);
#elif (LZO_OPT_UNALIGNED16) && (LZO_ABI_LITTLE_ENDIAN)
m_pos -= UA_GET_LE16(ip) >> 2;
#else
m_pos -= (ip[0] >> 2) + (ip[1] << 6);
#endif
ip += 2;
if (m_pos == op)
goto eof_found;
m_pos -= 0x4000;
#if defined(LZO1Z)
last_m_off = pd((const lzo_bytep)op, m_pos);
#endif
#endif
}
else
{
#if defined(COPY_DICT)
#if defined(LZO1Z)
m_off = 1 + (t << 6) + (*ip++ >> 2);
last_m_off = m_off;
#else
m_off = 1 + (t >> 2) + (*ip++ << 2);
#endif
NEED_OP(2);
t = 2; COPY_DICT(t,m_off)
#else
#if defined(LZO1Z)
t = 1 + (t << 6) + (*ip++ >> 2);
m_pos = op - t;
last_m_off = t;
#else
m_pos = op - 1;
m_pos -= t >> 2;
m_pos -= *ip++ << 2;
#endif
TEST_LB(m_pos); NEED_OP(2);
*op++ = *m_pos++; *op++ = *m_pos;
#endif
goto match_done;
}
#if defined(COPY_DICT)
NEED_OP(t+3-1);
t += 3-1; COPY_DICT(t,m_off)
#else
TEST_LB(m_pos); assert(t > 0); NEED_OP(t+3-1);
#if (LZO_OPT_UNALIGNED64) && (LZO_OPT_UNALIGNED32)
if (op - m_pos >= 8)
{
t += (3 - 1);
if (t >= 8) do
{
UA_COPY8(op,m_pos);
op += 8; m_pos += 8; t -= 8;
} while (t >= 8);
if (t >= 4)
{
UA_COPY4(op,m_pos);
op += 4; m_pos += 4; t -= 4;
}
if (t > 0)
{
*op++ = m_pos[0];
if (t > 1) { *op++ = m_pos[1]; if (t > 2) { *op++ = m_pos[2]; } }
}
}
else
#elif (LZO_OPT_UNALIGNED32) || (LZO_ALIGNED_OK_4)
#if !(LZO_OPT_UNALIGNED32)
if (t >= 2 * 4 - (3 - 1) && PTR_ALIGNED2_4(op,m_pos))
{
assert((op - m_pos) >= 4);
#else
if (t >= 2 * 4 - (3 - 1) && (op - m_pos) >= 4)
{
#endif
UA_COPY4(op,m_pos);
op += 4; m_pos += 4; t -= 4 - (3 - 1);
do {
UA_COPY4(op,m_pos);
op += 4; m_pos += 4; t -= 4;
} while (t >= 4);
if (t > 0) do *op++ = *m_pos++; while (--t > 0);
}
else
#endif
{
copy_match:
*op++ = *m_pos++; *op++ = *m_pos++;
do *op++ = *m_pos++; while (--t > 0);
}
#endif
match_done:
#if defined(LZO1Z)
t = ip[-1] & 3;
#else
t = ip[-2] & 3;
#endif
if (t == 0)
break;
match_next:
assert(t > 0); assert(t < 4); NEED_OP(t); NEED_IP(t+3);
#if 0
do *op++ = *ip++; while (--t > 0);
#else
*op++ = *ip++;
if (t > 1) { *op++ = *ip++; if (t > 2) { *op++ = *ip++; } }
#endif
t = *ip++;
}
}
eof_found:
*out_len = pd(op, out);
return (ip == ip_end ? LZO_E_OK :
(ip < ip_end ? LZO_E_INPUT_NOT_CONSUMED : LZO_E_INPUT_OVERRUN));
#if defined(HAVE_NEED_IP)
input_overrun:
*out_len = pd(op, out);
return LZO_E_INPUT_OVERRUN;
#endif
#if defined(HAVE_NEED_OP)
output_overrun:
*out_len = pd(op, out);
return LZO_E_OUTPUT_OVERRUN;
#endif
#if defined(LZO_TEST_OVERRUN_LOOKBEHIND)
lookbehind_overrun:
*out_len = pd(op, out);
return LZO_E_LOOKBEHIND_OVERRUN;
#endif
}
#endif
#define LZO_TEST_OVERRUN 1
#undef DO_DECOMPRESS
#define DO_DECOMPRESS lzo1x_decompress_safe
#if !defined(MINILZO_CFG_SKIP_LZO1X_DECOMPRESS_SAFE)
#if defined(LZO_TEST_OVERRUN)
# if !defined(LZO_TEST_OVERRUN_INPUT)
# define LZO_TEST_OVERRUN_INPUT 2
# endif
# if !defined(LZO_TEST_OVERRUN_OUTPUT)
# define LZO_TEST_OVERRUN_OUTPUT 2
# endif
# if !defined(LZO_TEST_OVERRUN_LOOKBEHIND)
# define LZO_TEST_OVERRUN_LOOKBEHIND 1
# endif
#endif
#undef TEST_IP
#undef TEST_OP
#undef TEST_IP_AND_TEST_OP
#undef TEST_LB
#undef TEST_LBO
#undef NEED_IP
#undef NEED_OP
#undef TEST_IV
#undef TEST_OV
#undef HAVE_TEST_IP
#undef HAVE_TEST_OP
#undef HAVE_NEED_IP
#undef HAVE_NEED_OP
#undef HAVE_ANY_IP
#undef HAVE_ANY_OP
#if defined(LZO_TEST_OVERRUN_INPUT)
# if (LZO_TEST_OVERRUN_INPUT >= 1)
# define TEST_IP (ip < ip_end)
# endif
# if (LZO_TEST_OVERRUN_INPUT >= 2)
# define NEED_IP(x) \
if ((lzo_uint)(ip_end - ip) < (lzo_uint)(x)) goto input_overrun
# define TEST_IV(x) if ((x) > (lzo_uint)0 - (511)) goto input_overrun
# endif
#endif
#if defined(LZO_TEST_OVERRUN_OUTPUT)
# if (LZO_TEST_OVERRUN_OUTPUT >= 1)
# define TEST_OP (op <= op_end)
# endif
# if (LZO_TEST_OVERRUN_OUTPUT >= 2)
# undef TEST_OP
# define NEED_OP(x) \
if ((lzo_uint)(op_end - op) < (lzo_uint)(x)) goto output_overrun
# define TEST_OV(x) if ((x) > (lzo_uint)0 - (511)) goto output_overrun
# endif
#endif
#if defined(LZO_TEST_OVERRUN_LOOKBEHIND)
# define TEST_LB(m_pos) if (PTR_LT(m_pos,out) || PTR_GE(m_pos,op)) goto lookbehind_overrun
# define TEST_LBO(m_pos,o) if (PTR_LT(m_pos,out) || PTR_GE(m_pos,op-(o))) goto lookbehind_overrun
#else
# define TEST_LB(m_pos) ((void) 0)
# define TEST_LBO(m_pos,o) ((void) 0)
#endif
#if !defined(LZO_EOF_CODE) && !defined(TEST_IP)
# define TEST_IP (ip < ip_end)
#endif
#if defined(TEST_IP)
# define HAVE_TEST_IP 1
#else
# define TEST_IP 1
#endif
#if defined(TEST_OP)
# define HAVE_TEST_OP 1
#else
# define TEST_OP 1
#endif
#if defined(HAVE_TEST_IP) && defined(HAVE_TEST_OP)
# define TEST_IP_AND_TEST_OP (TEST_IP && TEST_OP)
#elif defined(HAVE_TEST_IP)
# define TEST_IP_AND_TEST_OP TEST_IP
#elif defined(HAVE_TEST_OP)
# define TEST_IP_AND_TEST_OP TEST_OP
#else
# define TEST_IP_AND_TEST_OP 1
#endif
#if defined(NEED_IP)
# define HAVE_NEED_IP 1
#else
# define NEED_IP(x) ((void) 0)
# define TEST_IV(x) ((void) 0)
#endif
#if defined(NEED_OP)
# define HAVE_NEED_OP 1
#else
# define NEED_OP(x) ((void) 0)
# define TEST_OV(x) ((void) 0)
#endif
#if defined(HAVE_TEST_IP) || defined(HAVE_NEED_IP)
# define HAVE_ANY_IP 1
#endif
#if defined(HAVE_TEST_OP) || defined(HAVE_NEED_OP)
# define HAVE_ANY_OP 1
#endif
#if defined(DO_DECOMPRESS)
LZO_PUBLIC(int)
DO_DECOMPRESS ( const lzo_bytep in , lzo_uint in_len,
lzo_bytep out, lzo_uintp out_len,
lzo_voidp wrkmem )
#endif
{
lzo_bytep op;
const lzo_bytep ip;
lzo_uint t;
#if defined(COPY_DICT)
lzo_uint m_off;
const lzo_bytep dict_end;
#else
const lzo_bytep m_pos;
#endif
const lzo_bytep const ip_end = in + in_len;
#if defined(HAVE_ANY_OP)
lzo_bytep const op_end = out + *out_len;
#endif
#if defined(LZO1Z)
lzo_uint last_m_off = 0;
#endif
LZO_UNUSED(wrkmem);
#if defined(COPY_DICT)
if (dict)
{
if (dict_len > M4_MAX_OFFSET)
{
dict += dict_len - M4_MAX_OFFSET;
dict_len = M4_MAX_OFFSET;
}
dict_end = dict + dict_len;
}
else
{
dict_len = 0;
dict_end = NULL;
}
#endif
*out_len = 0;
op = out;
ip = in;
NEED_IP(1);
if (*ip > 17)
{
t = *ip++ - 17;
if (t < 4)
goto match_next;
assert(t > 0); NEED_OP(t); NEED_IP(t+3);
do *op++ = *ip++; while (--t > 0);
goto first_literal_run;
}
for (;;)
{
NEED_IP(3);
t = *ip++;
if (t >= 16)
goto match;
if (t == 0)
{
while (*ip == 0)
{
t += 255;
ip++;
TEST_IV(t);
NEED_IP(1);
}
t += 15 + *ip++;
}
assert(t > 0); NEED_OP(t+3); NEED_IP(t+6);
#if (LZO_OPT_UNALIGNED64) && (LZO_OPT_UNALIGNED32)
t += 3;
if (t >= 8) do
{
UA_COPY8(op,ip);
op += 8; ip += 8; t -= 8;
} while (t >= 8);
if (t >= 4)
{
UA_COPY4(op,ip);
op += 4; ip += 4; t -= 4;
}
if (t > 0)
{
*op++ = *ip++;
if (t > 1) { *op++ = *ip++; if (t > 2) { *op++ = *ip++; } }
}
#elif (LZO_OPT_UNALIGNED32) || (LZO_ALIGNED_OK_4)
#if !(LZO_OPT_UNALIGNED32)
if (PTR_ALIGNED2_4(op,ip))
{
#endif
UA_COPY4(op,ip);
op += 4; ip += 4;
if (--t > 0)
{
if (t >= 4)
{
do {
UA_COPY4(op,ip);
op += 4; ip += 4; t -= 4;
} while (t >= 4);
if (t > 0) do *op++ = *ip++; while (--t > 0);
}
else
do *op++ = *ip++; while (--t > 0);
}
#if !(LZO_OPT_UNALIGNED32)
}
else
#endif
#endif
#if !(LZO_OPT_UNALIGNED32)
{
*op++ = *ip++; *op++ = *ip++; *op++ = *ip++;
do *op++ = *ip++; while (--t > 0);
}
#endif
first_literal_run:
t = *ip++;
if (t >= 16)
goto match;
#if defined(COPY_DICT)
#if defined(LZO1Z)
m_off = (1 + M2_MAX_OFFSET) + (t << 6) + (*ip++ >> 2);
last_m_off = m_off;
#else
m_off = (1 + M2_MAX_OFFSET) + (t >> 2) + (*ip++ << 2);
#endif
NEED_OP(3);
t = 3; COPY_DICT(t,m_off)
#else
#if defined(LZO1Z)
t = (1 + M2_MAX_OFFSET) + (t << 6) + (*ip++ >> 2);
m_pos = op - t;
last_m_off = t;
#else
m_pos = op - (1 + M2_MAX_OFFSET);
m_pos -= t >> 2;
m_pos -= *ip++ << 2;
#endif
TEST_LB(m_pos); NEED_OP(3);
*op++ = *m_pos++; *op++ = *m_pos++; *op++ = *m_pos;
#endif
goto match_done;
for (;;) {
match:
if (t >= 64)
{
#if defined(COPY_DICT)
#if defined(LZO1X)
m_off = 1 + ((t >> 2) & 7) + (*ip++ << 3);
t = (t >> 5) - 1;
#elif defined(LZO1Y)
m_off = 1 + ((t >> 2) & 3) + (*ip++ << 2);
t = (t >> 4) - 3;
#elif defined(LZO1Z)
m_off = t & 0x1f;
if (m_off >= 0x1c)
m_off = last_m_off;
else
{
m_off = 1 + (m_off << 6) + (*ip++ >> 2);
last_m_off = m_off;
}
t = (t >> 5) - 1;
#endif
#else
#if defined(LZO1X)
m_pos = op - 1;
m_pos -= (t >> 2) & 7;
m_pos -= *ip++ << 3;
t = (t >> 5) - 1;
#elif defined(LZO1Y)
m_pos = op - 1;
m_pos -= (t >> 2) & 3;
m_pos -= *ip++ << 2;
t = (t >> 4) - 3;
#elif defined(LZO1Z)
{
lzo_uint off = t & 0x1f;
m_pos = op;
if (off >= 0x1c)
{
assert(last_m_off > 0);
m_pos -= last_m_off;
}
else
{
off = 1 + (off << 6) + (*ip++ >> 2);
m_pos -= off;
last_m_off = off;
}
}
t = (t >> 5) - 1;
#endif
TEST_LB(m_pos); assert(t > 0); NEED_OP(t+3-1);
goto copy_match;
#endif
}
else if (t >= 32)
{
t &= 31;
if (t == 0)
{
while (*ip == 0)
{
t += 255;
ip++;
TEST_OV(t);
NEED_IP(1);
}
t += 31 + *ip++;
NEED_IP(2);
}
#if defined(COPY_DICT)
#if defined(LZO1Z)
m_off = 1 + (ip[0] << 6) + (ip[1] >> 2);
last_m_off = m_off;
#else
m_off = 1 + (ip[0] >> 2) + (ip[1] << 6);
#endif
#else
#if defined(LZO1Z)
{
lzo_uint off = 1 + (ip[0] << 6) + (ip[1] >> 2);
m_pos = op - off;
last_m_off = off;
}
#elif (LZO_OPT_UNALIGNED16) && (LZO_ABI_LITTLE_ENDIAN)
m_pos = op - 1;
m_pos -= UA_GET_LE16(ip) >> 2;
#else
m_pos = op - 1;
m_pos -= (ip[0] >> 2) + (ip[1] << 6);
#endif
#endif
ip += 2;
}
else if (t >= 16)
{
#if defined(COPY_DICT)
m_off = (t & 8) << 11;
#else
m_pos = op;
m_pos -= (t & 8) << 11;
#endif
t &= 7;
if (t == 0)
{
while (*ip == 0)
{
t += 255;
ip++;
TEST_OV(t);
NEED_IP(1);
}
t += 7 + *ip++;
NEED_IP(2);
}
#if defined(COPY_DICT)
#if defined(LZO1Z)
m_off += (ip[0] << 6) + (ip[1] >> 2);
#else
m_off += (ip[0] >> 2) + (ip[1] << 6);
#endif
ip += 2;
if (m_off == 0)
goto eof_found;
m_off += 0x4000;
#if defined(LZO1Z)
last_m_off = m_off;
#endif
#else
#if defined(LZO1Z)
m_pos -= (ip[0] << 6) + (ip[1] >> 2);
#elif (LZO_OPT_UNALIGNED16) && (LZO_ABI_LITTLE_ENDIAN)
m_pos -= UA_GET_LE16(ip) >> 2;
#else
m_pos -= (ip[0] >> 2) + (ip[1] << 6);
#endif
ip += 2;
if (m_pos == op)
goto eof_found;
m_pos -= 0x4000;
#if defined(LZO1Z)
last_m_off = pd((const lzo_bytep)op, m_pos);
#endif
#endif
}
else
{
#if defined(COPY_DICT)
#if defined(LZO1Z)
m_off = 1 + (t << 6) + (*ip++ >> 2);
last_m_off = m_off;
#else
m_off = 1 + (t >> 2) + (*ip++ << 2);
#endif
NEED_OP(2);
t = 2; COPY_DICT(t,m_off)
#else
#if defined(LZO1Z)
t = 1 + (t << 6) + (*ip++ >> 2);
m_pos = op - t;
last_m_off = t;
#else
m_pos = op - 1;
m_pos -= t >> 2;
m_pos -= *ip++ << 2;
#endif
TEST_LB(m_pos); NEED_OP(2);
*op++ = *m_pos++; *op++ = *m_pos;
#endif
goto match_done;
}
#if defined(COPY_DICT)
NEED_OP(t+3-1);
t += 3-1; COPY_DICT(t,m_off)
#else
TEST_LB(m_pos); assert(t > 0); NEED_OP(t+3-1);
#if (LZO_OPT_UNALIGNED64) && (LZO_OPT_UNALIGNED32)
if (op - m_pos >= 8)
{
t += (3 - 1);
if (t >= 8) do
{
UA_COPY8(op,m_pos);
op += 8; m_pos += 8; t -= 8;
} while (t >= 8);
if (t >= 4)
{
UA_COPY4(op,m_pos);
op += 4; m_pos += 4; t -= 4;
}
if (t > 0)
{
*op++ = m_pos[0];
if (t > 1) { *op++ = m_pos[1]; if (t > 2) { *op++ = m_pos[2]; } }
}
}
else
#elif (LZO_OPT_UNALIGNED32) || (LZO_ALIGNED_OK_4)
#if !(LZO_OPT_UNALIGNED32)
if (t >= 2 * 4 - (3 - 1) && PTR_ALIGNED2_4(op,m_pos))
{
assert((op - m_pos) >= 4);
#else
if (t >= 2 * 4 - (3 - 1) && (op - m_pos) >= 4)
{
#endif
UA_COPY4(op,m_pos);
op += 4; m_pos += 4; t -= 4 - (3 - 1);
do {
UA_COPY4(op,m_pos);
op += 4; m_pos += 4; t -= 4;
} while (t >= 4);
if (t > 0) do *op++ = *m_pos++; while (--t > 0);
}
else
#endif
{
copy_match:
*op++ = *m_pos++; *op++ = *m_pos++;
do *op++ = *m_pos++; while (--t > 0);
}
#endif
match_done:
#if defined(LZO1Z)
t = ip[-1] & 3;
#else
t = ip[-2] & 3;
#endif
if (t == 0)
break;
match_next:
assert(t > 0); assert(t < 4); NEED_OP(t); NEED_IP(t+3);
#if 0
do *op++ = *ip++; while (--t > 0);
#else
*op++ = *ip++;
if (t > 1) { *op++ = *ip++; if (t > 2) { *op++ = *ip++; } }
#endif
t = *ip++;
}
}
eof_found:
*out_len = pd(op, out);
return (ip == ip_end ? LZO_E_OK :
(ip < ip_end ? LZO_E_INPUT_NOT_CONSUMED : LZO_E_INPUT_OVERRUN));
#if defined(HAVE_NEED_IP)
input_overrun:
*out_len = pd(op, out);
return LZO_E_INPUT_OVERRUN;
#endif
#if defined(HAVE_NEED_OP)
output_overrun:
*out_len = pd(op, out);
return LZO_E_OUTPUT_OVERRUN;
#endif
#if defined(LZO_TEST_OVERRUN_LOOKBEHIND)
lookbehind_overrun:
*out_len = pd(op, out);
return LZO_E_LOOKBEHIND_OVERRUN;
#endif
}
#endif
/***** End of minilzo.c *****/
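For reference on the wire format the encoder that ends here produced: for inputs of 20 bytes or less the lzo1x_1_compress wrapper never enters the do_compress core at all. It writes a single length byte of 17 + t, the literals themselves, and the three-byte stream terminator (M4_MARKER | 1 followed by two zero bytes). Hand-tracing that path for the 5-byte input "hello" suggests the stream below; this is a trace of the code above, illustrative only, and not output verified against a build:

/* Expected lzo1x_1_compress output for the 5-byte input "hello",
 * per the short-input path of the wrapper above (hand-traced, illustrative). */
static const unsigned char short_input_stream[9] = {
    0x16,                     /* 17 + 5: literal run of 5 bytes as the first byte of the stream */
    'h', 'e', 'l', 'l', 'o',  /* the literals, copied verbatim */
    0x11, 0x00, 0x00,         /* M4_MARKER | 1, then two zero bytes: end-of-stream marker */
};

The decompressors above recognize the same terminator: for t = 0x11 the "t >= 16" branch ends up with m_pos equal to op and jumps to eof_found.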


@@ -1,94 +0,0 @@
/* minilzo.h -- mini subset of the LZO real-time data compression library
This file is part of the LZO real-time data compression library.
Copyright (C) 1996-2014 Markus Franz Xaver Johannes Oberhumer
All Rights Reserved.
The LZO library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of
the License, or (at your option) any later version.
The LZO library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with the LZO library; see the file COPYING.
If not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
Markus F.X.J. Oberhumer
<markus@oberhumer.com>
http://www.oberhumer.com/opensource/lzo/
*/
/*
* NOTE:
* the full LZO package can be found at
* http://www.oberhumer.com/opensource/lzo/
*/
#ifndef __MINILZO_H
#define __MINILZO_H 1
#define MINILZO_VERSION 0x2080
#ifdef __LZOCONF_H
# error "you cannot use both LZO and miniLZO"
#endif
#undef LZO_HAVE_CONFIG_H
#include "lzoconf.h"
#if !defined(LZO_VERSION) || (LZO_VERSION != MINILZO_VERSION)
# error "version mismatch in header files"
#endif
#ifdef __cplusplus
extern "C" {
#endif
/***********************************************************************
//
************************************************************************/
/* Memory required for the wrkmem parameter.
* When the required size is 0, you can also pass a NULL pointer.
*/
#define LZO1X_MEM_COMPRESS LZO1X_1_MEM_COMPRESS
#define LZO1X_1_MEM_COMPRESS ((lzo_uint32_t) (16384L * lzo_sizeof_dict_t))
#define LZO1X_MEM_DECOMPRESS (0)
/* compression */
LZO_EXTERN(int)
lzo1x_1_compress ( const lzo_bytep src, lzo_uint src_len,
lzo_bytep dst, lzo_uintp dst_len,
lzo_voidp wrkmem );
/* decompression */
LZO_EXTERN(int)
lzo1x_decompress ( const lzo_bytep src, lzo_uint src_len,
lzo_bytep dst, lzo_uintp dst_len,
lzo_voidp wrkmem /* NOT USED */ );
/* safe decompression with overrun testing */
LZO_EXTERN(int)
lzo1x_decompress_safe ( const lzo_bytep src, lzo_uint src_len,
lzo_bytep dst, lzo_uintp dst_len,
lzo_voidp wrkmem /* NOT USED */ );
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* already included */
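The three entry points declared above are the whole public surface miniLZO exposed to Blender. As a point of reference only, here is a minimal compress/decompress round-trip sketch against this API; it is not code from the removed sources. The helper name, buffer sizes and the worst-case output bound (taken from upstream LZO's example code) are assumptions, while lzo_init() and the LZO_E_* codes come from the lzoconf.h header included above:

/* Illustrative sketch only; not part of the removed files. */
#include "minilzo.h"
#include <stdlib.h>
#include <string.h>

static int lzo_roundtrip(unsigned char *src, lzo_uint src_len)
{
  /* wrkmem is needed for compression only; decompression passes NULL. */
  void *wrkmem = malloc(LZO1X_1_MEM_COMPRESS);
  /* Worst-case growth bound used by upstream LZO examples. */
  lzo_uint dst_len = src_len + src_len / 16 + 64 + 3;
  unsigned char *dst = (unsigned char *)malloc(dst_len);
  unsigned char *back = (unsigned char *)malloc(src_len);
  lzo_uint back_len = src_len; /* in: capacity, out: bytes written */
  int r = LZO_E_ERROR;

  if (lzo_init() == LZO_E_OK && wrkmem && dst && back) {
    r = lzo1x_1_compress(src, src_len, dst, &dst_len, wrkmem);
    if (r == LZO_E_OK) {
      /* The _safe variant bounds-checks output and look-behind
       * (see the TEST_* / NEED_* macros in minilzo.c above). */
      r = lzo1x_decompress_safe(dst, dst_len, back, &back_len, NULL);
    }
    if (r == LZO_E_OK && (back_len != src_len || memcmp(back, src, src_len) != 0))
      r = LZO_E_ERROR;
  }
  free(wrkmem);
  free(dst);
  free(back);
  return r;
}

The removed "#if 0 // #ifdef WITH_LZO" block further down in pointcache.cc used exactly the lzo1x_decompress_safe call from this sketch to read legacy caches.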


@@ -1469,7 +1469,6 @@ Legal Terms
| [FFTW](https://www.fftw.org/) | 3.3.10 | `Copyright (c) 2003, 2007-14 Matteo Frigo. Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology` |
| [Flac](https://xiph.org/flac/) | 1.4.2 | `Copyright (C) 2001-2009 Josh Coalson. Copyright (C) 2011-2016 Xiph.Org Foundation.` |
| [GMP](https://gmplib.org/) | 6.3.0 | `Copyright 1996-2020 Free Software Foundation, Inc.` |
| [miniLZO](http://www.oberhumer.com/opensource/lzo/) | 2.08 | `Copyright (C) 1996-2014 Markus Franz Xaver Oberhumer All Rights Reserved.` |
| [Potrace](https://potrace.sourceforge.net/) | 1.16 | `Copyright © 2001-2019 Peter Selinger.` |
| [X Drag and Drop](http://www.newplanetsoftware.com/xdnd/ (defunct)) | 2000-08-08 | `Copyright (C) 1996-2000 Paul Sheer` |
| [X264](https://www.videolan.org/developers/x264.html) | 35fe20d1ba4 | `Copyright (C) 2003-2021 x264 project` |


@@ -709,36 +709,6 @@ if(WITH_JACK)
add_definitions(-DWITH_JACK)
endif()
if(WITH_LZO)
if(WITH_SYSTEM_LZO)
list(APPEND INC_SYS
${LZO_INCLUDE_DIR}
)
list(APPEND LIB
${LZO_LIBRARIES}
)
add_definitions(-DWITH_SYSTEM_LZO)
else()
list(APPEND INC_SYS
../../../extern/lzo/minilzo
)
list(APPEND LIB
extern_minilzo
)
endif()
add_definitions(-DWITH_LZO)
endif()
if(WITH_LZMA)
list(APPEND INC_SYS
../../../extern/lzma
)
list(APPEND LIB
extern_lzma
)
add_definitions(-DWITH_LZMA)
endif()
if(WITH_LIBMV)
add_definitions(-DWITH_LIBMV)
endif()


@@ -75,20 +75,6 @@
# include "RBI_api.h"
#endif
#if 0 // #ifdef WITH_LZO
# ifdef WITH_SYSTEM_LZO
# include <lzo/lzo1x.h>
# else
# include "minilzo.h"
# endif
# define LZO_HEAP_ALLOC(var, size) \
lzo_align_t __LZO_MMODEL var[((size) + (sizeof(lzo_align_t) - 1)) / sizeof(lzo_align_t)]
#endif
#if 0 // #ifdef WITH_LZMA
# include "LzmaLib.h"
#endif
#include <zstd.h>
#define PTCACHE_DATA_FROM(data, type, from) \
@@ -1535,22 +1521,6 @@ static int ptcache_file_compressed_read(PTCacheFile *pf,
decomp_result = MEM_malloc_arrayN<uchar>(items_num * item_size,
"pointcache_unfilter_buffer");
}
#if 0 // #ifdef WITH_LZO
if (compressed == PTCACHE_COMPRESS_LZO_DEPRECATED) {
size_t out_len = items_num * item_size;
r = lzo1x_decompress_safe(in, (lzo_uint)in_len, decomp_result, (lzo_uint *)&out_len, nullptr);
}
#endif
#if 0 // #ifdef WITH_LZMA
if (compressed == PTCACHE_COMPRESS_LZMA_DEPRECATED) {
size_t leni = in_len, leno = items_num * item_size;
uchar lzma_props[16] = {};
uint lzma_props_size = 0;
ptcache_file_read(pf, &lzma_props_size, 1, sizeof(uint));
ptcache_file_read(pf, lzma_props, lzma_props_size, sizeof(uchar));
r = LzmaUncompress(decomp_result, &leno, in, &leni, lzma_props, lzma_props_size);
}
#endif
if (ELEM(compressed,
PTCACHE_COMPRESS_ZSTD_FILTERED,
PTCACHE_COMPRESS_ZSTD_FAST_DEPRECATED,