/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef MEM_H_MODULE
#define MEM_H_MODULE

#if defined (__cplusplus)
extern "C" {
#endif


/*-****************************************
* Dependencies
******************************************/
#include <stddef.h>  /* size_t, ptrdiff_t */
#include <string.h>  /* memcpy */


/*-****************************************
* Compiler specifics
******************************************/
#if defined(_MSC_VER)   /* Visual Studio */
# include <stdlib.h>    /* _byteswap_ulong */
# include <intrin.h>    /* _byteswap_* */
#endif
#if defined(__GNUC__)
# define MEM_STATIC static __inline __attribute__((unused))
#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
# define MEM_STATIC static inline
#elif defined(_MSC_VER)
# define MEM_STATIC static __inline
#else
# define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif

#ifndef __has_builtin
# define __has_builtin(x) 0  /* compat. with non-clang compilers */
#endif

/* this code is only tested on 32- and 64-bit systems */
#define MEM_STATIC_ASSERT(c)   { enum { MEM_static_assert = 1/(int)(!!(c)) }; }
MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
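
/* illustrative note : MEM_STATIC_ASSERT divides by zero at compile time when
 * its condition is false, so a build on an unsupported platform fails to
 * compile instead of misbehaving at runtime. For example :
 *     MEM_STATIC_ASSERT(sizeof(U32) == 4);   accepted
 *     MEM_STATIC_ASSERT(sizeof(U32) == 2);   rejected (division by zero in enum)
 */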

/* detects whether we are being compiled under msan */
#if defined (__has_feature)
# if __has_feature(memory_sanitizer)
#  define MEMORY_SANITIZER 1
# endif
#endif

#if defined (MEMORY_SANITIZER)
/* Not all platforms that support msan provide sanitizers/msan_interface.h.
 * We therefore declare the functions we need ourselves, rather than trying to
 * include the header file... */

#include <stdint.h>  /* intptr_t */

/* Make memory region fully initialized (without changing its contents). */
void __msan_unpoison(const volatile void *a, size_t size);

/* Make memory region fully uninitialized (without changing its contents).
   This is a legacy interface that does not update origin information. Use
   __msan_allocated_memory() instead. */
void __msan_poison(const volatile void *a, size_t size);

/* Returns the offset of the first (at least partially) poisoned byte in the
   memory range, or -1 if the whole range is good. */
intptr_t __msan_test_shadow(const volatile void *x, size_t size);
#endif

/* detects whether we are being compiled under asan */
#if defined (__has_feature)
# if __has_feature(address_sanitizer)
#  define ADDRESS_SANITIZER 1
# endif
#elif defined(__SANITIZE_ADDRESS__)
# define ADDRESS_SANITIZER 1
#endif

#if defined (ADDRESS_SANITIZER)
# include <sanitizer/asan_interface.h>
#endif


/*-**************************************************************
* Basic Types
*****************************************************************/
#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# include <stdint.h>
  typedef  uint8_t BYTE;
  typedef uint16_t U16;
  typedef  int16_t S16;
  typedef uint32_t U32;
  typedef  int32_t S32;
  typedef uint64_t U64;
  typedef  int64_t S64;
#else
# include <limits.h>
#if CHAR_BIT != 8
# error "this implementation requires char to be an exactly 8-bit type"
#endif
  typedef unsigned char BYTE;
#if USHRT_MAX != 65535
# error "this implementation requires short to be an exactly 16-bit type"
#endif
  typedef unsigned short U16;
  typedef   signed short S16;
#if UINT_MAX != 4294967295
# error "this implementation requires int to be an exactly 32-bit type"
#endif
  typedef unsigned int U32;
  typedef   signed int S32;
/* note : C90 defines no limits for the long long type;
 * such limits exist in C99, but <stdint.h> is then preferred anyway */
  typedef unsigned long long U64;
  typedef   signed long long S64;
#endif


/*-**************************************************************
* Memory I/O
*****************************************************************/
/* MEM_FORCE_MEMORY_ACCESS :
 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The switch below allows selecting a different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : `__packed` statement. It relies on a compiler extension (i.e., not portable).
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method is portable but violates the C standard.
 *            It can generate buggy code on targets that depend on alignment.
 *            In some circumstances, it's the only known way to get the best performance (e.g. GCC + ARMv6).
 * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
 * Prefer these methods in priority order (0 > 1 > 2).
 */
#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
#  define MEM_FORCE_MEMORY_ACCESS 2
# elif defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__)
#  define MEM_FORCE_MEMORY_ACCESS 1
# endif
#endif
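
/* illustrative usage (hypothetical build lines, not part of this file) :
 * the access method can be forced from the compiler command line, e.g.
 *     cc -DMEM_FORCE_MEMORY_ACCESS=1 -c file.c     (packed-struct method)
 *     cc -DMEM_FORCE_MEMORY_ACCESS=2 -c file.c     (direct access; alignment-sensitive)
 */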

MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; }
MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }

MEM_STATIC unsigned MEM_isLittleEndian(void)
{
    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : detrimental to performance */
    return one.c[0];
}
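/* note : on a little-endian machine, the least significant byte of `one.u`
 * is stored first, so `one.c[0]` reads back as 1 ;
 * on a big-endian machine it reads back as 0. */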

#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)

/* violates the C standard, by lying about structure alignment.
   Use only if there is no other choice to achieve best performance on the target platform */
MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
MEM_STATIC size_t MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; }

MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; }

#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)

/* `__packed` attributes are safer, but compiler-specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
#if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32))
    __pragma( pack(push, 1) )
    typedef struct { U16 v; } unalign16;
    typedef struct { U32 v; } unalign32;
    typedef struct { U64 v; } unalign64;
    typedef struct { size_t v; } unalignArch;
    __pragma( pack(pop) )
#else
    typedef struct { U16 v; } __attribute__((packed)) unalign16;
    typedef struct { U32 v; } __attribute__((packed)) unalign32;
    typedef struct { U64 v; } __attribute__((packed)) unalign64;
    typedef struct { size_t v; } __attribute__((packed)) unalignArch;
#endif

MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign16*)ptr)->v; }
MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign32*)ptr)->v; }
MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign64*)ptr)->v; }
MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalignArch*)ptr)->v; }

MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign16*)memPtr)->v = value; }
MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign32*)memPtr)->v = value; }
MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v = value; }

#else

/* default method, safe and standard.
   can sometimes prove slower */

MEM_STATIC U16 MEM_read16(const void* memPtr)
{
    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC U32 MEM_read32(const void* memPtr)
{
    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC U64 MEM_read64(const void* memPtr)
{
    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC size_t MEM_readST(const void* memPtr)
{
    size_t val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC void MEM_write16(void* memPtr, U16 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

MEM_STATIC void MEM_write32(void* memPtr, U32 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

MEM_STATIC void MEM_write64(void* memPtr, U64 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

#endif /* MEM_FORCE_MEMORY_ACCESS */
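
/* note : with optimization enabled, modern compilers typically lower these
 * fixed-size memcpy() calls to single unaligned load/store instructions,
 * so the default method is often just as fast as the alternatives. */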

MEM_STATIC U32 MEM_swap32(U32 in)
{
#if defined(_MSC_VER)     /* Visual Studio */
    return _byteswap_ulong(in);
#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
  || (defined(__clang__) && __has_builtin(__builtin_bswap32))
    return __builtin_bswap32(in);
#else
    return  ((in << 24) & 0xff000000 ) |
            ((in <<  8) & 0x00ff0000 ) |
            ((in >>  8) & 0x0000ff00 ) |
            ((in >> 24) & 0x000000ff );
#endif
}
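/* worked example : MEM_swap32(0x11223344) == 0x44332211 ;
 * each byte is mirrored across the 32-bit word */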

MEM_STATIC U64 MEM_swap64(U64 in)
{
#if defined(_MSC_VER)     /* Visual Studio */
    return _byteswap_uint64(in);
#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
  || (defined(__clang__) && __has_builtin(__builtin_bswap64))
    return __builtin_bswap64(in);
#else
    return  ((in << 56) & 0xff00000000000000ULL) |
            ((in << 40) & 0x00ff000000000000ULL) |
            ((in << 24) & 0x0000ff0000000000ULL) |
            ((in <<  8) & 0x000000ff00000000ULL) |
            ((in >>  8) & 0x00000000ff000000ULL) |
            ((in >> 24) & 0x0000000000ff0000ULL) |
            ((in >> 40) & 0x000000000000ff00ULL) |
            ((in >> 56) & 0x00000000000000ffULL);
#endif
}

MEM_STATIC size_t MEM_swapST(size_t in)
{
    if (MEM_32bits())
        return (size_t)MEM_swap32((U32)in);
    else
        return (size_t)MEM_swap64((U64)in);
}

/*=== Little endian r/w ===*/

MEM_STATIC U16 MEM_readLE16(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read16(memPtr);
    else {
        const BYTE* p = (const BYTE*)memPtr;
        return (U16)(p[0] + (p[1]<<8));
    }
}

MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
{
    if (MEM_isLittleEndian()) {
        MEM_write16(memPtr, val);
    } else {
        BYTE* p = (BYTE*)memPtr;
        p[0] = (BYTE)val;
        p[1] = (BYTE)(val>>8);
    }
}

MEM_STATIC U32 MEM_readLE24(const void* memPtr)
{
    return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
}

MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val)
{
    MEM_writeLE16(memPtr, (U16)val);
    ((BYTE*)memPtr)[2] = (BYTE)(val>>16);
}
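/* worked example : the byte sequence {0x01, 0x02, 0x03} reads back as
 * MEM_readLE24() == 0x030201 : the low 16 bits come from MEM_readLE16(),
 * and the third byte is shifted into bits 16..23 */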

MEM_STATIC U32 MEM_readLE32(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read32(memPtr);
    else
        return MEM_swap32(MEM_read32(memPtr));
}

MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32)
{
    if (MEM_isLittleEndian())
        MEM_write32(memPtr, val32);
    else
        MEM_write32(memPtr, MEM_swap32(val32));
}

MEM_STATIC U64 MEM_readLE64(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read64(memPtr);
    else
        return MEM_swap64(MEM_read64(memPtr));
}

MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64)
{
    if (MEM_isLittleEndian())
        MEM_write64(memPtr, val64);
    else
        MEM_write64(memPtr, MEM_swap64(val64));
}

MEM_STATIC size_t MEM_readLEST(const void* memPtr)
{
    if (MEM_32bits())
        return (size_t)MEM_readLE32(memPtr);
    else
        return (size_t)MEM_readLE64(memPtr);
}

MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val)
{
    if (MEM_32bits())
        MEM_writeLE32(memPtr, (U32)val);
    else
        MEM_writeLE64(memPtr, (U64)val);
}
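
/* illustrative round-trip (a sketch, not part of the API) :
 *     BYTE buf[8];
 *     MEM_writeLEST(buf, (size_t)42);
 *     assert(MEM_readLEST(buf) == 42);    holds on either endianness
 */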

/*=== Big endian r/w ===*/

MEM_STATIC U32 MEM_readBE32(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_swap32(MEM_read32(memPtr));
    else
        return MEM_read32(memPtr);
}

MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32)
{
    if (MEM_isLittleEndian())
        MEM_write32(memPtr, MEM_swap32(val32));
    else
        MEM_write32(memPtr, val32);
}
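/* worked example : the byte sequence {0x12, 0x34, 0x56, 0x78} reads back as
 * MEM_readBE32() == 0x12345678, whatever the host endianness */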

MEM_STATIC U64 MEM_readBE64(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_swap64(MEM_read64(memPtr));
    else
        return MEM_read64(memPtr);
}

MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64)
{
    if (MEM_isLittleEndian())
        MEM_write64(memPtr, MEM_swap64(val64));
    else
        MEM_write64(memPtr, val64);
}

MEM_STATIC size_t MEM_readBEST(const void* memPtr)
{
    if (MEM_32bits())
        return (size_t)MEM_readBE32(memPtr);
    else
        return (size_t)MEM_readBE64(memPtr);
}

MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val)
{
    if (MEM_32bits())
        MEM_writeBE32(memPtr, (U32)val);
    else
        MEM_writeBE64(memPtr, (U64)val);
}


#if defined (__cplusplus)
}
#endif

#endif /* MEM_H_MODULE */