/* Copyright 2013 Google Inc. All Rights Reserved.

   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Macros for endianness, branch prediction and unaligned loads and stores. */
#ifndef BROTLI_ENC_PORT_H_
#define BROTLI_ENC_PORT_H_
#include <assert.h>
#include <string.h> /* memcpy */
#include <brotli/port.h>
#include <brotli/types.h>

#if defined OS_LINUX || defined OS_CYGWIN
#include <endian.h>
#elif defined OS_FREEBSD
#include <machine/endian.h>
#elif defined OS_MACOSX
#include <machine/endian.h>
/* Let's try and follow the Linux convention */
#define __BYTE_ORDER BYTE_ORDER
#define __LITTLE_ENDIAN LITTLE_ENDIAN
#endif

/* Define the macro IS_LITTLE_ENDIAN, using the endian definitions above if
   endian.h was included. */
#ifdef __BYTE_ORDER
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define IS_LITTLE_ENDIAN
#endif

#else

#if defined(__LITTLE_ENDIAN__)
#define IS_LITTLE_ENDIAN
#endif
#endif  /* __BYTE_ORDER */

#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#define IS_LITTLE_ENDIAN
#endif

/* Enable little-endian optimization for x64 architecture on Windows. */
#if (defined(_WIN32) || defined(_WIN64)) && defined(_M_X64)
#define IS_LITTLE_ENDIAN
#endif
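
/* Illustrative sketch (not part of this header): IS_LITTLE_ENDIAN gates
   byte-order-dependent fast paths in code that includes this header. On a
   little-endian host the bytes of a little-endian wire format can be copied
   verbatim; otherwise they must be assembled explicitly. ReadLE32 below is a
   hypothetical helper, shown only to demonstrate the macro:

     static BROTLI_INLINE uint32_t ReadLE32(const uint8_t* p) {
     #if defined(IS_LITTLE_ENDIAN)
       uint32_t v;
       memcpy(&v, p, sizeof v);
       return v;
     #else
       return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
              ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
     #endif
     }
*/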

/* Portable handling of unaligned loads, stores, and copies.
   On some platforms, like ARM, the copy functions can be more efficient
   than a load and a store. */

#if defined(ARCH_PIII) || \
    defined(ARCH_ATHLON) || defined(ARCH_K8) || defined(_ARCH_PPC)

/* x86 and x86-64 can perform unaligned loads/stores directly;
   modern PowerPC hardware can also do unaligned integer loads and stores;
   but note: the FPU still sends unaligned loads and stores to a trap handler!
*/

#define BROTLI_UNALIGNED_LOAD32(_p) (*(const uint32_t *)(_p))
#define BROTLI_UNALIGNED_LOAD64(_p) (*(const uint64_t *)(_p))

#define BROTLI_UNALIGNED_STORE32(_p, _val) \
  (*(uint32_t *)(_p) = (_val))
#define BROTLI_UNALIGNED_STORE64(_p, _val) \
  (*(uint64_t *)(_p) = (_val))
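
/* These cast-based definitions assume the target tolerates arbitrary
   unaligned accesses; on a strict-alignment target the same casts would
   fault, which is why memcpy-based definitions are used below. */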

#elif defined(__arm__) && \
    !defined(__ARM_ARCH_5__) && \
    !defined(__ARM_ARCH_5T__) && \
    !defined(__ARM_ARCH_5TE__) && \
    !defined(__ARM_ARCH_5TEJ__) && \
    !defined(__ARM_ARCH_6__) && \
    !defined(__ARM_ARCH_6J__) && \
    !defined(__ARM_ARCH_6K__) && \
    !defined(__ARM_ARCH_6Z__) && \
    !defined(__ARM_ARCH_6ZK__) && \
    !defined(__ARM_ARCH_6T2__)

/* ARMv7 and newer support native unaligned accesses, but only of 16-bit
   and 32-bit values (not 64-bit); older versions either raise a fatal signal,
   do an unaligned read and rotate the words around a bit, or do the reads very
   slowly (trip through kernel mode). */

#define BROTLI_UNALIGNED_LOAD32(_p) (*(const uint32_t *)(_p))
#define BROTLI_UNALIGNED_STORE32(_p, _val) \
  (*(uint32_t *)(_p) = (_val))

static BROTLI_INLINE uint64_t BROTLI_UNALIGNED_LOAD64(const void *p) {
  uint64_t t;
  memcpy(&t, p, sizeof t);
  return t;
}

static BROTLI_INLINE void BROTLI_UNALIGNED_STORE64(void *p, uint64_t v) {
  memcpy(p, &v, sizeof v);
}
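
/* The 32-bit accesses above use the direct cast, while the 64-bit accesses
   go through memcpy: a direct 64-bit access would typically compile to
   LDRD/STRD, which trap on unaligned addresses even where plain LDR/STR
   do not. */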

#else

/* These functions are provided for architectures that don't support
   unaligned loads and stores. */

static BROTLI_INLINE uint32_t BROTLI_UNALIGNED_LOAD32(const void *p) {
  uint32_t t;
  memcpy(&t, p, sizeof t);
  return t;
}

static BROTLI_INLINE uint64_t BROTLI_UNALIGNED_LOAD64(const void *p) {
  uint64_t t;
  memcpy(&t, p, sizeof t);
  return t;
}

static BROTLI_INLINE void BROTLI_UNALIGNED_STORE32(void *p, uint32_t v) {
  memcpy(p, &v, sizeof v);
}

static BROTLI_INLINE void BROTLI_UNALIGNED_STORE64(void *p, uint64_t v) {
  memcpy(p, &v, sizeof v);
}

#endif
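
/* Illustrative usage (not part of this header): the definitions above let
   callers read multi-byte values at arbitrary, possibly odd, byte offsets
   without undefined behavior, e.g. when hashing a byte stream:

     const uint8_t* data = ...;
     uint32_t word = BROTLI_UNALIGNED_LOAD32(&data[5]);

   On the memcpy-based paths, compilers typically inline the fixed-size
   memcpy into a single load instruction, so the portable form costs nothing
   on targets that do support unaligned access. */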

#define TEMPLATE_(T)                                                           \
  static BROTLI_INLINE T brotli_min_ ## T (T a, T b) { return a < b ? a : b; } \
  static BROTLI_INLINE T brotli_max_ ## T (T a, T b) { return a > b ? a : b; }
TEMPLATE_(double) TEMPLATE_(float) TEMPLATE_(int)
TEMPLATE_(size_t) TEMPLATE_(uint32_t) TEMPLATE_(uint8_t)
#undef TEMPLATE_
#define BROTLI_MIN(T, A, B) (brotli_min_ ## T((A), (B)))
#define BROTLI_MAX(T, A, B) (brotli_max_ ## T((A), (B)))
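
/* Illustrative usage (hypothetical variables, not part of this header):
   BROTLI_MIN(size_t, a, b) expands to brotli_min_size_t(a, b), a real
   function call, so unlike a naive ((A) < (B) ? (A) : (B)) macro each
   argument is evaluated exactly once:

     size_t n = BROTLI_MIN(size_t, remaining, block_size);
*/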

#define BROTLI_SWAP(T, A, I, J) { \
  T __brotli_swap_tmp = (A)[(I)]; \
  (A)[(I)] = (A)[(J)];            \
  (A)[(J)] = __brotli_swap_tmp;   \
}
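
/* Illustrative usage (not part of this header): swap two array elements via
   a temporary of the element type T.

     uint32_t keys[4] = { 3, 1, 4, 1 };
     BROTLI_SWAP(uint32_t, keys, 0, 2);

   After the call, keys holds { 4, 1, 3, 1 }. */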

#define BROTLI_ENSURE_CAPACITY(M, T, A, C, R) {  \
  if (C < (R)) {                                 \
    size_t _new_size = (C == 0) ? (R) : C;       \
    T* new_array;                                \
    while (_new_size < (R)) _new_size *= 2;      \
    new_array = BROTLI_ALLOC((M), T, _new_size); \
    if (!BROTLI_IS_OOM(M) && C != 0)             \
      memcpy(new_array, A, C * sizeof(T));       \
    BROTLI_FREE((M), A);                         \
    A = new_array;                               \
    C = _new_size;                               \
  }                                              \
}
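
/* Illustrative usage (hypothetical variables, not part of this header):

     MemoryManager* m = ...;
     uint32_t* buf = NULL;
     size_t capacity = 0;
     BROTLI_ENSURE_CAPACITY(m, uint32_t, buf, capacity, 1000);
     if (BROTLI_IS_OOM(m)) return;

   The capacity doubles until it covers the requirement R, so a sequence of
   growing requirements triggers only O(log n) reallocations. BROTLI_ALLOC,
   BROTLI_FREE and BROTLI_IS_OOM come from the encoder's memory manager. */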

#endif /* BROTLI_ENC_PORT_H_ */