Merge pull request #569 from lz4/circle2
Migrate CircleCI tests from 1.0 towards 2.0
commit 86ad9131db

.circleci/config.yml (new file, 108 lines added)
@@ -0,0 +1,108 @@
# This configuration was automatically generated from a CircleCI 1.0 config.
# It should include any build commands you had along with commands that CircleCI
# inferred from your project structure. We strongly recommend you read all the
# comments in this file to understand the structure of CircleCI 2.0, as the idiom
# for configuration has changed substantially in 2.0 to allow arbitrary jobs rather
# than the prescribed lifecycle of 1.0. In general, we recommend using this generated
# configuration as a reference rather than using it in production, though in most
# cases it should duplicate the execution of your original 1.0 config.
version: 2
jobs:
  build:
    working_directory: ~/lz4/lz4
    parallelism: 1
    shell: /bin/bash --login
    # CircleCI 2.0 does not support environment variables that refer to each other the same way as 1.0 did.
    # If any of these refer to each other, rewrite them so that they don't or see https://circleci.com/docs/2.0/env-vars/#interpolating-environment-variables-to-set-other-environment-variables .
    environment:
      CIRCLE_ARTIFACTS: /tmp/circleci-artifacts
      CIRCLE_TEST_REPORTS: /tmp/circleci-test-results
    # In CircleCI 1.0 we used a pre-configured image with a large number of languages and other packages.
    # In CircleCI 2.0 you can now specify your own image, or use one of our pre-configured images.
    # The following configuration line tells CircleCI to use the specified docker image as the runtime environment for you job.
    # We have selected a pre-built image that mirrors the build environment we use on
    # the 1.0 platform, but we recommend you choose an image more tailored to the needs
    # of each job. For more information on choosing an image (or alternatively using a
    # VM instead of a container) see https://circleci.com/docs/2.0/executor-types/
    # To see the list of pre-built images that CircleCI provides for most common languages see
    # https://circleci.com/docs/2.0/circleci-images/
    docker:
    - image: circleci/build-image:ubuntu-14.04-XXL-upstart-1189-5614f37
      command: /sbin/init
    steps:
    # Machine Setup
    #   If you break your build into multiple jobs with workflows, you will probably want to do the parts of this that are relevant in each
    # The following `checkout` command checks out your code to your working directory. In 1.0 we did this implicitly. In 2.0 you can choose where in the course of a job your code should be checked out.
    - checkout
    # Prepare for artifact and test results collection equivalent to how it was done on 1.0.
    # In many cases you can simplify this from what is generated here.
    # 'See docs on artifact collection here https://circleci.com/docs/2.0/artifacts/'
    - run: mkdir -p $CIRCLE_ARTIFACTS $CIRCLE_TEST_REPORTS
    # Dependencies
    #   This would typically go in either a build or a build-and-test job when using workflows
    # Restore the dependency cache
    - restore_cache:
        keys:
        # This branch if available
        - v1-dep-{{ .Branch }}-
        # Default branch if not
        - v1-dep-dev-
        # Any branch if there are none on the default branch - this should be unnecessary if you have your default branch configured correctly
        - v1-dep-
    # This is based on your 1.0 configuration file or project settings
    - run: sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test; sudo apt-get -y -qq update
    - run: sudo apt-get -y install qemu-system-ppc qemu-user-static gcc-powerpc-linux-gnu
    - run: sudo apt-get -y install qemu-system-arm gcc-arm-linux-gnueabi libc6-dev-armel-cross gcc-aarch64-linux-gnu libc6-dev-arm64-cross
    - run: sudo apt-get -y install libc6-dev-i386 clang gcc-5 gcc-5-multilib gcc-6 valgrind
    # Save dependency cache
    - save_cache:
        key: v1-dep-{{ .Branch }}-{{ epoch }}
        paths:
        # This is a broad list of cache paths to include many possible development environments
        # You can probably delete some of these entries
        - vendor/bundle
        - ~/virtualenvs
        - ~/.m2
        - ~/.ivy2
        - ~/.bundle
        - ~/.go_workspace
        - ~/.gradle
        - ~/.cache/bower
    # Test
    #   This would typically be a build job when using workflows, possibly combined with build
    # This is based on your 1.0 configuration file or project settings
    - run: clang -v; make clangtest && make clean
    - run: g++ -v; make gpptest && make clean
    - run: gcc -v; make c_standards && make clean
    - run: gcc -v; g++ -v; make ctocpptest && make clean
    - run: gcc-5 -v; CC=gcc-5 CFLAGS="-O2 -Werror" make check && make clean
    - run: gcc-5 -v; CC=gcc-5 CFLAGS="-O2 -m32 -Werror" CPPFLAGS=-I/usr/include/x86_64-linux-gnu make check && make clean
    - run: gcc-6 -v; CC=gcc-6 make c_standards && make clean
    - run: gcc-6 -v; CC=gcc-6 MOREFLAGS="-O2 -Werror" make check && make clean
    - run: make cmake && make clean
    - run: make -C tests test-lz4
    - run: make -C tests test-lz4c
    - run: make -C tests test-frametest
    - run: make -C tests test-fullbench
    - run: make -C tests test-fuzzer && make clean
    - run: make -C lib all && make clean
    - run: pyenv global 3.4.4; make versionsTest MOREFLAGS=-I/usr/include/x86_64-linux-gnu && make clean
    - run: make travis-install && make clean
    - run: gcc -v; CFLAGS="-O2 -m32 -Werror" CPPFLAGS=-I/usr/include/x86_64-linux-gnu make check && make clean
    - run: make usan && make clean
    - run: clang -v; make staticAnalyze && make clean
    - run: make -C tests test-mem && make clean
    - run: make platformTest CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static && make clean
    - run: make platformTest CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static MOREFLAGS=-m64 && make clean
    - run: make platformTest CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static && make clean
    - run: make platformTest CC=aarch64-linux-gnu-gcc QEMU_SYS=qemu-aarch64-static && make clean
    # Teardown
    #   If you break your build into multiple jobs with workflows, you will probably want to do the parts of this that are relevant in each
    # Save test results
    - store_test_results:
        path: /tmp/circleci-test-results
    # Save artifacts
    - store_artifacts:
        path: /tmp/circleci-artifacts
    - store_artifacts:
        path: /tmp/circleci-test-results

circle.yml (deleted, 39 lines removed)
@@ -1,39 +0,0 @@
dependencies:
  override:
    - sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test; sudo apt-get -y -qq update
    - sudo apt-get -y install qemu-system-ppc qemu-user-static gcc-powerpc-linux-gnu
    - sudo apt-get -y install qemu-system-arm gcc-arm-linux-gnueabi libc6-dev-armel-cross gcc-aarch64-linux-gnu libc6-dev-arm64-cross
    - sudo apt-get -y install libc6-dev-i386 clang gcc-5 gcc-5-multilib gcc-6 valgrind

test:
  override:
    # Tests compilers and C standards
    - clang -v; make clangtest && make clean
    - g++ -v; make gpptest && make clean
    - gcc -v; make c_standards && make clean
    - gcc -v; g++ -v; make ctocpptest && make clean
    - gcc-5 -v; CC=gcc-5 CFLAGS="-O2 -Werror" make check && make clean
    - gcc-5 -v; CC=gcc-5 CFLAGS="-O2 -m32 -Werror" CPPFLAGS=-I/usr/include/x86_64-linux-gnu make check && make clean
    - gcc-6 -v; CC=gcc-6 make c_standards && make clean
    - gcc-6 -v; CC=gcc-6 MOREFLAGS="-O2 -Werror" make check && make clean
    # Shorter tests
    - make cmake && make clean
    - make -C tests test-lz4
    - make -C tests test-lz4c
    - make -C tests test-frametest
    - make -C tests test-fullbench
    - make -C tests test-fuzzer && make clean
    - make -C lib all && make clean
    - pyenv global 3.4.4; make versionsTest MOREFLAGS=-I/usr/include/x86_64-linux-gnu && make clean
    - make travis-install && make clean
    # Longer tests
    - gcc -v; CFLAGS="-O2 -m32 -Werror" CPPFLAGS=-I/usr/include/x86_64-linux-gnu make check && make clean
    - make usan && make clean
    - clang -v; make staticAnalyze && make clean
    # Valgrind tests
    - make -C tests test-mem && make clean
    # ARM, AArch64, PowerPC, PowerPC64 tests
    - make platformTest CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static && make clean
    - make platformTest CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static MOREFLAGS=-m64 && make clean
    - make platformTest CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static && make clean
    - make platformTest CC=aarch64-linux-gnu-gcc QEMU_SYS=qemu-aarch64-static && make clean

lib/lz4.c
@@ -1394,8 +1394,8 @@ int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
  * Note that it is important for performance that this function really get inlined,
  * in order to remove useless branches during compilation optimization.
  */
-LZ4_FORCE_O2_GCC_PPC64LE
-LZ4_FORCE_INLINE int LZ4_decompress_generic(
+LZ4_FORCE_INLINE int
+LZ4_decompress_generic(
                  const char* const src,
                  char* const dst,
                  int srcSize,

lib/lz4.h (98 changed lines)
@@ -183,54 +183,57 @@ LZ4_compress_fast_extState() :
   Same compression function, just using an externally allocated memory space to store compression state.
   Use LZ4_sizeofState() to know how much memory must be allocated,
   and allocate it on 8-bytes boundaries (using malloc() typically).
-  Then, provide it as 'void* state' to compression function.
+  Then, provide this buffer as 'void* state' to compression function.
 */
 LZ4LIB_API int LZ4_sizeofState(void);
 LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
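
For readers of the comment above, a minimal usage sketch of the externally-allocated-state pattern it describes. The sketch is illustrative only and not part of this diff; the input string and buffer sizes are arbitrary example values.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include "lz4.h"

    int main(void)
    {
        const char src[] = "state allocated by the caller, reusable across calls";
        char dst[256];

        /* Allocate the compression state yourself; malloc() gives sufficiently aligned memory. */
        void* const state = malloc((size_t)LZ4_sizeofState());
        if (state == NULL) return 1;

        /* Pass the buffer as 'void* state'; acceleration 1 is the default setting. */
        int const cSize = LZ4_compress_fast_extState(state, src, dst,
                                                     (int)sizeof(src), (int)sizeof(dst), 1);
        printf("compressed %d bytes into %d bytes\n", (int)sizeof(src), cSize);

        free(state);
        return (cSize > 0) ? 0 : 1;
    }
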

-/*!
-LZ4_compress_destSize() :
-    Reverse the logic : compresses as much data as possible from 'src' buffer
-    into already allocated buffer 'dst' of size 'targetDestSize'.
-    This function either compresses the entire 'src' content into 'dst' if it's large enough,
-    or fill 'dst' buffer completely with as much data as possible from 'src'.
-    *srcSizePtr : will be modified to indicate how many bytes where read from 'src' to fill 'dst'.
-                  New value is necessarily <= old value.
-    return : Nb bytes written into 'dst' (necessarily <= targetDestSize)
-             or 0 if compression fails
+/*! LZ4_compress_destSize() :
+ *  Reverse the logic : compresses as much data as possible from 'src' buffer
+ *  into already allocated buffer 'dst', of size >= 'targetDestSize'.
+ *  This function either compresses the entire 'src' content into 'dst' if it's large enough,
+ *  or fill 'dst' buffer completely with as much data as possible from 'src'.
+ *  note: acceleration parameter is fixed to "default".
+ *
+ * *srcSizePtr : will be modified to indicate how many bytes where read from 'src' to fill 'dst'.
+ *               New value is necessarily <= input value.
+ * @return : Nb bytes written into 'dst' (necessarily <= targetDestSize)
+ *           or 0 if compression fails.
 */
 LZ4LIB_API int LZ4_compress_destSize (const char* src, char* dst, int* srcSizePtr, int targetDstSize);
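
Again as an illustration only (not part of the diff), a small sketch of the "reverse logic" documented above: fill a deliberately small 'dst' and observe how much of 'src' was actually consumed. Buffer sizes are arbitrary example values.

    #include <stdio.h>
    #include <string.h>
    #include "lz4.h"

    int main(void)
    {
        char src[1024];
        char dst[128];                     /* deliberately small destination */
        memset(src, 'A', sizeof(src));

        int srcSize = (int)sizeof(src);    /* in: available input; out: bytes actually read */
        int const written = LZ4_compress_destSize(src, dst, &srcSize, (int)sizeof(dst));

        if (written == 0) { printf("compression failed\n"); return 1; }
        printf("consumed %d input bytes, wrote %d bytes (<= %d)\n",
               srcSize, written, (int)sizeof(dst));
        return 0;
    }
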

-/*!
-LZ4_decompress_fast() : **unsafe!**
-    This function is a bit faster than LZ4_decompress_safe(),
-    but it may misbehave on malformed input because it doesn't perform full validation of compressed data.
-    originalSize : is the uncompressed size to regenerate
-                   Destination buffer must be already allocated, and its size must be >= 'originalSize' bytes.
-    return : number of bytes read from source buffer (== compressed size).
-             If the source stream is detected malformed, the function stops decoding and return a negative result.
-    note : This function is only usable if the originalSize of uncompressed data is known in advance.
-           The caller should also check that all the compressed input has been consumed properly,
-           i.e. that the return value matches the size of the buffer with compressed input.
-           The function never writes past the output buffer. However, since it doesn't know its 'src' size,
-           it may read past the intended input. Also, because match offsets are not validated during decoding,
-           reads from 'src' may underflow. Use this function in trusted environment **only**.
+/*! LZ4_decompress_fast() : **unsafe!**
+ *  This function used to be a bit faster than LZ4_decompress_safe(),
+ *  though situation has changed in recent versions,
+ *  and now `LZ4_decompress_safe()` can be as fast and sometimes faster than `LZ4_decompress_fast()`.
+ *  Moreover, LZ4_decompress_fast() is not protected vs malformed input, as it doesn't perform full validation of compressed data.
+ *  As a consequence, this function is no longer recommended, and may be deprecated in future versions.
+ *  It's only remaining specificity is that it can decompress data without knowing its compressed size.
+ *
+ *  originalSize : is the uncompressed size to regenerate.
+ *                 `dst` must be already allocated, its size must be >= 'originalSize' bytes.
+ * @return : number of bytes read from source buffer (== compressed size).
+ *           If the source stream is detected malformed, the function stops decoding and returns a negative result.
+ *  note : This function requires uncompressed originalSize to be known in advance.
+ *         The function never writes past the output buffer.
+ *         However, since it doesn't know its 'src' size, it may read past the intended input.
+ *         Also, because match offsets are not validated during decoding,
+ *         reads from 'src' may underflow.
+ *         Use this function in trusted environment **only**.
 */
 LZ4LIB_API int LZ4_decompress_fast (const char* src, char* dst, int originalSize);
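
Since the updated comment steers users toward LZ4_decompress_safe(), here is a hedged round-trip sketch of that safer path. LZ4_compress_default(), LZ4_COMPRESSBOUND() and LZ4_decompress_safe() are existing lz4.h APIs that do not appear in this diff; the example itself is illustrative only.

    #include <stdio.h>
    #include <string.h>
    #include "lz4.h"

    int main(void)
    {
        const char original[] = "prefer LZ4_decompress_safe() when input cannot be fully trusted";
        char compressed[LZ4_COMPRESSBOUND(sizeof(original))];
        char regenerated[sizeof(original)];

        int const cSize = LZ4_compress_default(original, compressed,
                                               (int)sizeof(original), (int)sizeof(compressed));
        if (cSize <= 0) return 1;

        /* LZ4_decompress_safe() needs the compressed size and never reads or writes out of bounds. */
        int const dSize = LZ4_decompress_safe(compressed, regenerated, cSize, (int)sizeof(regenerated));
        if (dSize < 0) { printf("malformed input detected\n"); return 1; }

        printf("round-trip ok: %d -> %d -> %d bytes\n", (int)sizeof(original), cSize, dSize);
        return 0;
    }
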

-/*!
-LZ4_decompress_safe_partial() :
-    This function decompress a compressed block of size 'srcSize' at position 'src'
-    into destination buffer 'dst' of size 'dstCapacity'.
-    The function will decompress a minimum of 'targetOutputSize' bytes, and stop after that.
-    However, it's not accurate, and may write more than 'targetOutputSize' (but always <= dstCapacity).
-   @return : the number of bytes decoded in the destination buffer (necessarily <= dstCapacity)
-      Note : this number can also be < targetOutputSize, if compressed block contains less data.
-             Therefore, always control how many bytes were decoded.
-             If source stream is detected malformed, function returns a negative result.
-             This function is protected against malicious data packets.
+/*! LZ4_decompress_safe_partial() :
+ *  This function decompresses a compressed block of size 'srcSize' at position 'src'
+ *  into destination buffer 'dst' of size 'dstCapacity'.
+ *  The function will decompress a minimum of 'targetOutputSize' bytes, and stop after that.
+ *  However, it's not accurate, and may write more than 'targetOutputSize' (but always <= dstCapacity).
+ * @return : the number of bytes decoded in `dst` (necessarily <= dstCapacity)
+ *    Note : this number can also be < targetOutputSize, if compressed block contains less data.
+ *           If source stream is detected malformed, function returns a negative result.
+ *           This function is protected against malicious data packets.
 */
 LZ4LIB_API int LZ4_decompress_safe_partial (const char* src, char* dst, int srcSize, int targetOutputSize, int dstCapacity);
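
An illustrative sketch (not part of the diff) of the partial decoding described above. It first compresses a small buffer with LZ4_compress_default(), an lz4.h API not shown in this hunk, then asks the decoder to stop after roughly 20 bytes.

    #include <stdio.h>
    #include <string.h>
    #include "lz4.h"

    int main(void)
    {
        const char original[] =
            "Only the first part of this text is needed by the consumer, "
            "so we stop decoding early instead of regenerating everything.";
        char compressed[LZ4_COMPRESSBOUND(sizeof(original))];
        char dst[sizeof(original)];

        int const cSize = LZ4_compress_default(original, compressed,
                                               (int)sizeof(original), (int)sizeof(compressed));
        if (cSize <= 0) return 1;

        /* Request at least 20 bytes; the function may decode more, but never beyond dst capacity. */
        int const decoded = LZ4_decompress_safe_partial(compressed, dst, cSize,
                                                        20, (int)sizeof(dst));
        if (decoded < 0) { printf("malformed input detected\n"); return 1; }

        printf("decoded %d bytes (asked for >= 20, capacity %d)\n", decoded, (int)sizeof(dst));
        return 0;
    }
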

@@ -266,16 +269,23 @@ LZ4LIB_API int LZ4_loadDict (LZ4_stream_t* streamPtr, const char* dictionary, in
  * 'dst' buffer must be already allocated.
  * If dstCapacity >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster.
  *
- * Important : The previous 64KB of source data is assumed to remain present and unmodified in memory!
- *
- * Special 1 : When input is a double-buffer, they can have any size, including < 64 KB.
- *             Make sure that buffers are separated by at least one byte.
- *             This way, each block only depends on previous block.
- * Special 2 : If input buffer is a ring-buffer, it can have any size, including < 64 KB.
- *
  * @return : size of compressed block
  *           or 0 if there is an error (typically, cannot fit into 'dst').
- *           After an error, the stream status is invalid, it can only be reset or freed.
+ *
+ * Note 1 : Each invocation to LZ4_compress_fast_continue() will generate a new block.
+ *          Each block has precise boundaries.
+ *          It's not possible to append blocks together and expect a single invocation of LZ4_decompress_*() to decompress them together.
+ *          Each block must be decompressed separately, calling LZ4_decompress_*() with associated metadata.
+ *
+ * Note 2 : The previous 64KB of source data is assumed to remain present, unmodified, at same address in memory!
+ *
+ * Note 3 : When input is structured as a double-buffer, each buffer can have any size, including < 64 KB.
+ *          Make sure that buffers are separated, by at least one byte.
+ *          This construction ensures that each block only depends on previous block.
+ *
+ * Note 4 : If input buffer is a ring-buffer, it can have any size, including < 64 KB.
+ *
+ * Note 5 : After an error, the stream status is invalid, it can only be reset or freed.
  */
 LZ4LIB_API int LZ4_compress_fast_continue (LZ4_stream_t* streamPtr, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
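
To illustrate Notes 1-3 and 5 above, a condensed streaming sketch (not part of the diff). It uses LZ4_createStream()/LZ4_freeStream() from lz4.h, which are outside this hunk; the chunk size and loop count are arbitrary, and the matching decompression side (LZ4_decompress_safe_continue(), one call per block) is only mentioned in a comment.

    #include <stdio.h>
    #include <string.h>
    #include "lz4.h"

    #define CHUNK 4096

    int main(void)
    {
        /* Double-buffer: two separate arrays, so the buffers are not contiguous (Note 3),
           and the previously compressed chunk stays valid in memory (Note 2). */
        static char bufA[CHUNK];
        static char bufB[CHUNK];
        char* const inputBuf[2] = { bufA, bufB };
        char block[LZ4_COMPRESSBOUND(CHUNK)];

        LZ4_stream_t* const stream = LZ4_createStream();
        if (stream == NULL) return 1;

        for (int i = 0; i < 4; i++) {
            char* const chunk = inputBuf[i & 1];
            memset(chunk, 'a' + i, CHUNK);          /* stand-in for real input */

            int const blockSize = LZ4_compress_fast_continue(stream, chunk, block,
                                                             CHUNK, (int)sizeof(block), 1);
            if (blockSize <= 0) break;              /* stream is invalid after an error (Note 5) */

            /* Each (blockSize, block) pair is one self-contained block (Note 1):
               store or transmit it together with its size, and decode it on its own,
               e.g. with LZ4_decompress_safe_continue() on the receiving side. */
            printf("block %d: %d -> %d bytes\n", i, CHUNK, blockSize);
        }

        LZ4_freeStream(stream);
        return 0;
    }
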