__msan_poison() Workspace When Preparing for Re-Use

W. Felix Handte 2019-09-11 16:40:29 -04:00
parent 194c542598
commit a10c191613
3 changed files with 55 additions and 0 deletions

@@ -47,6 +47,23 @@ extern "C" {
#define MEM_STATIC_ASSERT(c) { enum { MEM_static_assert = 1/(int)(!!(c)) }; }
MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
/* detects whether we are being compiled under msan */
#if defined (__has_feature)
# if __has_feature(memory_sanitizer)
# define MEMORY_SANITIZER 1
# endif
#endif
#if defined (MEMORY_SANITIZER)
# include <sanitizer/msan_interface.h>
#endif
#if defined (MEMORY_SANITIZER)
# define MEM_SKIP_MSAN __attribute__((no_sanitize("memory")))
#else
# define MEM_SKIP_MSAN
#endif
/*-**************************************************************
* Basic Types

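The hunk above gives the rest of the library two tools: a MEMORY_SANITIZER flag that is set only when the code is compiled under clang's MemorySanitizer (detected via __has_feature), and a MEM_SKIP_MSAN attribute macro that exempts an individual function from MSan instrumentation. A minimal sketch of how the attribute can be applied (the function below is hypothetical and not part of this commit):

    #include <string.h>

    /* MSan does not instrument this function, so it may deliberately read
     * bytes whose shadow state is "uninitialized" without triggering a
     * report. */
    MEM_SKIP_MSAN static unsigned read32_unchecked(const void* ptr)
    {
        unsigned val;
        memcpy(&val, ptr, sizeof(val));
        return val;
    }
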
@@ -1799,6 +1799,20 @@ ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark)
int rowNb;
assert((size & (ZSTD_ROWSIZE-1)) == 0); /* multiple of ZSTD_ROWSIZE */
assert(size < (1U<<31)); /* can be cast to int */
#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
/* To validate that the table re-use logic is sound, and that we don't
* access table space that we haven't cleaned, we re-"poison" the table
* space every time we mark it dirty.
*
* This function, however, is intended to operate on those dirty tables and
* re-clean them. So, when this function is used correctly, we can unpoison
* the memory it operated on. This introduces a blind spot, though: if we
* now try to operate on __actually__ poisoned memory, we will not detect
* that. */
__msan_unpoison(table, size * sizeof(U32));
#endif
for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
int column;
for (column=0; column<ZSTD_ROWSIZE; column++) {

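For readers unfamiliar with the interface: __msan_poison() marks a byte range as uninitialized in MSan's shadow memory, and __msan_unpoison() marks it as initialized again, regardless of what the program actually wrote there. The blind spot described in the comment above can be seen in a small standalone illustration (not from the commit; compile with clang -fsanitize=memory):

    #include <sanitizer/msan_interface.h>

    int main(void)
    {
        unsigned table[4] = {0, 0, 0, 0};       /* genuinely initialized */
        __msan_poison(table, sizeof(table));    /* shadow now says "garbage" */
        /* unsigned x = table[0];  <- would be reported as a poisoned read */
        __msan_unpoison(table, sizeof(table));  /* shadow is clean again... */
        return (int)table[0];                   /* ...so no report, even if the
                                                 * data had really been garbage:
                                                 * the blind spot. */
    }
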
@@ -267,6 +267,18 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) {
DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");
#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
/* To validate that the table re-use logic is sound, and that we don't
* access table space that we haven't cleaned, we re-"poison" the table
* space every time we mark it dirty. */
{
size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
assert(__msan_test_shadow(ws->objectEnd, size) == -1);
__msan_poison(ws->objectEnd, size);
}
#endif
assert(ws->tableValidEnd >= ws->objectEnd);
assert(ws->tableValidEnd <= ws->allocStart);
ws->tableValidEnd = ws->objectEnd;
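__msan_test_shadow() returns -1 when the entire range is unpoisoned, and otherwise the offset of the first poisoned byte, so the assert above verifies that the whole table area was re-cleaned before it is poisoned again. A hedged sketch of the same assert-then-poison pattern in isolation (the helper name is hypothetical):

    #include <sanitizer/msan_interface.h>
    #include <assert.h>
    #include <stddef.h>

    static void mark_dirty(void* p, size_t n)
    {
        assert(__msan_test_shadow(p, n) == -1); /* whole range must be clean */
        __msan_poison(p, n);
        assert(__msan_test_shadow(p, n) == 0);  /* first poisoned byte: offset 0 */
    }
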
@@ -309,6 +321,18 @@ MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
*/
MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
DEBUGLOG(4, "cwksp: clearing!");
#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
/* To validate that the context re-use logic is sound, and that we don't
* access stuff that this compression hasn't initialized, we re-"poison"
* the workspace (or at least the non-static, non-table parts of it)
* every time we start a new compression. */
{
size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->tableValidEnd;
__msan_poison(ws->tableValidEnd, size);
}
#endif
ws->tableEnd = ws->objectEnd;
ws->allocStart = ws->workspaceEnd;
ws->allocFailed = 0;
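
Taken together, the three hunks enforce a lifecycle: ZSTD_cwksp_clear() poisons the non-object, non-table parts of the workspace at the start of each compression, ZSTD_cwksp_mark_tables_dirty() poisons the table area whenever its contents stop being trustworthy, and ZSTD_reduceTable_internal() unpoisons the rows it is about to re-clean. Any read of dirty table space in between now produces an MSan report. One plausible way to exercise the new checks (the exact invocation is not specified by this commit) is an MSan build along the lines of:

    make CC=clang MOREFLAGS="-fsanitize=memory -fsanitize-memory-track-origins"

with -DZSTD_MSAN_DONT_POISON_WORKSPACE serving as the opt-out that each of the new guards checks for.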