diff --git a/src/native/external/zlib-intel.cmake b/src/native/external/zlib-intel.cmake
index a9d0be1fa4b059..1b6fa0cb4765bf 100644
--- a/src/native/external/zlib-intel.cmake
+++ b/src/native/external/zlib-intel.cmake
@@ -20,6 +20,10 @@ set(ZLIB_SOURCES_BASE
     trees.c
     x86.c
     zutil.c
+    ../../libs/System.IO.Compression.Native/zlib_allocator_win.c
 )
 
+# enable custom zlib allocator
+add_definitions(-DMY_ZCALLOC)
+
 addprefix(ZLIB_SOURCES "${CMAKE_CURRENT_LIST_DIR}/zlib-intel" "${ZLIB_SOURCES_BASE}")
diff --git a/src/native/external/zlib.cmake b/src/native/external/zlib.cmake
index 80b5f7b1a54387..b08d2574df6746 100644
--- a/src/native/external/zlib.cmake
+++ b/src/native/external/zlib.cmake
@@ -29,4 +29,12 @@ set(ZLIB_SOURCES_BASE
     zutil.h
 )
 
+# enable custom zlib allocator
+add_definitions(-DMY_ZCALLOC)
+if(CLR_CMAKE_HOST_WIN32)
+    set(ZLIB_SOURCES_BASE ${ZLIB_SOURCES_BASE} ../../libs/System.IO.Compression.Native/zlib_allocator_win.c)
+else()
+    set(ZLIB_SOURCES_BASE ${ZLIB_SOURCES_BASE} ../../libs/System.IO.Compression.Native/zlib_allocator_unix.c)
+endif()
+
 addprefix(ZLIB_SOURCES "${CMAKE_CURRENT_LIST_DIR}/zlib" "${ZLIB_SOURCES_BASE}")
diff --git a/src/native/libs/System.IO.Compression.Native/CMakeLists.txt b/src/native/libs/System.IO.Compression.Native/CMakeLists.txt
index 845c09097f7745..b8a726542ce3ad 100644
--- a/src/native/libs/System.IO.Compression.Native/CMakeLists.txt
+++ b/src/native/libs/System.IO.Compression.Native/CMakeLists.txt
@@ -104,6 +104,7 @@ else ()
 
     if (CLR_CMAKE_HOST_ARCH_I386 OR CLR_CMAKE_HOST_ARCH_AMD64)
         include(${CLR_SRC_NATIVE_DIR}/external/zlib-intel.cmake)
+        add_definitions(-DINTERNAL_ZLIB_INTEL)
     else ()
         include(${CLR_SRC_NATIVE_DIR}/external/zlib.cmake)
     endif ()
diff --git a/src/native/libs/System.IO.Compression.Native/pal_zlib.c b/src/native/libs/System.IO.Compression.Native/pal_zlib.c
index f5d25d66106b02..1fd84662264a65 100644
--- a/src/native/libs/System.IO.Compression.Native/pal_zlib.c
+++ b/src/native/libs/System.IO.Compression.Native/pal_zlib.c
@@ -9,7 +9,11 @@
     #ifdef _WIN32
         #define c_static_assert(e) static_assert((e),"")
     #endif
-    #include <external/zlib/zlib.h>
+    #ifdef INTERNAL_ZLIB_INTEL
+    #include <external/zlib-intel/zlib.h>
+    #else
+    #include <external/zlib/zlib.h>
+    #endif
 #else
     #include "pal_utilities.h"
     #include <zlib.h>
@@ -39,14 +43,11 @@ Initializes the PAL_ZStream by creating and setting its underlying z_stream.
 */
 static int32_t Init(PAL_ZStream* stream)
 {
-    z_stream* zStream = (z_stream*)malloc(sizeof(z_stream));
+    z_stream* zStream = (z_stream*)calloc(1, sizeof(z_stream));
     stream->internalState = zStream;
 
     if (zStream != NULL)
     {
-        zStream->zalloc = Z_NULL;
-        zStream->zfree = Z_NULL;
-        zStream->opaque = Z_NULL;
         return PAL_Z_OK;
     }
     else
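The Init() change above works hand in hand with the -DMY_ZCALLOC definition in the cmake files: calloc() leaves zalloc, zfree, and opaque as Z_NULL, and zlib's init routines respond to Z_NULL by installing their internal zcalloc/zcfree hooks, which MY_ZCALLOC allows the new allocator files below to supply. A minimal sketch of that hand-off; this is a hypothetical standalone program, not part of the patch, and it assumes it links against a zlib variant built as configured above:

#include <stdio.h>
#include <string.h>
#include <zlib.h>

int main(void)
{
    z_stream zs;
    memset(&zs, 0, sizeof(zs)); /* same effect as the calloc() in Init():
                                   zalloc/zfree/opaque start out Z_NULL */

    if (deflateInit(&zs, Z_DEFAULT_COMPRESSION) != Z_OK) { return 1; }

    /* deflateInit saw zalloc == Z_NULL and installed zlib's internal hooks;
       with MY_ZCALLOC defined, those resolve to the zcalloc/zcfree below. */
    printf("allocator hook installed: %s\n", zs.zalloc != Z_NULL ? "yes" : "no");

    deflateEnd(&zs);
    return 0;
}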
diff --git a/src/native/libs/System.IO.Compression.Native/zlib_allocator_unix.c b/src/native/libs/System.IO.Compression.Native/zlib_allocator_unix.c
new file mode 100644
index 00000000000000..9eb4bbf2671057
--- /dev/null
+++ b/src/native/libs/System.IO.Compression.Native/zlib_allocator_unix.c
@@ -0,0 +1,153 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <external/zlib/zutil.h>
+
+/* A custom allocator for zlib that provides some defense-in-depth over standard malloc / free.
+ * (non-Windows version)
+ *
+ * 1. When zlib allocates fixed-length data structures for containing stream metadata, we zero
+ *    the memory before using it, preventing use of uninitialized memory within these structures.
+ *    Ideally we would do this for dynamically-sized buffers as well, but there is a measurable
+ *    perf impact to doing this. Zeroing fixed structures seems like a good trade-off here, since
+ *    these data structures contain most of the metadata used for managing the variable-length
+ *    dynamically allocated buffers.
+ *
+ * 2. We put a cookie both before and after any allocated memory, which allows us to detect local
+ *    buffer overruns on the call to free(). The cookie values are tied to the addresses where
+ *    the data is located in memory.
+ *
+ * 3. We trash the aforementioned cookie on free(), which allows us to detect double-free.
+ *
+ * If any of these checks fails, the application raises SIGABRT.
+ */
+
+#ifndef MEMORY_ALLOCATION_ALIGNMENT
+// malloc() returns an address suitably aligned for any built-in data type.
+// Historically, this has been twice the arch's natural word size.
+#ifdef HOST_64BIT
+#define MEMORY_ALLOCATION_ALIGNMENT 16
+#else
+#define MEMORY_ALLOCATION_ALIGNMENT 8
+#endif
+#endif
+
+typedef struct _DOTNET_ALLOC_COOKIE
+{
+    void* Address;
+    size_t Size;
+} DOTNET_ALLOC_COOKIE;
+
+static bool SafeAdd(size_t a, size_t b, size_t* sum)
+{
+    if (SIZE_MAX - a >= b) { *sum = a + b; return true; }
+    else { *sum = 0; return false; }
+}
+
+static bool SafeMult(size_t a, size_t b, size_t* product)
+{
+    if (a == 0 || SIZE_MAX / a >= b) { *product = a * b; return true; }
+    else { *product = 0; return false; }
+}
+
+static DOTNET_ALLOC_COOKIE ReadAllocCookieUnaligned(const void* pSrc)
+{
+    DOTNET_ALLOC_COOKIE vCookie;
+    memcpy(&vCookie, pSrc, sizeof(DOTNET_ALLOC_COOKIE));
+    return vCookie;
+}
+
+static void WriteAllocCookieUnaligned(void* pDest, DOTNET_ALLOC_COOKIE vCookie)
+{
+    memcpy(pDest, &vCookie, sizeof(DOTNET_ALLOC_COOKIE));
+}
+
+// Historically, the memory allocator always returns addresses aligned to some
+// particular boundary. We'll make that same guarantee here just in case somebody
+// depends on it.
+const size_t DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING = (sizeof(DOTNET_ALLOC_COOKIE) + MEMORY_ALLOCATION_ALIGNMENT - 1) & ~((size_t)MEMORY_ALLOCATION_ALIGNMENT - 1);
+const size_t DOTNET_ALLOC_TRAILER_COOKIE_SIZE = sizeof(DOTNET_ALLOC_COOKIE);
+
+voidpf ZLIB_INTERNAL zcalloc(opaque, items, size)
+    voidpf opaque;
+    unsigned items;
+    unsigned size;
+{
+    (void)opaque; // unreferenced formal parameter
+
+    // If initializing a fixed-size structure, zero the memory.
+    bool fZeroMemory = (items == 1);
+
+    size_t cbRequested;
+    if (sizeof(items) + sizeof(size) <= sizeof(cbRequested))
+    {
+        // multiplication can't overflow; no need for safeint
+        cbRequested = (size_t)items * (size_t)size;
+    }
+    else
+    {
+        // multiplication can overflow; go through safeint
+        if (!SafeMult((size_t)items, (size_t)size, &cbRequested)) { return NULL; }
+    }
+
+    // Make sure the actual allocation has enough room for our frontside & backside cookies.
+    size_t cbActualAllocationSize;
+    if (!SafeAdd(cbRequested, DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING + DOTNET_ALLOC_TRAILER_COOKIE_SIZE, &cbActualAllocationSize)) { return NULL; }
+
+    void* pAlloced = (fZeroMemory) ? calloc(1, cbActualAllocationSize) : malloc(cbActualAllocationSize);
+    if (pAlloced == NULL) { return NULL; } // OOM
+
+    DOTNET_ALLOC_COOKIE* pHeaderCookie = (DOTNET_ALLOC_COOKIE*)pAlloced;
+    uint8_t* pReturnToCaller = (uint8_t*)pAlloced + DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING;
+    uint8_t* pTrailerCookie = pReturnToCaller + cbRequested;
+
+    // Write out the same cookie for the header & the trailer, then we're done.
+
+    DOTNET_ALLOC_COOKIE vCookie = { 0 };
+    vCookie.Address = pReturnToCaller;
+    vCookie.Size = cbRequested;
+    *pHeaderCookie = vCookie; // aligned
+    WriteAllocCookieUnaligned(pTrailerCookie, vCookie);
+
+    return pReturnToCaller;
+}
+
+static void zcfree_trash_cookie(void* pCookie)
+{
+    memset(pCookie, 0, sizeof(DOTNET_ALLOC_COOKIE));
+}
+
+void ZLIB_INTERNAL zcfree(opaque, ptr)
+    voidpf opaque;
+    voidpf ptr;
+{
+    (void)opaque; // unreferenced formal parameter
+
+    if (ptr == NULL) { return; } // ok to free nullptr
+
+    // Check cookie at beginning
+
+    DOTNET_ALLOC_COOKIE* pHeaderCookie = (DOTNET_ALLOC_COOKIE*)((uint8_t*)ptr - DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING);
+    if (pHeaderCookie->Address != ptr) { goto Fail; }
+    size_t cbRequested = pHeaderCookie->Size;
+
+    // Check cookie at end
+
+    uint8_t* pTrailerCookie = (uint8_t*)ptr + cbRequested;
+    DOTNET_ALLOC_COOKIE vTrailerCookie = ReadAllocCookieUnaligned(pTrailerCookie);
+    if (vTrailerCookie.Address != ptr) { goto Fail; }
+    if (vTrailerCookie.Size != cbRequested) { goto Fail; }
+
+    // Checks passed - now trash the cookies and free memory
+
+    zcfree_trash_cookie(pHeaderCookie);
+    zcfree_trash_cookie(pTrailerCookie);
+
+    free(pHeaderCookie);
+    return;
+
+Fail:
+    abort(); // cookie check failed
+}
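Two points in the file above may be easier to see with numbers and a test. The padding constant uses the usual power-of-two round-up idiom, (x + A - 1) & ~(A - 1), so on a 64-bit build the 16-byte cookie rounds to exactly 16 and the pointer returned to zlib keeps malloc's 16-byte alignment. The paired cookies then turn an off-by-one write into a deterministic SIGABRT instead of silent corruption. The harness below is hypothetical and not part of the patch; it assumes the allocator object is linked in and its symbols are visible, which ZLIB_INTERNAL's hidden visibility may prevent in a real build:

#include <stdint.h>
#include <string.h>
#include <zlib.h> /* for voidpf */

/* Declarations matching the K&R definitions above; normally supplied by zutil.h. */
extern voidpf zcalloc(voidpf opaque, unsigned items, unsigned size);
extern void zcfree(voidpf opaque, voidpf ptr);

int main(void)
{
    uint8_t* p = (uint8_t*)zcalloc(NULL, 64, 1); /* 64 bytes; items != 1, so not zeroed */
    if (p == NULL) { return 1; }

    memset(p, 0xCC, 65);  /* one-byte overrun clobbers the trailer cookie's Address */
    zcfree(NULL, p);      /* trailer check fails -> abort() raises SIGABRT */

    /* Never reached. A correct free followed by a second free would also
       abort(), because the first zcfree trashes both cookies. */
    return 0;
}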
diff --git a/src/native/libs/System.IO.Compression.Native/zlib_allocator_win.c b/src/native/libs/System.IO.Compression.Native/zlib_allocator_win.c
new file mode 100644
index 00000000000000..9bdf694495e680
--- /dev/null
+++ b/src/native/libs/System.IO.Compression.Native/zlib_allocator_win.c
@@ -0,0 +1,181 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+#include <windows.h>
+#include <heapapi.h>
+#include <intsafe.h>
+#include <string.h>
+#include <crtdbg.h> /* _ASSERTE */
+
+#ifdef INTERNAL_ZLIB_INTEL
+#include <external/zlib-intel/zutil.h>
+#else
+#include <external/zlib/zutil.h>
+#endif
+
+/* A custom allocator for zlib that provides some defense-in-depth over standard malloc / free.
+ * (Windows-specific version)
+ *
+ * 1. In 64-bit processes, we use a custom heap rather than relying on the standard process heap.
+ *    This should cause zlib's buffers to go into a separate address range from the rest of app
+ *    data, making it more difficult for buffer overruns to affect non-zlib-related data structures.
+ *
+ * 2. When zlib allocates fixed-length data structures for containing stream metadata, we zero
+ *    the memory before using it, preventing use of uninitialized memory within these structures.
+ *    Ideally we would do this for dynamically-sized buffers as well, but there is a measurable
+ *    perf impact to doing this. Zeroing fixed structures seems like a good trade-off here, since
+ *    these data structures contain most of the metadata used for managing the variable-length
+ *    dynamically allocated buffers.
+ *
+ * 3. We put a cookie both before and after any allocated memory, which allows us to detect local
+ *    buffer overruns on the call to free(). The cookie values are enciphered to make it more
+ *    difficult for somebody to guess a correct value.
+ *
+ * 4. We trash the aforementioned cookie on free(), which allows us to detect double-free.
+ *
+ * If any of these checks fails, the application terminates immediately, optionally triggering a
+ * crash dump. We use a special code that's easy to search for in Watson.
+ */
+
+// Gets the special heap we'll allocate from.
+HANDLE GetZlibHeap()
+{
+#ifdef _WIN64
+    static HANDLE s_hPublishedHeap = NULL;
+
+    // If already initialized, return immediately.
+    // We don't need a volatile read here since the publish is performed with release semantics.
+    if (s_hPublishedHeap != NULL) { return s_hPublishedHeap; }
+
+    // Attempt to create a new heap. The heap will be dynamically sized.
+    HANDLE hNewHeap = HeapCreate(0, 0, 0);
+
+    if (hNewHeap != NULL)
+    {
+        // We created a new heap. Attempt to publish it.
+        if (InterlockedCompareExchangePointer(&s_hPublishedHeap, hNewHeap, NULL) != NULL)
+        {
+            HeapDestroy(hNewHeap); // Somebody published before us. Destroy our heap.
+            hNewHeap = NULL; // Guard against accidental use later in the method.
+        }
+    }
+    else
+    {
+        // If we can't create a new heap, fall back to the process default heap.
+        InterlockedCompareExchangePointer(&s_hPublishedHeap, GetProcessHeap(), NULL);
+    }
+
+    // Some thread - perhaps us, perhaps somebody else - published the heap. Return it.
+    // We don't need a volatile read here since the publish is performed with release semantics.
+    _ASSERTE(s_hPublishedHeap != NULL);
+    return s_hPublishedHeap;
+#else
+    // We don't want to create a new heap in a 32-bit process because it could end up
+    // reserving too much of the address space. Instead, fall back to the normal process heap.
+    return GetProcessHeap();
+#endif
+}
+
+typedef struct _DOTNET_ALLOC_COOKIE
+{
+    PVOID CookieValue;
+    union _Size
+    {
+        SIZE_T RawValue;
+        LPVOID EncodedValue;
+    } Size;
+} DOTNET_ALLOC_COOKIE;
+
+// Historically, the Windows memory allocator always returns addresses aligned to some
+// particular boundary. We'll make that same guarantee here just in case somebody
+// depends on it.
+const SIZE_T DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING = (sizeof(DOTNET_ALLOC_COOKIE) + MEMORY_ALLOCATION_ALIGNMENT - 1) & ~((SIZE_T)MEMORY_ALLOCATION_ALIGNMENT - 1);
+const SIZE_T DOTNET_ALLOC_TRAILER_COOKIE_SIZE = sizeof(DOTNET_ALLOC_COOKIE);
+
+voidpf ZLIB_INTERNAL zcalloc(opaque, items, size)
+    voidpf opaque;
+    unsigned items;
+    unsigned size;
+{
+    (void)opaque; // suppress C4100 - unreferenced formal parameter
+
+    // If initializing a fixed-size structure, zero the memory.
+    DWORD dwFlags = (items == 1) ? HEAP_ZERO_MEMORY : 0;
+
+    SIZE_T cbRequested;
+    if (sizeof(items) + sizeof(size) <= sizeof(cbRequested))
+    {
+        // multiplication can't overflow; no need for safeint
+        cbRequested = (SIZE_T)items * (SIZE_T)size;
+    }
+    else
+    {
+        // multiplication can overflow; go through safeint
+        if (FAILED(SIZETMult(items, size, &cbRequested))) { return NULL; }
+    }
+
+    // Make sure the actual allocation has enough room for our frontside & backside cookies.
+    SIZE_T cbActualAllocationSize;
+    if (FAILED(SIZETAdd(cbRequested, DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING + DOTNET_ALLOC_TRAILER_COOKIE_SIZE, &cbActualAllocationSize))) { return NULL; }
+
+    LPVOID pAlloced = HeapAlloc(GetZlibHeap(), dwFlags, cbActualAllocationSize);
+    if (pAlloced == NULL) { return NULL; } // OOM
+
+    // Now set the header & trailer cookies
+    DOTNET_ALLOC_COOKIE* pHeaderCookie = (DOTNET_ALLOC_COOKIE*)pAlloced;
+    pHeaderCookie->CookieValue = EncodePointer(&pHeaderCookie->CookieValue);
+    pHeaderCookie->Size.RawValue = cbRequested;
+
+    LPBYTE pReturnToCaller = (LPBYTE)pHeaderCookie + DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING;
+
+    UNALIGNED DOTNET_ALLOC_COOKIE* pTrailerCookie = (UNALIGNED DOTNET_ALLOC_COOKIE*)(pReturnToCaller + cbRequested);
+    pTrailerCookie->CookieValue = EncodePointer(&pTrailerCookie->CookieValue);
+    pTrailerCookie->Size.EncodedValue = EncodePointer((PVOID)cbRequested);
+
+    return pReturnToCaller;
+}
+
+FORCEINLINE
+void zcfree_trash_cookie(UNALIGNED DOTNET_ALLOC_COOKIE* pCookie)
+{
+    memset(pCookie, 0, sizeof(*pCookie));
+    pCookie->CookieValue = (PVOID)(SIZE_T)0xDEADBEEF;
+}
+
+// Marked noinline to keep it on the call stack during crash reports.
+DECLSPEC_NOINLINE
+DECLSPEC_NORETURN
+void zcfree_cookie_check_failed()
+{
+    __fastfail(FAST_FAIL_HEAP_METADATA_CORRUPTION);
+}
+
+void ZLIB_INTERNAL zcfree(opaque, ptr)
+    voidpf opaque;
+    voidpf ptr;
+{
+    (void)opaque; // suppress C4100 - unreferenced formal parameter
+
+    if (ptr == NULL) { return; } // ok to free nullptr
+
+    // Check cookie at beginning and end
+
+    DOTNET_ALLOC_COOKIE* pHeaderCookie = (DOTNET_ALLOC_COOKIE*)((LPBYTE)ptr - DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING);
+    if (DecodePointer(pHeaderCookie->CookieValue) != &pHeaderCookie->CookieValue) { goto Fail; }
+    SIZE_T cbRequested = pHeaderCookie->Size.RawValue;
+
+    UNALIGNED DOTNET_ALLOC_COOKIE* pTrailerCookie = (UNALIGNED DOTNET_ALLOC_COOKIE*)((LPBYTE)ptr + cbRequested);
+    if (DecodePointer(pTrailerCookie->CookieValue) != &pTrailerCookie->CookieValue) { goto Fail; }
+    if (DecodePointer(pTrailerCookie->Size.EncodedValue) != (LPVOID)cbRequested) { goto Fail; }
+
+    // Checks passed - now trash the cookies and free memory
+
+    zcfree_trash_cookie(pHeaderCookie);
+    zcfree_trash_cookie(pTrailerCookie);
+
+    if (!HeapFree(GetZlibHeap(), 0, pHeaderCookie)) { goto Fail; }
+    return;
+
+Fail:
+    zcfree_cookie_check_failed();
+}
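Three aspects of the Windows file deserve short sketches. First, GetZlibHeap() uses a publish-once pattern: any number of threads may race to create a heap, but only the first InterlockedCompareExchangePointer wins, and the losers destroy their instance and adopt the winner's. The same shape reduced to its essentials, with hypothetical names (createFn and destroyFn stand in for HeapCreate and HeapDestroy):

#include <windows.h>

static PVOID s_published = NULL;

/* Publish-once: the first thread to CAS its instance into s_published wins;
   losers destroy their instance and adopt the winner's. */
PVOID GetPublished(PVOID (*createFn)(void), void (*destroyFn)(PVOID))
{
    if (s_published != NULL) { return s_published; } /* already published */

    PVOID mine = createFn();
    if (mine != NULL
        && InterlockedCompareExchangePointer(&s_published, mine, NULL) != NULL)
    {
        destroyFn(mine); /* somebody published before us */
    }
    return s_published; /* may be NULL only if createFn failed everywhere */
}

GetZlibHeap() additionally publishes GetProcessHeap() when heap creation fails, so its callers never observe NULL.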
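Second, unlike the unix version's raw address cookies, the Windows cookie values pass through EncodePointer, which mixes a per-process secret into the stored value; an attacker who overruns a buffer cannot forge bytes that DecodePointer will map back to the cookie's own address. A round-trip sketch (hypothetical standalone program, APIs as documented):

#include <windows.h>
#include <stdio.h>

int main(void)
{
    int local = 0;
    PVOID stored = EncodePointer(&local);   /* what ends up in the cookie */
    printf("stored : %p\n", stored);        /* scrambled; not equal to &local */
    printf("decoded: %p == %p\n", DecodePointer(stored), (PVOID)&local);
    return 0;
}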
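Finally, trashing the cookies on free is what converts a double-free into an immediate fast-fail: after the first zcfree the header cookie holds 0xDEADBEEF, which cannot decode to its own address, so a second call takes the Fail path into __fastfail(FAST_FAIL_HEAP_METADATA_CORRUPTION). A hypothetical harness, not part of the patch, with the same linkage caveats as the unix example:

#include <windows.h>

/* Declarations matching the K&R definitions above (voidpf is void*);
   in a real build these come from zlib's zutil.h. */
void* zcalloc(void* opaque, unsigned items, unsigned size);
void zcfree(void* opaque, void* ptr);

int main(void)
{
    void* p = zcalloc(NULL, 1, 64); /* items == 1 => HEAP_ZERO_MEMORY */
    if (p == NULL) { return 1; }

    zcfree(NULL, p); /* cookies verify, then get trashed */
    zcfree(NULL, p); /* header cookie is now 0xDEADBEEF: DecodePointer
                        mismatch, so the process fast-fails instead of
                        handing corrupted memory back to the heap */
    return 0;
}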