1
0
mirror of https://github.com/php/php-src.git synced 2026-03-24 00:02:20 +01:00

Detect heap freelist corruption (#14054)

We keep track of free slots by organizing them in a linked list, with the
first word of every free slot being a pointer to the next one.

In order to make corruptions more difficult to exploit, we check the consistency
of these pointers before dereference by comparing them with a shadow. The shadow
is a copy of the pointer, stored at the end of the slot.

Before this change, an off-by-1 write was enough to produce a valid freelist
pointer. After this change, a larger out-of-bounds write is required for that.
The difficulty is increased further by mangling the shadow with a secret and
byte-swapping it, which increases the minimal length of the out-of-bounds
write required.

Closes GH-14054
This commit is contained in:
Arnaud Le Blanc
2024-06-12 14:02:48 +02:00
parent d1048a0869
commit 25360ef249
2 changed files with 199 additions and 17 deletions

View File

@@ -914,7 +914,8 @@ void zend_startup(zend_utility_functions *utility_functions) /* {{{ */
php_win32_cp_set_by_id(65001);
#endif
/* Set up early utility functions. */
/* Set up early utility functions. zend_mm depends on
* zend_random_bytes_insecure */
zend_random_bytes = utility_functions->random_bytes_function;
zend_random_bytes_insecure = utility_functions->random_bytes_insecure_function;

View File

@@ -54,10 +54,12 @@
#include "zend.h"
#include "zend_alloc.h"
#include "zend_globals.h"
#include "zend_hrtime.h"
#include "zend_operators.h"
#include "zend_multiply.h"
#include "zend_bitset.h"
#include "zend_mmap.h"
#include "zend_portability.h"
#include <signal.h>
#ifdef HAVE_UNISTD_H
@@ -68,6 +70,8 @@
# include <wincrypt.h>
# include <process.h>
# include "win32/winutil.h"
# define getpid _getpid
typedef int pid_t;
#endif
#include <stdio.h>
@@ -141,6 +145,25 @@ static size_t _real_page_size = ZEND_MM_PAGE_SIZE;
#ifndef ZEND_MM_ERROR
# define ZEND_MM_ERROR 1 /* report system errors */
#endif
#ifndef ZEND_MM_HEAP_PROTECTION
# define ZEND_MM_HEAP_PROTECTION 1 /* protect heap against corruptions */
#endif
#if ZEND_MM_HEAP_PROTECTION
/* Define ZEND_MM_MIN_USEABLE_BIN_SIZE to the size of two pointers */
# if UINTPTR_MAX == UINT64_MAX
# define ZEND_MM_MIN_USEABLE_BIN_SIZE 16
# elif UINTPTR_MAX == UINT32_MAX
# define ZEND_MM_MIN_USEABLE_BIN_SIZE 8
# else
# error
# endif
# if ZEND_MM_MIN_USEABLE_BIN_SIZE < ZEND_MM_MIN_SMALL_SIZE
# error
# endif
#else /* ZEND_MM_HEAP_PROTECTION */
# define ZEND_MM_MIN_USEABLE_BIN_SIZE ZEND_MM_MIN_SMALL_SIZE
#endif /* ZEND_MM_HEAP_PROTECTION */
#ifndef ZEND_MM_CHECK
# define ZEND_MM_CHECK(condition, message) do { \
@@ -194,6 +217,44 @@ typedef zend_mm_bitset zend_mm_page_map[ZEND_MM_PAGE_MAP_LEN]; /* 64B */
#define ZEND_MM_BINS 30
#if defined(_MSC_VER)
# if UINTPTR_MAX == UINT64_MAX
# define BSWAPPTR(u) _byteswap_uint64(u)
# else
# define BSWAPPTR(u) _byteswap_ulong(u)
# endif
#else
# if UINTPTR_MAX == UINT64_MAX
# if __has_builtin(__builtin_bswap64)
# define BSWAPPTR(u) __builtin_bswap64(u)
# else
/* Fallback byte-swap for 64-bit uintptr_t when __builtin_bswap64 is
 * unavailable: swap adjacent bytes, then adjacent 16-bit halves, then the
 * two 32-bit halves. Equivalent to a full 64-bit byte reversal. */
static zend_always_inline uintptr_t BSWAPPTR(uintptr_t u)
{
	u = ((u & 0x00ff00ff00ff00ffULL) << 8)  | ((u & 0xff00ff00ff00ff00ULL) >> 8);
	u = ((u & 0x0000ffff0000ffffULL) << 16) | ((u & 0xffff0000ffff0000ULL) >> 16);
	return (u << 32) | (u >> 32);
}
# endif /* __has_builtin(__builtin_bswap64) */
# else /* UINTPTR_MAX == UINT64_MAX */
# if __has_builtin(__builtin_bswap32)
# define BSWAPPTR(u) __builtin_bswap32(u)
# else
/* Fallback byte-swap for 32-bit uintptr_t when __builtin_bswap32 is
 * unavailable: move each byte to its mirrored position and recombine. */
static zend_always_inline uintptr_t BSWAPPTR(uintptr_t u)
{
	uintptr_t b3 = (u >> 24) & 0x000000ff;
	uintptr_t b2 = (u >> 8)  & 0x0000ff00;
	uintptr_t b1 = (u << 8)  & 0x00ff0000;
	uintptr_t b0 = (u << 24) & 0xff000000;
	return b3 | b2 | b1 | b0;
}
# endif /* __has_builtin(__builtin_bswap32) */
# endif /* UINTPTR_MAX == UINT64_MAX */
#endif /* defined(_MSC_VER) */
typedef struct _zend_mm_page zend_mm_page;
typedef struct _zend_mm_bin zend_mm_bin;
typedef struct _zend_mm_free_slot zend_mm_free_slot;
@@ -245,6 +306,7 @@ struct _zend_mm_heap {
size_t size; /* current memory usage */
size_t peak; /* peak memory usage */
#endif
uintptr_t shadow_key; /* free slot shadow ptr xor key */
zend_mm_free_slot *free_slot[ZEND_MM_BINS]; /* free lists for small sizes */
#if ZEND_MM_STAT || ZEND_MM_LIMIT
size_t real_size; /* current size of allocated pages */
@@ -275,6 +337,8 @@ struct _zend_mm_heap {
} custom_heap;
HashTable *tracked_allocs;
#endif
pid_t pid;
zend_random_bytes_insecure_state rand_state;
};
struct _zend_mm_chunk {
@@ -1248,6 +1312,67 @@ static zend_always_inline int zend_mm_small_size_to_bin(size_t size)
#define ZEND_MM_SMALL_SIZE_TO_BIN(size) zend_mm_small_size_to_bin(size)
#if ZEND_MM_HEAP_PROTECTION
/* We keep track of free slots by organizing them in a linked list, with the
* first word of every free slot being a pointer to the next one.
*
* In order to frustrate corruptions, we check the consistency of these pointers
* before dereference by comparing them with a shadow.
*
* The shadow is a copy of the pointer, stored at the end of the slot. It is
* XOR'ed with a random key, and converted to big-endian so that smaller
* corruptions affect the most significant bytes, which has a high chance of
* resulting in an invalid address instead of pointing to an adjacent slot.
*/
#define ZEND_MM_FREE_SLOT_PTR_SHADOW(free_slot, bin_num) \
*((zend_mm_free_slot**)((char*)(free_slot) + bin_data_size[(bin_num)] - sizeof(zend_mm_free_slot*)))
/* Compute the shadow value stored for a free-slot next pointer: the pointer
 * XOR'ed with the heap's secret shadow_key. On little-endian targets the
 * pointer is byte-swapped first, so a short out-of-bounds write hits the most
 * significant bytes and is likely to yield an invalid address on decode. */
static zend_always_inline zend_mm_free_slot* zend_mm_encode_free_slot(const zend_mm_heap *heap, const zend_mm_free_slot *slot)
{
#ifdef WORDS_BIGENDIAN
/* Already big-endian in memory: XOR with the key is sufficient. */
return (zend_mm_free_slot*)(((uintptr_t)slot) ^ heap->shadow_key);
#else
/* Byte-swap to big-endian representation before mangling with the key. */
return (zend_mm_free_slot*)(BSWAPPTR((uintptr_t)slot) ^ heap->shadow_key);
#endif
}
/* Inverse of zend_mm_encode_free_slot: recover the original pointer from a
 * shadow value by undoing the XOR with shadow_key (and, on little-endian
 * targets, the byte swap). */
static zend_always_inline zend_mm_free_slot* zend_mm_decode_free_slot(zend_mm_heap *heap, zend_mm_free_slot *slot)
{
#ifdef WORDS_BIGENDIAN
return (zend_mm_free_slot*)((uintptr_t)slot ^ heap->shadow_key);
#else
/* XOR first (mirroring encode's last step), then swap back to native order. */
return (zend_mm_free_slot*)(BSWAPPTR((uintptr_t)slot ^ heap->shadow_key));
#endif
}
/* Link `next` after `slot` in the freelist of bin `bin_num`, writing both the
 * plain next pointer (first word of the slot) and its encoded shadow copy
 * (last word of the slot) so the link can later be consistency-checked. */
static zend_always_inline void zend_mm_set_next_free_slot(zend_mm_heap *heap, uint32_t bin_num, zend_mm_free_slot *slot, zend_mm_free_slot *next)
{
/* The slot must be large enough to hold both the pointer and its shadow. */
ZEND_ASSERT(bin_data_size[bin_num] >= ZEND_MM_MIN_USEABLE_BIN_SIZE);
slot->next_free_slot = next;
ZEND_MM_FREE_SLOT_PTR_SHADOW(slot, bin_num) = zend_mm_encode_free_slot(heap, next);
}
/* Return the slot following `slot` in the freelist of bin `bin_num`, after
 * verifying that the next pointer matches its decoded shadow copy. A mismatch
 * indicates heap corruption (e.g. an out-of-bounds write over the freelist)
 * and aborts via zend_mm_panic(). NULL terminates the list and needs no check. */
static zend_always_inline zend_mm_free_slot *zend_mm_get_next_free_slot(zend_mm_heap *heap, uint32_t bin_num, zend_mm_free_slot* slot)
{
zend_mm_free_slot *next = slot->next_free_slot;
if (EXPECTED(next != NULL)) {
zend_mm_free_slot *shadow = ZEND_MM_FREE_SLOT_PTR_SHADOW(slot, bin_num);
if (UNEXPECTED(next != zend_mm_decode_free_slot(heap, shadow))) {
zend_mm_panic("zend_mm_heap corrupted");
}
}
/* `next` already has type zend_mm_free_slot*; no cast needed. */
return next;
}
#else /* ZEND_MM_HEAP_PROTECTION */
# define zend_mm_set_next_free_slot(heap, bin_num, slot, next) do { \
(slot)->next_free_slot = (next); \
} while (0)
# define zend_mm_get_next_free_slot(heap, bin_num, slot) (slot)->next_free_slot
#endif /* ZEND_MM_HEAP_PROTECTION */
static zend_never_inline void *zend_mm_alloc_small_slow(zend_mm_heap *heap, uint32_t bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
zend_mm_chunk *chunk;
@@ -1281,7 +1406,7 @@ static zend_never_inline void *zend_mm_alloc_small_slow(zend_mm_heap *heap, uint
end = (zend_mm_free_slot*)((char*)bin + (bin_data_size[bin_num] * (bin_elements[bin_num] - 1)));
heap->free_slot[bin_num] = p = (zend_mm_free_slot*)((char*)bin + bin_data_size[bin_num]);
do {
p->next_free_slot = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);
zend_mm_set_next_free_slot(heap, bin_num, p, (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]));
#if ZEND_DEBUG
do {
zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
@@ -1306,6 +1431,8 @@ static zend_never_inline void *zend_mm_alloc_small_slow(zend_mm_heap *heap, uint
static zend_always_inline void *zend_mm_alloc_small(zend_mm_heap *heap, int bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
ZEND_ASSERT(bin_data_size[bin_num] >= ZEND_MM_MIN_USEABLE_BIN_SIZE);
#if ZEND_MM_STAT
do {
size_t size = heap->size + bin_data_size[bin_num];
@@ -1317,7 +1444,7 @@ static zend_always_inline void *zend_mm_alloc_small(zend_mm_heap *heap, int bin_
if (EXPECTED(heap->free_slot[bin_num] != NULL)) {
zend_mm_free_slot *p = heap->free_slot[bin_num];
heap->free_slot[bin_num] = p->next_free_slot;
heap->free_slot[bin_num] = zend_mm_get_next_free_slot(heap, bin_num, p);
return p;
} else {
return zend_mm_alloc_small_slow(heap, bin_num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
@@ -1326,6 +1453,8 @@ static zend_always_inline void *zend_mm_alloc_small(zend_mm_heap *heap, int bin_
static zend_always_inline void zend_mm_free_small(zend_mm_heap *heap, void *ptr, int bin_num)
{
ZEND_ASSERT(bin_data_size[bin_num] >= ZEND_MM_MIN_USEABLE_BIN_SIZE);
zend_mm_free_slot *p;
#if ZEND_MM_STAT
@@ -1340,7 +1469,7 @@ static zend_always_inline void zend_mm_free_small(zend_mm_heap *heap, void *ptr,
#endif
p = (zend_mm_free_slot*)ptr;
p->next_free_slot = heap->free_slot[bin_num];
zend_mm_set_next_free_slot(heap, bin_num, p, heap->free_slot[bin_num]);
heap->free_slot[bin_num] = p;
}
@@ -1375,6 +1504,11 @@ static zend_always_inline zend_mm_debug_info *zend_mm_get_debug_info(zend_mm_hea
static zend_always_inline void *zend_mm_alloc_heap(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
void *ptr;
#if ZEND_MM_HEAP_PROTECTION
if (size < ZEND_MM_MIN_USEABLE_BIN_SIZE) {
size = ZEND_MM_MIN_USEABLE_BIN_SIZE;
}
#endif /* ZEND_MM_HEAP_PROTECTION */
#if ZEND_DEBUG
size_t real_size = size;
zend_mm_debug_info *dbg;
@@ -1596,6 +1730,11 @@ static zend_always_inline void *zend_mm_realloc_heap(zend_mm_heap *heap, void *p
zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
zend_mm_page_info info = chunk->map[page_num];
#if ZEND_MM_HEAP_PROTECTION
if (size < ZEND_MM_MIN_USEABLE_BIN_SIZE) {
size = ZEND_MM_MIN_USEABLE_BIN_SIZE;
}
#endif /* ZEND_MM_HEAP_PROTECTION */
#if ZEND_DEBUG
size_t real_size = size;
@@ -1904,6 +2043,17 @@ static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZE
/* Initialization */
/******************/
/* Draw a fresh random shadow_key from the heap's insecure RNG state. Called
 * at heap shutdown/reset so each request cycle uses a different key. */
static void zend_mm_refresh_key(zend_mm_heap *heap)
{
zend_random_bytes_insecure(&heap->rand_state, &heap->shadow_key, sizeof(heap->shadow_key));
}
/* (Re)initialize the RNG state and derive an initial shadow_key from it.
 * Used at heap creation, and again when the pid has changed (see the
 * getpid() comparison in zend_mm_shutdown) so a forked child does not keep
 * the parent's key. */
static void zend_mm_init_key(zend_mm_heap *heap)
{
memset(&heap->rand_state, 0, sizeof(heap->rand_state));
zend_mm_refresh_key(heap);
}
static zend_mm_heap *zend_mm_init(void)
{
zend_mm_chunk *chunk = (zend_mm_chunk*)zend_mm_chunk_alloc_int(ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
@@ -1940,6 +2090,7 @@ static zend_mm_heap *zend_mm_init(void)
heap->size = 0;
heap->peak = 0;
#endif
zend_mm_init_key(heap);
#if ZEND_MM_LIMIT
heap->limit = (size_t)Z_L(-1) >> 1;
heap->overflow = 0;
@@ -1951,12 +2102,13 @@ static zend_mm_heap *zend_mm_init(void)
heap->storage = NULL;
#endif
heap->huge_list = NULL;
heap->pid = getpid();
return heap;
}
ZEND_API size_t zend_mm_gc(zend_mm_heap *heap)
{
zend_mm_free_slot *p, **q;
zend_mm_free_slot *p, *q;
zend_mm_chunk *chunk;
size_t page_offset;
int page_num;
@@ -1994,15 +2146,15 @@ ZEND_API size_t zend_mm_gc(zend_mm_heap *heap)
has_free_pages = true;
}
chunk->map[page_num] = ZEND_MM_SRUN_EX(i, free_counter);
p = p->next_free_slot;
p = zend_mm_get_next_free_slot(heap, i, p);
}
if (!has_free_pages) {
continue;
}
q = &heap->free_slot[i];
p = *q;
q = (zend_mm_free_slot*)&heap->free_slot[i];
p = q->next_free_slot;
while (p != NULL) {
chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(p, ZEND_MM_CHUNK_SIZE);
ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
@@ -2020,11 +2172,19 @@ ZEND_API size_t zend_mm_gc(zend_mm_heap *heap)
ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(info) == i);
if (ZEND_MM_SRUN_FREE_COUNTER(info) == bin_elements[i]) {
/* remove from cache */
p = p->next_free_slot;
*q = p;
p = zend_mm_get_next_free_slot(heap, i, p);
if (q == (zend_mm_free_slot*)&heap->free_slot[i]) {
q->next_free_slot = p;
} else {
zend_mm_set_next_free_slot(heap, i, q, p);
}
} else {
q = &p->next_free_slot;
p = *q;
q = p;
if (q == (zend_mm_free_slot*)&heap->free_slot[i]) {
p = q->next_free_slot;
} else {
p = zend_mm_get_next_free_slot(heap, i, q);
}
}
}
}
@@ -2376,6 +2536,14 @@ void zend_mm_shutdown(zend_mm_heap *heap, bool full, bool silent)
memset(p->free_map, 0, sizeof(p->free_map) + sizeof(p->map));
p->free_map[0] = (1L << ZEND_MM_FIRST_PAGE) - 1;
p->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
pid_t pid = getpid();
if (heap->pid != pid) {
zend_mm_init_key(heap);
heap->pid = pid;
} else {
zend_mm_refresh_key(heap);
}
}
}
@@ -2507,13 +2675,16 @@ ZEND_API bool is_zend_ptr(const void *ptr)
# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr)
#endif
# define _ZEND_BIN_ALLOCATOR(_num, _size, _elements, _pages, x, y) \
# define _ZEND_BIN_ALLOCATOR(_num, _size, _elements, _pages, _min_size, y) \
ZEND_API void* ZEND_FASTCALL _emalloc_ ## _size(void) { \
ZEND_MM_CUSTOM_ALLOCATOR(_size); \
if (_size < _min_size) { \
return _emalloc_ ## _min_size(); \
} \
return zend_mm_alloc_small(AG(mm_heap), _num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
}
ZEND_MM_BINS_INFO(_ZEND_BIN_ALLOCATOR, x, y)
ZEND_MM_BINS_INFO(_ZEND_BIN_ALLOCATOR, ZEND_MM_MIN_USEABLE_BIN_SIZE, y)
ZEND_API void* ZEND_FASTCALL _emalloc_large(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
@@ -2528,9 +2699,13 @@ ZEND_API void* ZEND_FASTCALL _emalloc_huge(size_t size)
}
#if ZEND_DEBUG
# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, x, y) \
# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, _min_size, y) \
ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
if (_size < _min_size) { \
_efree_ ## _min_size(ptr); \
return; \
} \
{ \
size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE); \
zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
@@ -2542,9 +2717,13 @@ ZEND_API void* ZEND_FASTCALL _emalloc_huge(size_t size)
} \
}
#else
# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, x, y) \
# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, _min_size, y) \
ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
if (_size < _min_size) { \
_efree_ ## _min_size(ptr); \
return; \
} \
{ \
zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
@@ -2553,7 +2732,7 @@ ZEND_API void* ZEND_FASTCALL _emalloc_huge(size_t size)
}
#endif
ZEND_MM_BINS_INFO(_ZEND_BIN_FREE, x, y)
ZEND_MM_BINS_INFO(_ZEND_BIN_FREE, ZEND_MM_MIN_USEABLE_BIN_SIZE, y)
ZEND_API void ZEND_FASTCALL _efree_large(void *ptr, size_t size)
{
@@ -3052,6 +3231,7 @@ ZEND_API zend_mm_heap *zend_mm_startup_ex(const zend_mm_handlers *handlers, void
heap->size = 0;
heap->peak = 0;
#endif
zend_mm_init_key(heap);
#if ZEND_MM_LIMIT
heap->limit = (size_t)Z_L(-1) >> 1;
heap->overflow = 0;
@@ -3076,6 +3256,7 @@ ZEND_API zend_mm_heap *zend_mm_startup_ex(const zend_mm_handlers *handlers, void
memcpy(storage->data, data, data_size);
}
heap->storage = storage;
heap->pid = getpid();
return heap;
#else
return NULL;