kernel: Rewrite guarded_heap.
Compared to the old kernel guarded heap, this new one:
* Uses VMTranslationMap and related APIs directly, rather than
indirectly through the "protection cookie" system. This means
those custom APIs, which nothing but the guarded heap used,
can now be dropped.
* Does not allocate vm_pages for guard pages, and frees vm_pages
when not in use. This makes DISABLE_MEMORY_REUSE much more
usable, as it now only avoids reusing virtual memory (physical
pages are still freed), allowing for much longer-running systems.
* Uses trees to manage meta chunks. This avoids unbounded growth
of the meta chunk count, especially under DISABLE_MEMORY_REUSE.
* Handles page faults directly and prints information about the
fault address/allocation automatically.
The system still boots and runs fine with this new guarded heap,
including under USE_GUARDED_HEAP_FOR_OBJECT_CACHE. (A short
guard-page layout sketch follows below, before the diff.)
Change-Id: If85e5689a47088790287990c881364fcde4cc76d
Reviewed-on: https://review.haiku-os.org/c/haiku/+/9705
Reviewed-by: waddlesplash <waddlesplash@gmail.com>
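Before the diff, here is a minimal sketch of the guard-page layout the commit message describes. It is an illustration only, not the kernel code: kPageSize, ChunkLayout and layout_allocation are assumptions made for this example, and alignment handling is simplified; the real logic lives in guarded_heap_allocate() in the new debug/guarded_heap.cpp below.

// A minimal, illustrative guard-page layout (hypothetical helper, not the
// kernel implementation; kPageSize and the struct below are assumptions).
#include <cstddef>
#include <cstdint>

static const size_t kPageSize = 4096;   // assumed page size for the example

struct ChunkLayout {
	uintptr_t base;            // first mapped page of the chunk
	size_t    pagesCount;      // data pages plus one trailing guard page
	uintptr_t allocationBase;  // address handed back to the caller
};

// Round the request up to whole pages, append one guard page, and place the
// allocation flush against the guard page.
static ChunkLayout
layout_allocation(uintptr_t chunkBase, size_t size, size_t alignment)
{
	const size_t guardPages = 1;
	const size_t dataPages = (size + kPageSize - 1) / kPageSize;

	ChunkLayout layout;
	layout.base = chunkBase;
	layout.pagesCount = dataPages + guardPages;
	layout.allocationBase = chunkBase + dataPages * kPageSize - size;
	if (alignment > 1)
		layout.allocationBase -= layout.allocationBase % alignment;
	return layout;
}

With this placement, touching even one byte past the end of the returned block hits the unmapped guard page and faults immediately, which the new GuardedHeapCache::Fault() turns into a panic reporting the offending address.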
Diff
src/system/kernel/Jamfile | 1 -
src/system/kernel/guarded_heap.cpp | 1126 --------------------------------------------------------------------------------
src/system/kernel/debug/Jamfile | 1 +
src/system/kernel/debug/guarded_heap.cpp | 1027 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
src/system/kernel/vm/vm.cpp | 2 +-
5 files changed, 1029 insertions(+), 1128 deletions(-)
@@ -31,7 +31,6 @@
cpu.cpp
DPC.cpp
elf.cpp
guarded_heap.cpp
heap.cpp
image.cpp
interrupts.cpp
@@ -1,1126 +1,0 @@
/*
* Copyright 2011, Michael Lotz <mmlr@mlotz.ch>.
* Distributed under the terms of the MIT License.
*/
#include <stdio.h>
#include <string.h>
#include <arch/debug.h>
#include <elf.h>
#include <debug.h>
#include <heap.h>
#include <malloc.h>
#include <slab/Slab.h>
#include <team.h>
#include <tracing.h>
#include <util/list.h>
#include <util/AutoLock.h>
#include <vm/vm.h>
#if USE_GUARDED_HEAP_FOR_MALLOC
#define GUARDED_HEAP_PAGE_FLAG_USED 0x01
#define GUARDED_HEAP_PAGE_FLAG_FIRST 0x02
#define GUARDED_HEAP_PAGE_FLAG_GUARD 0x04
#define GUARDED_HEAP_PAGE_FLAG_DEAD 0x08
#define GUARDED_HEAP_STACK_TRACE_DEPTH 0
struct guarded_heap;
struct guarded_heap_page {
uint8 flags;
size_t allocation_size;
void* allocation_base;
size_t alignment;
team_id team;
thread_id thread;
#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
size_t stack_trace_depth;
addr_t stack_trace[GUARDED_HEAP_STACK_TRACE_DEPTH];
#endif
list_link free_list_link;
};
struct guarded_heap_area {
guarded_heap* heap;
guarded_heap_area* next;
area_id area;
addr_t base;
size_t size;
size_t page_count;
size_t used_pages;
void* protection_cookie;
mutex lock;
struct list free_list;
guarded_heap_page pages[0];
};
struct guarded_heap {
rw_lock lock;
size_t page_count;
size_t used_pages;
int32 area_creation_counter;
guarded_heap_area* areas;
};
static guarded_heap sGuardedHeap = {
RW_LOCK_INITIALIZER("guarded heap lock"),
0, 0, 0, NULL
};
#if GUARDED_HEAP_TRACING
namespace GuardedHeapTracing {
class GuardedHeapTraceEntry
: public TRACE_ENTRY_SELECTOR(GUARDED_HEAP_TRACING_STACK_TRACE) {
public:
GuardedHeapTraceEntry(guarded_heap* heap)
:
TraceEntryBase(GUARDED_HEAP_TRACING_STACK_TRACE, 0, true),
fHeap(heap)
{
}
protected:
guarded_heap* fHeap;
};
class Allocate : public GuardedHeapTraceEntry {
public:
Allocate(guarded_heap* heap, void* pageBase, uint32 flags)
:
GuardedHeapTraceEntry(heap),
fPageBase(pageBase),
fFlags(flags)
{
Initialized();
}
virtual void AddDump(TraceOutput& out)
{
out.Print("guarded heap allocate: heap: %p; page: %p; "
"flags:%s%s%s%s", fHeap, fPageBase,
(fFlags & GUARDED_HEAP_PAGE_FLAG_USED) != 0 ? " used" : "",
(fFlags & GUARDED_HEAP_PAGE_FLAG_FIRST) != 0 ? " first" : "",
(fFlags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0 ? " guard" : "",
(fFlags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0 ? " dead" : "");
}
private:
void* fPageBase;
uint32 fFlags;
};
class Free : public GuardedHeapTraceEntry {
public:
Free(guarded_heap* heap, void* pageBase)
:
GuardedHeapTraceEntry(heap),
fPageBase(pageBase)
{
Initialized();
}
virtual void AddDump(TraceOutput& out)
{
out.Print("guarded heap free: heap: %p; page: %p", fHeap,
fPageBase);
}
private:
void* fPageBase;
};
}
# define T(x) new(std::nothrow) GuardedHeapTracing::x
#else
# define T(x)
#endif
static void
guarded_heap_page_protect(guarded_heap_area& area, size_t pageIndex,
uint32 protection)
{
if (area.area < 0)
return;
addr_t address = area.base + pageIndex * B_PAGE_SIZE;
vm_set_kernel_area_debug_protection(area.protection_cookie, (void*)address,
B_PAGE_SIZE, protection);
}
static void
guarded_heap_page_allocate(guarded_heap_area& area, size_t startPageIndex,
size_t pagesNeeded, size_t allocationSize, size_t alignment,
void* allocationBase)
{
if (pagesNeeded < 2) {
panic("need to allocate at least 2 pages, one for guard\n");
return;
}
guarded_heap_page* firstPage = NULL;
for (size_t i = 0; i < pagesNeeded; i++) {
guarded_heap_page& page = area.pages[startPageIndex + i];
page.flags = GUARDED_HEAP_PAGE_FLAG_USED;
if (i == 0) {
page.team = (gKernelStartup ? 0 : team_get_current_team_id());
page.thread = find_thread(NULL);
#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
page.stack_trace_depth = arch_debug_get_stack_trace(
page.stack_trace, GUARDED_HEAP_STACK_TRACE_DEPTH, 0, 4,
STACK_TRACE_KERNEL);
#endif
page.allocation_size = allocationSize;
page.allocation_base = allocationBase;
page.alignment = alignment;
page.flags |= GUARDED_HEAP_PAGE_FLAG_FIRST;
firstPage = &page;
} else {
page.team = firstPage->team;
page.thread = firstPage->thread;
#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
page.stack_trace_depth = 0;
#endif
page.allocation_size = allocationSize;
page.allocation_base = allocationBase;
page.alignment = alignment;
}
list_remove_item(&area.free_list, &page);
if (i == pagesNeeded - 1) {
page.flags |= GUARDED_HEAP_PAGE_FLAG_GUARD;
guarded_heap_page_protect(area, startPageIndex + i, 0);
} else {
guarded_heap_page_protect(area, startPageIndex + i,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
}
T(Allocate(area.heap,
(void*)(area.base + (startPageIndex + i) * B_PAGE_SIZE),
page.flags));
}
}
static void
guarded_heap_free_page(guarded_heap_area& area, size_t pageIndex,
bool force = false)
{
guarded_heap_page& page = area.pages[pageIndex];
#if DEBUG_GUARDED_HEAP_DISABLE_MEMORY_REUSE
if (force || area.area < 0)
page.flags = 0;
else
page.flags |= GUARDED_HEAP_PAGE_FLAG_DEAD;
#else
page.flags = 0;
#endif
page.allocation_size = 0;
page.team = (gKernelStartup ? 0 : team_get_current_team_id());
page.thread = find_thread(NULL);
#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
page.stack_trace_depth = arch_debug_get_stack_trace(page.stack_trace,
GUARDED_HEAP_STACK_TRACE_DEPTH, 0, 3, STACK_TRACE_KERNEL);
#endif
list_add_item(&area.free_list, &page);
guarded_heap_page_protect(area, pageIndex, 0);
T(Free(area.heap, (void*)(area.base + pageIndex * B_PAGE_SIZE)));
}
static bool
guarded_heap_pages_allocated(guarded_heap& heap, size_t pagesAllocated)
{
return (atomic_add((int32*)&heap.used_pages, pagesAllocated)
+ pagesAllocated)
>= heap.page_count - HEAP_GROW_SIZE / B_PAGE_SIZE / 2;
}
static void*
guarded_heap_area_allocate(guarded_heap_area& area, size_t size,
size_t alignment, uint32 flags, bool& grow)
{
if (alignment > B_PAGE_SIZE) {
panic("alignment of %" B_PRIuSIZE " not supported", alignment);
return NULL;
}
size_t pagesNeeded = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE + 1;
if (pagesNeeded > area.page_count - area.used_pages)
return NULL;
if (pagesNeeded > area.page_count)
return NULL;
guarded_heap_page* page
= (guarded_heap_page*)list_get_first_item(&area.free_list);
for (; page != NULL;
page = (guarded_heap_page*)list_get_next_item(&area.free_list, page)) {
if ((page->flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
continue;
size_t pageIndex = page - area.pages;
if (pageIndex > area.page_count - pagesNeeded)
continue;
bool candidate = true;
for (size_t j = 1; j < pagesNeeded; j++) {
if ((area.pages[pageIndex + j].flags & GUARDED_HEAP_PAGE_FLAG_USED)
!= 0) {
candidate = false;
break;
}
}
if (!candidate)
continue;
if (alignment == 0)
alignment = 1;
size_t offset = size & (B_PAGE_SIZE - 1);
void* result = (void*)((area.base + pageIndex * B_PAGE_SIZE
+ (offset > 0 ? B_PAGE_SIZE - offset : 0)) & ~(alignment - 1));
guarded_heap_page_allocate(area, pageIndex, pagesNeeded, size,
alignment, result);
area.used_pages += pagesNeeded;
grow = guarded_heap_pages_allocated(*area.heap, pagesNeeded);
return result;
}
return NULL;
}
static bool
guarded_heap_area_init(guarded_heap& heap, area_id id, void* baseAddress,
size_t size, uint32 flags)
{
guarded_heap_area* area = (guarded_heap_area*)baseAddress;
area->heap = &heap;
area->area = id;
area->size = size;
area->page_count = area->size / B_PAGE_SIZE;
area->used_pages = 0;
size_t pagesNeeded = (sizeof(guarded_heap_area)
+ area->page_count * sizeof(guarded_heap_page)
+ B_PAGE_SIZE - 1) / B_PAGE_SIZE;
area->page_count -= pagesNeeded;
area->size = area->page_count * B_PAGE_SIZE;
area->base = (addr_t)baseAddress + pagesNeeded * B_PAGE_SIZE;
if (area->area >= 0 && vm_prepare_kernel_area_debug_protection(area->area,
&area->protection_cookie) != B_OK) {
return false;
}
mutex_init(&area->lock, "guarded_heap_area_lock");
list_init_etc(&area->free_list,
offsetof(guarded_heap_page, free_list_link));
for (size_t i = 0; i < area->page_count; i++)
guarded_heap_free_page(*area, i, true);
WriteLocker areaListWriteLocker(heap.lock);
area->next = heap.areas;
heap.areas = area;
heap.page_count += area->page_count;
return true;
}
static bool
guarded_heap_area_create(guarded_heap& heap, uint32 flags)
{
for (size_t trySize = HEAP_GROW_SIZE; trySize >= 1 * 1024 * 1024;
trySize /= 2) {
void* baseAddress = NULL;
area_id id = create_area("guarded_heap_area", &baseAddress,
B_ANY_KERNEL_ADDRESS, trySize, B_FULL_LOCK,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (id < 0)
continue;
if (guarded_heap_area_init(heap, id, baseAddress, trySize, flags))
return true;
delete_area(id);
}
panic("failed to allocate a new heap area");
return false;
}
static bool
guarded_heap_add_area(guarded_heap& heap, int32 counter, uint32 flags)
{
if ((flags & (HEAP_DONT_LOCK_KERNEL_SPACE | HEAP_DONT_WAIT_FOR_MEMORY))
!= 0) {
return false;
}
if (atomic_test_and_set(&heap.area_creation_counter,
counter + 1, counter) == counter) {
return guarded_heap_area_create(heap, flags);
}
return false;
}
static void*
guarded_heap_allocate(guarded_heap& heap, size_t size, size_t alignment,
uint32 flags)
{
bool grow = false;
void* result = NULL;
ReadLocker areaListReadLocker(heap.lock);
for (guarded_heap_area* area = heap.areas; area != NULL;
area = area->next) {
MutexLocker locker(area->lock);
result = guarded_heap_area_allocate(*area, size, alignment, flags,
grow);
if (result != NULL)
break;
}
int32 counter = atomic_get(&heap.area_creation_counter);
areaListReadLocker.Unlock();
if (result == NULL || grow) {
bool added = guarded_heap_add_area(heap, counter, flags);
if (result == NULL && added)
return guarded_heap_allocate(heap, size, alignment, flags);
}
if (result == NULL)
panic("ran out of memory");
return result;
}
static guarded_heap_area*
guarded_heap_get_locked_area_for(guarded_heap& heap, void* address)
{
ReadLocker areaListReadLocker(heap.lock);
for (guarded_heap_area* area = heap.areas; area != NULL;
area = area->next) {
if ((addr_t)address < area->base)
continue;
if ((addr_t)address >= area->base + area->size)
continue;
mutex_lock(&area->lock);
return area;
}
panic("guarded heap area for address %p not found", address);
return NULL;
}
static size_t
guarded_heap_area_page_index_for(guarded_heap_area& area, void* address)
{
size_t pageIndex = ((addr_t)address - area.base) / B_PAGE_SIZE;
guarded_heap_page& page = area.pages[pageIndex];
if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) == 0) {
panic("tried to free %p which points at page %" B_PRIuSIZE
" which is not marked in use", address, pageIndex);
return area.page_count;
}
if ((page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0) {
panic("tried to free %p which points at page %" B_PRIuSIZE
" which is a guard page", address, pageIndex);
return area.page_count;
}
if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) == 0) {
panic("tried to free %p which points at page %" B_PRIuSIZE
" which is not an allocation first page", address, pageIndex);
return area.page_count;
}
if ((page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0) {
panic("tried to free %p which points at page %" B_PRIuSIZE
" which is a dead page", address, pageIndex);
return area.page_count;
}
return pageIndex;
}
static void
guarded_heap_area_free(guarded_heap_area& area, void* address, uint32 flags)
{
size_t pageIndex = guarded_heap_area_page_index_for(area, address);
if (pageIndex >= area.page_count)
return;
size_t pagesFreed = 0;
guarded_heap_page* page = &area.pages[pageIndex];
while ((page->flags & GUARDED_HEAP_PAGE_FLAG_GUARD) == 0) {
guarded_heap_free_page(area, pageIndex);
pagesFreed++;
pageIndex++;
page = &area.pages[pageIndex];
}
guarded_heap_free_page(area, pageIndex);
pagesFreed++;
#if !DEBUG_GUARDED_HEAP_DISABLE_MEMORY_REUSE
area.used_pages -= pagesFreed;
atomic_add((int32*)&area.heap->used_pages, -pagesFreed);
#endif
}
static void
guarded_heap_free(void* address, uint32 flags)
{
if (address == NULL)
return;
guarded_heap_area* area = guarded_heap_get_locked_area_for(sGuardedHeap,
address);
if (area == NULL)
return;
MutexLocker locker(area->lock, true);
guarded_heap_area_free(*area, address, flags);
}
static void*
guarded_heap_realloc(void* address, size_t newSize, uint32 flags)
{
guarded_heap_area* area = guarded_heap_get_locked_area_for(sGuardedHeap,
address);
if (area == NULL)
return NULL;
MutexLocker locker(area->lock, true);
size_t pageIndex = guarded_heap_area_page_index_for(*area, address);
if (pageIndex >= area->page_count)
return NULL;
guarded_heap_page& page = area->pages[pageIndex];
size_t oldSize = page.allocation_size;
locker.Unlock();
if (oldSize == newSize)
return address;
void* newBlock = malloc_etc(newSize, flags);
if (newBlock == NULL)
return NULL;
memcpy(newBlock, address, min_c(oldSize, newSize));
free_etc(address, flags);
return newBlock;
}
static void
dump_guarded_heap_stack_trace(guarded_heap_page& page)
{
#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
kprintf("stack trace:\n");
for (size_t i = 0; i < page.stack_trace_depth; i++) {
addr_t address = page.stack_trace[i];
const char* symbol;
const char* imageName;
bool exactMatch;
addr_t baseAddress;
if (elf_debug_lookup_symbol_address(address, &baseAddress, &symbol,
&imageName, &exactMatch) == B_OK) {
kprintf(" %p %s + 0x%lx (%s)%s\n", (void*)address, symbol,
address - baseAddress, imageName,
exactMatch ? "" : " (nearest)");
} else
kprintf(" %p\n", (void*)address);
}
#endif
}
static int
dump_guarded_heap_page(int argc, char** argv)
{
if (argc != 2) {
print_debugger_command_usage(argv[0]);
return 0;
}
addr_t address = parse_expression(argv[1]);
guarded_heap_area* area = NULL;
for (guarded_heap_area* candidate = sGuardedHeap.areas; candidate != NULL;
candidate = candidate->next) {
if (address < candidate->base)
continue;
if (address >= candidate->base + candidate->size)
continue;
area = candidate;
break;
}
if (area == NULL) {
kprintf("didn't find area for address\n");
return 1;
}
size_t pageIndex = ((addr_t)address - area->base) / B_PAGE_SIZE;
guarded_heap_page& page = area->pages[pageIndex];
kprintf("page index: %" B_PRIuSIZE "\n", pageIndex);
kprintf("flags:");
if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
kprintf(" used");
if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) != 0)
kprintf(" first");
if ((page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0)
kprintf(" guard");
if ((page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0)
kprintf(" dead");
kprintf("\n");
kprintf("allocation size: %" B_PRIuSIZE "\n", page.allocation_size);
kprintf("allocation base: %p\n", page.allocation_base);
kprintf("alignment: %" B_PRIuSIZE "\n", page.alignment);
kprintf("allocating team: %" B_PRId32 "\n", page.team);
kprintf("allocating thread: %" B_PRId32 "\n", page.thread);
dump_guarded_heap_stack_trace(page);
return 0;
}
static int
dump_guarded_heap_area(int argc, char** argv)
{
if (argc != 2) {
print_debugger_command_usage(argv[0]);
return 0;
}
addr_t address = parse_expression(argv[1]);
guarded_heap_area* area = NULL;
for (guarded_heap_area* candidate = sGuardedHeap.areas; candidate != NULL;
candidate = candidate->next) {
if ((addr_t)candidate != address) {
if (address < candidate->base)
continue;
if (address >= candidate->base + candidate->size)
continue;
}
area = candidate;
break;
}
if (area == NULL) {
kprintf("didn't find area for address\n");
return 1;
}
kprintf("guarded heap area: %p\n", area);
kprintf("next heap area: %p\n", area->next);
kprintf("guarded heap: %p\n", area->heap);
kprintf("area id: %" B_PRId32 "\n", area->area);
kprintf("base: 0x%" B_PRIxADDR "\n", area->base);
kprintf("size: %" B_PRIuSIZE "\n", area->size);
kprintf("page count: %" B_PRIuSIZE "\n", area->page_count);
kprintf("used pages: %" B_PRIuSIZE "\n", area->used_pages);
kprintf("protection cookie: %p\n", area->protection_cookie);
kprintf("lock: %p\n", &area->lock);
size_t freeCount = 0;
void* item = list_get_first_item(&area->free_list);
while (item != NULL) {
freeCount++;
if ((((guarded_heap_page*)item)->flags & GUARDED_HEAP_PAGE_FLAG_USED)
!= 0) {
kprintf("free list broken, page %p not actually free\n", item);
}
item = list_get_next_item(&area->free_list, item);
}
kprintf("free_list: %p (%" B_PRIuSIZE " free)\n", &area->free_list,
freeCount);
freeCount = 0;
size_t runLength = 0;
size_t longestRun = 0;
for (size_t i = 0; i <= area->page_count; i++) {
guarded_heap_page& page = area->pages[i];
if (i == area->page_count
|| (page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0) {
freeCount += runLength;
if (runLength > longestRun)
longestRun = runLength;
runLength = 0;
continue;
}
runLength = 1;
for (size_t j = 1; j < area->page_count - i; j++) {
if ((area->pages[i + j].flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
break;
runLength++;
}
i += runLength - 1;
}
kprintf("longest free run: %" B_PRIuSIZE " (%" B_PRIuSIZE " free)\n",
longestRun, freeCount);
kprintf("pages: %p\n", area->pages);
return 0;
}
static int
dump_guarded_heap(int argc, char** argv)
{
guarded_heap* heap = &sGuardedHeap;
if (argc != 1) {
if (argc == 2)
heap = (guarded_heap*)parse_expression(argv[1]);
else {
print_debugger_command_usage(argv[0]);
return 0;
}
}
kprintf("guarded heap: %p\n", heap);
kprintf("rw lock: %p\n", &heap->lock);
kprintf("page count: %" B_PRIuSIZE "\n", heap->page_count);
kprintf("used pages: %" B_PRIuSIZE "\n", heap->used_pages);
kprintf("area creation counter: %" B_PRId32 "\n",
heap->area_creation_counter);
size_t areaCount = 0;
guarded_heap_area* area = heap->areas;
while (area != NULL) {
areaCount++;
area = area->next;
}
kprintf("areas: %p (%" B_PRIuSIZE ")\n", heap->areas, areaCount);
return 0;
}
static int
dump_guarded_heap_allocations(int argc, char** argv)
{
team_id team = -1;
thread_id thread = -1;
addr_t address = 0;
bool statsOnly = false;
bool stackTrace = false;
for (int32 i = 1; i < argc; i++) {
if (strcmp(argv[i], "team") == 0)
team = parse_expression(argv[++i]);
else if (strcmp(argv[i], "thread") == 0)
thread = parse_expression(argv[++i]);
else if (strcmp(argv[i], "address") == 0)
address = parse_expression(argv[++i]);
else if (strcmp(argv[i], "stats") == 0)
statsOnly = true;
#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
else if (strcmp(argv[i], "trace") == 0)
stackTrace = true;
#endif
else {
print_debugger_command_usage(argv[0]);
return 0;
}
}
size_t totalSize = 0;
uint32 totalCount = 0;
guarded_heap_area* area = sGuardedHeap.areas;
while (area != NULL) {
for (size_t i = 0; i < area->page_count; i++) {
guarded_heap_page& page = area->pages[i];
if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) == 0)
continue;
if ((team < 0 || page.team == team)
&& (thread < 0 || page.thread == thread)
&& (address == 0 || (addr_t)page.allocation_base == address)) {
if (!statsOnly) {
kprintf("team: % 6" B_PRId32 "; thread: % 6" B_PRId32 "; "
"address: 0x%08" B_PRIxADDR "; size: %" B_PRIuSIZE
" bytes\n", page.team, page.thread,
(addr_t)page.allocation_base, page.allocation_size);
if (stackTrace)
dump_guarded_heap_stack_trace(page);
}
totalSize += page.allocation_size;
totalCount++;
}
}
area = area->next;
}
kprintf("total allocations: %" B_PRIu32 "; total bytes: %" B_PRIuSIZE
"\n", totalCount, totalSize);
return 0;
}
status_t
heap_init(addr_t address, size_t size)
{
return guarded_heap_area_init(sGuardedHeap, -1, (void*)address, size, 0)
? B_OK : B_ERROR;
}
status_t
heap_init_post_area()
{
return B_OK;
}
status_t
heap_init_post_sem()
{
for (guarded_heap_area* area = sGuardedHeap.areas; area != NULL;
area = area->next) {
if (area->area >= 0)
continue;
area_id id = area_for((void*)area->base);
if (id < 0 || vm_prepare_kernel_area_debug_protection(id,
&area->protection_cookie) != B_OK) {
panic("failed to prepare initial guarded heap for protection");
continue;
}
area->area = id;
for (size_t i = 0; i < area->page_count; i++) {
guarded_heap_page& page = area->pages[i];
if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0
&& (page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) == 0
&& (page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) == 0) {
guarded_heap_page_protect(*area, i,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
} else
guarded_heap_page_protect(*area, i, 0);
}
}
add_debugger_command("guarded_heap", &dump_guarded_heap,
"Dump info about the guarded heap");
add_debugger_command_etc("guarded_heap_area", &dump_guarded_heap_area,
"Dump info about a guarded heap area",
"<address>\nDump info about guarded heap area containing address.\n",
0);
add_debugger_command_etc("guarded_heap_page", &dump_guarded_heap_page,
"Dump info about a guarded heap page",
"<address>\nDump info about guarded heap page containing address.\n",
0);
add_debugger_command_etc("allocations", &dump_guarded_heap_allocations,
"Dump current heap allocations",
#if GUARDED_HEAP_STACK_TRACE_DEPTH == 0
"[\"stats\"] [team] [thread] [address]\n"
#else
"[\"stats\"|\"trace\"] [team] [thread] [address]\n"
#endif
"If no parameters are given, all current alloactions are dumped.\n"
"If the optional argument \"stats\" is specified, only the allocation\n"
"counts and no individual allocations are printed.\n"
#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
"If the optional argument \"trace\" is specified, a stack trace for\n"
"each allocation is printed.\n"
#endif
"If a specific allocation address is given, only this allocation is\n"
"dumped.\n"
"If a team and/or thread is specified, only allocations of this\n"
"team/thread are dumped.\n", 0);
return B_OK;
}
void*
memalign(size_t alignment, size_t size)
{
return memalign_etc(alignment, size, 0);
}
void *
memalign_etc(size_t alignment, size_t size, uint32 flags)
{
if (size == 0)
size = 1;
return guarded_heap_allocate(sGuardedHeap, size, alignment, flags);
}
extern "C" int
posix_memalign(void** _pointer, size_t alignment, size_t size)
{
if ((alignment & (sizeof(void*) - 1)) != 0 || _pointer == NULL)
return B_BAD_VALUE;
*_pointer = guarded_heap_allocate(sGuardedHeap, size, alignment, 0);
return 0;
}
void
free_etc(void *address, uint32 flags)
{
guarded_heap_free(address, flags);
}
void*
malloc(size_t size)
{
return memalign_etc(0, size, 0);
}
void
free(void* address)
{
free_etc(address, 0);
}
void*
realloc_etc(void* address, size_t newSize, uint32 flags)
{
if (newSize == 0) {
free_etc(address, flags);
return NULL;
}
if (address == NULL)
return malloc_etc(newSize, flags);
return guarded_heap_realloc(address, newSize, flags);
}
void*
realloc(void* address, size_t newSize)
{
return realloc_etc(address, newSize, 0);
}
#endif
#if USE_GUARDED_HEAP_FOR_OBJECT_CACHE
struct ObjectCache {
size_t object_size;
size_t alignment;
void* cookie;
object_cache_constructor constructor;
object_cache_destructor destructor;
};
object_cache*
create_object_cache(const char* name, size_t object_size, uint32 flags)
{
return create_object_cache_etc(name, object_size, 0, 0, 0, 0, flags,
NULL, NULL, NULL, NULL);
}
object_cache*
create_object_cache_etc(const char*, size_t objectSize, size_t alignment, size_t, size_t,
size_t, uint32, void* cookie, object_cache_constructor ctor, object_cache_destructor dtor,
object_cache_reclaimer)
{
ObjectCache* cache = new ObjectCache;
if (cache == NULL)
return NULL;
cache->object_size = objectSize;
cache->alignment = alignment;
cache->cookie = cookie;
cache->constructor = ctor;
cache->destructor = dtor;
return cache;
}
void
delete_object_cache(object_cache* cache)
{
delete cache;
}
status_t
object_cache_set_minimum_reserve(object_cache* cache, size_t objectCount)
{
return B_OK;
}
void*
object_cache_alloc(object_cache* cache, uint32 flags)
{
void* object = memalign_etc(cache->alignment, cache->object_size, flags);
if (object == NULL)
return NULL;
if (cache->constructor != NULL)
cache->constructor(cache->cookie, object);
return object;
}
void
object_cache_free(object_cache* cache, void* object, uint32 flags)
{
if (cache->destructor != NULL)
cache->destructor(cache->cookie, object);
return free_etc(object, flags);
}
status_t
object_cache_reserve(object_cache* cache, size_t objectCount, uint32 flags)
{
return B_OK;
}
void
object_cache_get_usage(object_cache* cache, size_t* _allocatedMemory)
{
*_allocatedMemory = 0;
}
void
request_memory_manager_maintenance()
{
}
void
slab_init(kernel_args* args)
{
}
void
slab_init_post_area()
{
}
void
slab_init_post_sem()
{
}
void
slab_init_post_thread()
{
}
#endif
@@ -24,6 +24,7 @@
font_big.cpp
frame_buffer_console.cpp
gdb.cpp
guarded_heap.cpp
safemode_settings.cpp
system_profiler.cpp
tracing.cpp
@@ -1,0 +1,1027 @@
/*
* Copyright 2011-2020, Michael Lotz <mmlr@mlotz.ch>.
* Copyright 2025, Haiku, Inc. All rights reserved.
* Distributed under the terms of the MIT License.
*/
#include <stdio.h>
#include <string.h>
#include <arch/debug.h>
#include <elf.h>
#include <debug.h>
#include <heap.h>
#include <malloc.h>
#include <slab/Slab.h>
#include <team.h>
#include <util/SimpleAllocator.h>
#include <util/AutoLock.h>
#include <vm/VMAddressSpace.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include "../vm/VMAddressSpaceLocking.h"
#include "../vm/VMAnonymousNoSwapCache.h"
#if USE_GUARDED_HEAP_FOR_MALLOC
#define GUARDED_HEAP_STACK_TRACE_DEPTH 0
struct GuardedHeapChunk : public SplayTreeLink<GuardedHeapChunk> {
GuardedHeapChunk* tree_list_link;
addr_t base;
size_t pages_count;
addr_t allocation_base;
size_t allocation_size;
size_t alignment;
team_id team;
thread_id thread;
#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
size_t stack_trace_depth;
addr_t stack_trace[GUARDED_HEAP_STACK_TRACE_DEPTH];
#endif
};
struct GuardedHeapChunksTreeDefinition {
typedef addr_t KeyType;
typedef GuardedHeapChunk NodeType;
static addr_t GetKey(const GuardedHeapChunk* node)
{
return node->base;
}
static SplayTreeLink<GuardedHeapChunk>* GetLink(GuardedHeapChunk* node)
{
return node;
}
static int Compare(const addr_t& key, const GuardedHeapChunk* node)
{
if (key == node->base)
return 0;
return (key < node->base) ? -1 : 1;
}
static GuardedHeapChunk** GetListLink(GuardedHeapChunk* node)
{
return &node->tree_list_link;
}
};
typedef IteratableSplayTree<GuardedHeapChunksTreeDefinition> GuardedHeapChunksTree;
class GuardedHeapCache final : public VMAnonymousNoSwapCache {
public:
status_t Init()
{
return VMAnonymousNoSwapCache::Init(false, 0, 0, 0);
}
status_t Fault(VMAddressSpace* aspace, off_t offset) override;
protected:
virtual void DeleteObject() override
{
ASSERT_UNREACHABLE();
}
};
struct guarded_heap {
mutex lock;
ConditionVariable memory_added_condition;
GuardedHeapCache* cache;
SimpleAllocator<> meta_allocator;
GuardedHeapChunksTree dead_chunks;
GuardedHeapChunksTree free_chunks;
GuardedHeapChunksTree live_chunks;
addr_t last_allocated;
size_t free_pages_count;
thread_id acquiring_pages;
thread_id acquiring_meta;
};
static GuardedHeapCache sGuardedHeapCache;
static guarded_heap sGuardedHeap = {
MUTEX_INITIALIZER("guarded heap lock")
};
static addr_t sGuardedHeapEarlyMetaBase, sGuardedHeapEarlyBase;
static size_t sGuardedHeapEarlySize;
static void*
guarded_heap_allocate_meta(guarded_heap& heap, size_t size, uint32 flags)
{
size_t growSize = ROUNDUP(((HEAP_GROW_SIZE / B_PAGE_SIZE) / 2) * sizeof(GuardedHeapChunk),
B_PAGE_SIZE);
while (heap.meta_allocator.Available() < (growSize / 2)) {
if ((flags & HEAP_DONT_LOCK_KERNEL_SPACE) != 0)
break;
if (heap.acquiring_meta == thread_get_current_thread_id())
break;
bool waited = false;
while (heap.acquiring_meta >= 0) {
heap.memory_added_condition.Wait(&heap.lock);
waited = true;
}
if (waited)
continue;
heap.acquiring_meta = thread_get_current_thread_id();
mutex_unlock(&heap.lock);
void* meta = NULL;
create_area("guarded heap meta", &meta, B_ANY_KERNEL_ADDRESS, growSize, B_FULL_LOCK,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (meta == NULL)
panic("guarded_heap: failed to allocate meta area");
mutex_lock(&heap.lock);
heap.meta_allocator.AddChunk(meta, growSize);
heap.acquiring_meta = -1;
heap.memory_added_condition.NotifyAll();
}
void* meta = heap.meta_allocator.Allocate(size);
memset(meta, 0, size);
return meta;
}
static bool
guarded_heap_add_area(guarded_heap& heap, size_t minimumPages, uint32 flags)
{
if (heap.cache == NULL) {
panic("guarded_heap_add_area: too early in the boot!");
return false;
}
if ((flags & HEAP_DONT_LOCK_KERNEL_SPACE) != 0)
return false;
if (heap.acquiring_pages == thread_get_current_thread_id())
return false;
size_t growPages = HEAP_GROW_SIZE / B_PAGE_SIZE;
if (minimumPages > growPages)
growPages = minimumPages;
while (heap.acquiring_pages >= 0)
heap.memory_added_condition.Wait(&heap.lock);
if (minimumPages == 0 && heap.free_pages_count >= growPages) {
return true;
}
GuardedHeapChunk* chunk = (GuardedHeapChunk*)
guarded_heap_allocate_meta(heap, sizeof(GuardedHeapChunk), flags);
heap.acquiring_pages = thread_get_current_thread_id();
mutex_unlock(&heap.lock);
VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
AddressSpaceWriteLocker aspaceLocker(addressSpace, true);
VMArea* area = addressSpace->CreateArea("guarded heap area", B_LAZY_LOCK,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, flags | HEAP_PRIORITY_VIP);
VMAreas::Insert(area);
heap.cache->Lock();
heap.cache->InsertAreaLocked(area);
area->cache = heap.cache;
heap.cache->Unlock();
void* address = NULL;
virtual_address_restrictions restrictions = {};
if (addressSpace->InsertArea(area, growPages * B_PAGE_SIZE,
&restrictions, 0, &address) != B_OK) {
panic("guarded_heap: out of virtual memory");
return false;
}
area->cache_offset = area->Base();
aspaceLocker.Unlock();
mutex_lock(&heap.lock);
chunk->base = area->Base();
chunk->pages_count = area->Size() / B_PAGE_SIZE;
heap.free_chunks.Insert(chunk);
heap.free_pages_count += chunk->pages_count;
heap.acquiring_pages = -1;
heap.memory_added_condition.NotifyAll();
return true;
}
static GuardedHeapChunk*
guarded_heap_find_chunk(GuardedHeapChunksTree& tree, addr_t address)
{
GuardedHeapChunk* chunk = tree.FindClosest((addr_t)address, false, true);
if (chunk == NULL)
return chunk;
if (address >= chunk->base && address
< (chunk->base + chunk->pages_count * B_PAGE_SIZE))
return chunk;
return NULL;
}
static void*
guarded_heap_allocate(guarded_heap& heap, size_t size, size_t alignment,
uint32 flags)
{
MutexLocker locker(heap.lock);
if (alignment == 0)
alignment = 1;
if (heap.free_pages_count <= (HEAP_GROW_SIZE / B_PAGE_SIZE / 2))
guarded_heap_add_area(heap, 0, flags);
GuardedHeapChunk* spareChunk = (GuardedHeapChunk*)
guarded_heap_allocate_meta(heap, sizeof(GuardedHeapChunk), flags);
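// Every allocation is trailed by one page that is never mapped: the guard page.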
const size_t guardPages = 1;
const size_t alignedSize = ROUNDUP(size, alignment);
const size_t neededPages = ((alignedSize + B_PAGE_SIZE - 1) / B_PAGE_SIZE) + guardPages;
addr_t searchBase = heap.last_allocated;
GuardedHeapChunk* freeChunk = NULL;
bool restarted = false;
while (freeChunk == NULL) {
freeChunk = heap.free_chunks.FindClosest(searchBase, true, false);
while (freeChunk != NULL && freeChunk->pages_count < neededPages)
freeChunk = freeChunk->tree_list_link;
if (freeChunk != NULL)
break;
if (freeChunk == NULL && !restarted) {
searchBase = 0;
restarted = true;
continue;
}
if (!guarded_heap_add_area(heap, neededPages, flags)) {
heap.meta_allocator.Free(spareChunk);
return NULL;
}
searchBase = heap.last_allocated;
restarted = false;
}
GuardedHeapChunk* chunk = NULL;
if (freeChunk->pages_count > neededPages) {
chunk = spareChunk;
chunk->base = freeChunk->base;
chunk->pages_count = neededPages;
freeChunk->base += neededPages * B_PAGE_SIZE;
freeChunk->pages_count -= neededPages;
} else {
heap.meta_allocator.Free(spareChunk);
heap.free_chunks.Remove(freeChunk);
chunk = freeChunk;
}
heap.free_pages_count -= chunk->pages_count;
chunk->allocation_size = size;
chunk->alignment = alignment;
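// Place the allocation flush against the end of the mapped pages, so an
// overrun runs directly into the unmapped guard page that follows.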
chunk->allocation_base = chunk->base
+ (chunk->pages_count - guardPages) * B_PAGE_SIZE
- size;
if (alignment > 1) {
chunk->allocation_base -= (chunk->allocation_base % alignment);
ASSERT(chunk->allocation_base >= chunk->base);
}
#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
chunk->stack_trace_depth = arch_debug_get_stack_trace(
chunk->stack_trace, GUARDED_HEAP_STACK_TRACE_DEPTH,
0, 3, STACK_TRACE_KERNEL);
#endif
if (heap.cache == NULL) {
heap.live_chunks.Insert(chunk);
heap.last_allocated = chunk->allocation_base;
return (void*)chunk->allocation_base;
}
chunk->team = (gKernelStartup ? 0 : team_get_current_team_id());
chunk->thread = (gKernelStartup ? 0 : thread_get_current_thread_id());
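// Only the data pages get vm_pages and mappings; the trailing guard page
// is left unmapped so that any access to it faults.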
size_t mapPages = neededPages - guardPages;
if (vm_try_reserve_memory(mapPages * B_PAGE_SIZE, VM_PRIORITY_SYSTEM,
(flags & HEAP_DONT_WAIT_FOR_MEMORY) != 0 ? 0 : 1000000) != B_OK) {
panic("out of memory");
return NULL;
}
VMTranslationMap* map = VMAddressSpace::Kernel()->TranslationMap();
size_t toMap = map->MaxPagesNeededToMap(chunk->base,
chunk->base + mapPages * B_PAGE_SIZE);
vm_page_reservation reservation = {};
vm_page_reserve_pages(&reservation, mapPages + toMap, VM_PRIORITY_SYSTEM);
heap.cache->Lock();
map->Lock();
for (size_t i = 0; i < mapPages; i++) {
addr_t mapAddress = chunk->base + i * B_PAGE_SIZE;
vm_page* page = vm_page_allocate_page(&reservation,
PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
heap.cache->InsertPage(page, mapAddress);
map->Map(mapAddress, page->physical_page_number * B_PAGE_SIZE,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
heap.cache->areas.First()->MemoryType(), &reservation);
page->IncrementWiredCount();
DEBUG_PAGE_ACCESS_END(page);
}
map->Unlock();
heap.cache->Unlock();
vm_page_unreserve_pages(&reservation);
heap.live_chunks.Insert(chunk);
heap.last_allocated = chunk->allocation_base;
return (void*)chunk->allocation_base;
}
static bool
guarded_heap_free_page(guarded_heap& heap, addr_t pageAddress,
vm_page_reservation* reservation = NULL)
{
vm_page* page = heap.cache->LookupPage(pageAddress);
if (page == NULL)
return false;
DEBUG_PAGE_ACCESS_START(page);
page->DecrementWiredCount();
heap.cache->RemovePage(page);
vm_page_free_etc(heap.cache, page, reservation);
return true;
}
static void
guarded_heap_free(guarded_heap& heap, void* address, uint32 flags)
{
if (address == NULL)
return;
MutexLocker locker(heap.lock);
GuardedHeapChunk* chunk = guarded_heap_find_chunk(heap.live_chunks, (addr_t)address);
if (chunk == NULL) {
GuardedHeapChunk* deadChunk = guarded_heap_find_chunk(heap.dead_chunks, (addr_t)address);
GuardedHeapChunk* freeChunk = guarded_heap_find_chunk(heap.free_chunks, (addr_t)address);
if (deadChunk != NULL || freeChunk != NULL) {
chunk = deadChunk != NULL ? deadChunk : freeChunk;
panic("tried to free %p, which is a %s chunk (last accessor: team %d, thread %d)",
address, chunk == deadChunk ? "dead" : "free", chunk->team, chunk->thread);
} else {
panic("tried to free %p, but can't find a heap chunk for it", address);
}
return;
} else {
heap.live_chunks.Remove(chunk);
}
if (chunk->allocation_base != (addr_t)address) {
panic("tried to free %p, but allocation base is really %p",
address, (void*)chunk->allocation_base);
}
if (heap.cache == NULL) {
heap.dead_chunks.Insert(chunk);
return;
}
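// Unmap the whole chunk and return its vm_pages to the system; only the
// chunk metadata is kept around.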
VMTranslationMap* map = VMAddressSpace::Kernel()->TranslationMap();
map->Lock();
map->Unmap(chunk->base, chunk->base
+ chunk->pages_count * B_PAGE_SIZE);
map->Unlock();
heap.cache->Lock();
vm_page_reservation reservation = {};
for (size_t i = 0; i < chunk->pages_count; i++)
guarded_heap_free_page(heap, chunk->base + i * B_PAGE_SIZE, &reservation);
heap.cache->Unlock();
vm_unreserve_memory(reservation.count * B_PAGE_SIZE);
vm_page_unreserve_pages(&reservation);
#if DEBUG_GUARDED_HEAP_DISABLE_MEMORY_REUSE
GuardedHeapChunksTree& tree = heap.dead_chunks;
#else
GuardedHeapChunksTree& tree = heap.free_chunks;
heap.free_pages_count += chunk->pages_count;
#endif
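// Coalesce with adjacent chunks in the target tree so the number of meta
// chunks stays bounded.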
GuardedHeapChunk* joinLower = guarded_heap_find_chunk(tree, chunk->base - 1),
*joinUpper = guarded_heap_find_chunk(tree,
chunk->base + chunk->pages_count * B_PAGE_SIZE);
if (joinLower != NULL) {
joinLower->pages_count += chunk->pages_count;
joinLower->allocation_base = chunk->allocation_base;
joinLower->allocation_size = chunk->allocation_size;
joinLower->alignment = chunk->alignment;
heap.meta_allocator.Free(chunk);
chunk = joinLower;
} else {
tree.Insert(chunk);
}
if (joinUpper != NULL) {
tree.Remove(joinUpper);
chunk->pages_count += joinUpper->pages_count;
heap.meta_allocator.Free(joinUpper);
}
chunk->team = (gKernelStartup ? 0 : team_get_current_team_id());
chunk->thread = (gKernelStartup ? 0 : thread_get_current_thread_id());
#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
chunk->stack_trace_depth = arch_debug_get_stack_trace(
chunk->stack_trace, GUARDED_HEAP_STACK_TRACE_DEPTH,
0, 3, STACK_TRACE_KERNEL);
#endif
}
static void*
guarded_heap_realloc(guarded_heap& heap, void* address, size_t newSize, uint32 flags)
{
MutexLocker locker(heap.lock);
GuardedHeapChunk* chunk = guarded_heap_find_chunk(heap.live_chunks, (addr_t)address);
if (chunk == NULL) {
panic("realloc(%p): no such allocation", address);
return NULL;
}
if ((addr_t)address != chunk->allocation_base) {
panic("realloc(%p): chunk base is really %p", address,
(void*)chunk->allocation_base);
}
size_t oldSize = chunk->allocation_size;
locker.Unlock();
if (oldSize == newSize)
return address;
void* newBlock = malloc_etc(newSize, flags);
if (newBlock == NULL)
return NULL;
memcpy(newBlock, address, min_c(oldSize, newSize));
free_etc(address, flags);
return newBlock;
}
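// Invalid accesses to guarded heap memory fault into here; the panic below
// reports the faulting address (and references the matching guarded_heap_chunk).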
status_t
GuardedHeapCache::Fault(VMAddressSpace* aspace, off_t offset)
{
panic("guarded_heap: invalid access to %p @! guarded_heap_chunk %p",
(void*)offset, (void*)offset);
return B_BAD_ADDRESS;
}
static void
dump_guarded_heap_stack_trace(GuardedHeapChunk& chunk)
{
#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
kprintf("stack trace:\n");
for (size_t i = 0; i < chunk.stack_trace_depth; i++) {
addr_t address = chunk.stack_trace[i];
const char* symbol;
const char* imageName;
bool exactMatch;
addr_t baseAddress;
if (elf_debug_lookup_symbol_address(address, &baseAddress, &symbol,
&imageName, &exactMatch) == B_OK) {
kprintf(" %p %s + 0x%lx (%s)%s\n", (void*)address, symbol,
address - baseAddress, imageName,
exactMatch ? "" : " (nearest)");
} else
kprintf(" %p\n", (void*)address);
}
#endif
}
static int
dump_guarded_heap_chunk(int argc, char** argv)
{
if (argc != 2) {
print_debugger_command_usage(argv[0]);
return 0;
}
addr_t address = parse_expression(argv[1]);
const char* state = NULL;
const char* prefix = "last ";
GuardedHeapChunk* chunk = guarded_heap_find_chunk(sGuardedHeap.live_chunks, address);
if (chunk != NULL) {
state = "live";
prefix = "";
} else {
chunk = guarded_heap_find_chunk(sGuardedHeap.free_chunks, address);
if (chunk != NULL) {
state = "free";
} else {
chunk = guarded_heap_find_chunk(sGuardedHeap.dead_chunks, address);
if (chunk != NULL)
state = "dead";
}
}
if (chunk == NULL) {
kprintf("didn't find chunk for address\n");
return 1;
}
kprintf("address %p: %s chunk %p\n", (void*)address, state, chunk);
kprintf("base: %p\n", (void*)chunk->base);
kprintf("pages count: %" B_PRIuSIZE "\n", chunk->pages_count);
kprintf("%sallocation size: %" B_PRIuSIZE "\n", prefix, chunk->allocation_size);
kprintf("%sallocation base: %p\n", prefix, (void*)chunk->allocation_base);
kprintf("%salignment: %" B_PRIuSIZE "\n", prefix, chunk->alignment);
kprintf("%sallocating team: %" B_PRId32 "\n", prefix, chunk->team);
kprintf("%sallocating thread: %" B_PRId32 "\n", prefix, chunk->thread);
dump_guarded_heap_stack_trace(*chunk);
return 0;
}
static int
dump_guarded_heap(int argc, char** argv)
{
guarded_heap* heap = &sGuardedHeap;
if (argc != 1) {
if (argc == 2)
heap = (guarded_heap*)parse_expression(argv[1]);
else {
print_debugger_command_usage(argv[0]);
return 0;
}
}
kprintf("guarded heap: %p\n", heap);
kprintf("rw lock: %p\n", &heap->lock);
kprintf("page count: %" B_PRIu32 "\n", heap->cache->page_count);
return 0;
}
static int
dump_guarded_heap_allocations(int argc, char** argv)
{
team_id team = -1;
thread_id thread = -1;
addr_t address = 0;
bool statsOnly = false;
bool stackTrace = false;
for (int32 i = 1; i < argc; i++) {
if (strcmp(argv[i], "team") == 0)
team = parse_expression(argv[++i]);
else if (strcmp(argv[i], "thread") == 0)
thread = parse_expression(argv[++i]);
else if (strcmp(argv[i], "address") == 0)
address = parse_expression(argv[++i]);
else if (strcmp(argv[i], "stats") == 0)
statsOnly = true;
#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
else if (strcmp(argv[i], "trace") == 0)
stackTrace = true;
#endif
else {
print_debugger_command_usage(argv[0]);
return 0;
}
}
size_t totalSize = 0;
uint32 totalCount = 0;
GuardedHeapChunk* chunk = sGuardedHeap.live_chunks.FindMin();
while (chunk != NULL) {
if ((team < 0 || chunk->team == team)
&& (thread < 0 || chunk->thread == thread)
&& (address == 0 || (addr_t)chunk->allocation_base == address)) {
if (!statsOnly) {
kprintf("team: % 6" B_PRId32 "; thread: % 6" B_PRId32 "; "
"address: 0x%08" B_PRIxADDR "; size: %" B_PRIuSIZE
" bytes\n", chunk->team, chunk->thread,
(addr_t)chunk->allocation_base, chunk->allocation_size);
if (stackTrace)
dump_guarded_heap_stack_trace(*chunk);
}
totalSize += chunk->allocation_size;
totalCount++;
}
chunk = chunk->tree_list_link;
}
kprintf("total allocations: %" B_PRIu32 "; total bytes: %" B_PRIuSIZE
"\n", totalCount, totalSize);
return 0;
}
status_t
heap_init(addr_t address, size_t size)
{
sGuardedHeap.memory_added_condition.Init(&sGuardedHeap, "guarded heap");
sGuardedHeap.acquiring_pages = sGuardedHeap.acquiring_meta = -1;
size_t metaSize = ROUNDUP(((size / B_PAGE_SIZE) / 2) * sizeof(GuardedHeapChunk), B_PAGE_SIZE);
sGuardedHeapEarlyMetaBase = address;
sGuardedHeapEarlyBase = address + metaSize;
sGuardedHeapEarlySize = size - metaSize;
sGuardedHeap.meta_allocator.AddChunk((void*)address, metaSize);
GuardedHeapChunk* chunk = (GuardedHeapChunk*)
guarded_heap_allocate_meta(sGuardedHeap, sizeof(GuardedHeapChunk), 0);
chunk->base = sGuardedHeapEarlyBase;
chunk->pages_count = sGuardedHeapEarlySize / B_PAGE_SIZE;
sGuardedHeap.free_chunks.Insert(chunk);
sGuardedHeap.free_pages_count += chunk->pages_count;
return B_OK;
}
status_t
heap_init_post_area()
{
void* address = (void*)sGuardedHeapEarlyMetaBase;
area_id metaAreaId = create_area("guarded heap meta", &address, B_EXACT_ADDRESS,
sGuardedHeapEarlyBase - sGuardedHeapEarlyMetaBase, B_ALREADY_WIRED,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
ASSERT_ALWAYS(metaAreaId >= 0);
address = (void*)sGuardedHeapEarlyBase;
area_id areaId = create_area("guarded heap", &address, B_EXACT_ADDRESS,
sGuardedHeapEarlySize, B_ALREADY_WIRED,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
VMAreas::Lookup(areaId)->cache_offset = (addr_t)address;
return B_OK;
}
status_t
heap_init_post_sem()
{
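// Now that the VM is fully up: move the early heap's pages into the
// GuardedHeapCache, unmap the guard pages of live chunks, release the pages
// of free chunks, and replay frees that had to be deferred as dead chunks.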
new(&sGuardedHeapCache) GuardedHeapCache;
sGuardedHeapCache.Init();
sGuardedHeapCache.virtual_base = VMAddressSpace::Kernel()->Base();
sGuardedHeapCache.virtual_end = VMAddressSpace::Kernel()->EndAddress();
sGuardedHeap.cache = &sGuardedHeapCache;
VMArea* area = VMAreas::Lookup(area_for((void*)sGuardedHeapEarlyBase));
VMCache* initialCache = area->cache;
initialCache->Lock();
sGuardedHeap.cache->Lock();
sGuardedHeap.cache->Adopt(initialCache, 0, sGuardedHeapEarlySize, sGuardedHeapEarlyBase);
area->cache = sGuardedHeap.cache;
initialCache->Unlock();
initialCache->RemoveArea(area);
sGuardedHeap.cache->InsertAreaLocked(area);
sGuardedHeap.cache->Unlock();
initialCache->ReleaseRef();
MutexLocker locker(sGuardedHeap.lock);
sGuardedHeap.cache->Lock();
vm_page_reservation reservation = {};
VMTranslationMap* map = area->address_space->TranslationMap();
map->Lock();
GuardedHeapChunk* chunk = sGuardedHeap.live_chunks.FindMin();
while (chunk != NULL) {
addr_t unmap = chunk->base + (chunk->pages_count - 1) * B_PAGE_SIZE;
map->Unmap(unmap, unmap + B_PAGE_SIZE);
guarded_heap_free_page(sGuardedHeap, unmap, &reservation);
chunk = chunk->tree_list_link;
}
chunk = sGuardedHeap.free_chunks.FindMin();
while (chunk != NULL) {
for (size_t i = 0; i < chunk->pages_count; i++) {
addr_t pageAddress = chunk->base + i * B_PAGE_SIZE;
map->Unmap(pageAddress, pageAddress + B_PAGE_SIZE);
guarded_heap_free_page(sGuardedHeap, pageAddress, &reservation);
}
chunk = chunk->tree_list_link;
}
map->Unlock();
sGuardedHeap.cache->Unlock();
GuardedHeapChunk* linkChunk = NULL;
while ((chunk = sGuardedHeap.dead_chunks.FindMax()) != NULL) {
sGuardedHeap.dead_chunks.Remove(chunk);
chunk->tree_list_link = linkChunk;
linkChunk = chunk;
}
while (linkChunk != NULL) {
chunk = linkChunk;
linkChunk = linkChunk->tree_list_link;
sGuardedHeap.live_chunks.Insert(chunk);
locker.Unlock();
guarded_heap_free(sGuardedHeap, (void*)chunk->allocation_base, 0);
locker.Lock();
}
locker.Unlock();
vm_unreserve_memory(reservation.count * B_PAGE_SIZE);
vm_page_unreserve_pages(&reservation);
add_debugger_command("guarded_heap", &dump_guarded_heap,
"Dump info about the guarded heap");
add_debugger_command_etc("guarded_heap_chunk", &dump_guarded_heap_chunk,
"Dump info about a guarded heap chunk",
"<address>\nDump info about guarded heap chunk.\n",
0);
add_debugger_command_etc("allocations", &dump_guarded_heap_allocations,
"Dump current heap allocations",
#if GUARDED_HEAP_STACK_TRACE_DEPTH == 0
"[\"stats\"] [team] [thread] [address]\n"
#else
"[\"stats\"|\"trace\"] [team] [thread] [address]\n"
#endif
"If no parameters are given, all current alloactions are dumped.\n"
"If the optional argument \"stats\" is specified, only the allocation\n"
"counts and no individual allocations are printed.\n"
#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
"If the optional argument \"trace\" is specified, a stack trace for\n"
"each allocation is printed.\n"
#endif
"If a specific allocation address is given, only this allocation is\n"
"dumped.\n"
"If a team and/or thread is specified, only allocations of this\n"
"team/thread are dumped.\n", 0);
return B_OK;
}
void*
memalign(size_t alignment, size_t size)
{
return memalign_etc(alignment, size, 0);
}
void *
memalign_etc(size_t alignment, size_t size, uint32 flags)
{
if (size == 0)
size = 1;
return guarded_heap_allocate(sGuardedHeap, size, alignment, flags);
}
extern "C" int
posix_memalign(void** _pointer, size_t alignment, size_t size)
{
if ((alignment & (sizeof(void*) - 1)) != 0 || _pointer == NULL)
return B_BAD_VALUE;
*_pointer = guarded_heap_allocate(sGuardedHeap, size, alignment, 0);
return 0;
}
void
free_etc(void *address, uint32 flags)
{
guarded_heap_free(sGuardedHeap, address, flags);
}
void*
malloc(size_t size)
{
return memalign_etc(sizeof(void*), size, 0);
}
void
free(void* address)
{
free_etc(address, 0);
}
void*
realloc_etc(void* address, size_t newSize, uint32 flags)
{
if (newSize == 0) {
free_etc(address, flags);
return NULL;
}
if (address == NULL)
return malloc_etc(newSize, flags);
return guarded_heap_realloc(sGuardedHeap, address, newSize, flags);
}
void*
realloc(void* address, size_t newSize)
{
return realloc_etc(address, newSize, 0);
}
#endif
#if USE_GUARDED_HEAP_FOR_OBJECT_CACHE
struct ObjectCache {
size_t object_size;
size_t alignment;
void* cookie;
object_cache_constructor constructor;
object_cache_destructor destructor;
};
object_cache*
create_object_cache(const char* name, size_t object_size, uint32 flags)
{
return create_object_cache_etc(name, object_size, 0, 0, 0, 0, flags,
NULL, NULL, NULL, NULL);
}
object_cache*
create_object_cache_etc(const char*, size_t objectSize, size_t alignment, size_t, size_t,
size_t, uint32, void* cookie, object_cache_constructor ctor, object_cache_destructor dtor,
object_cache_reclaimer)
{
ObjectCache* cache = new ObjectCache;
if (cache == NULL)
return NULL;
cache->object_size = objectSize;
cache->alignment = alignment;
cache->cookie = cookie;
cache->constructor = ctor;
cache->destructor = dtor;
return cache;
}
void
delete_object_cache(object_cache* cache)
{
delete cache;
}
status_t
object_cache_set_minimum_reserve(object_cache* cache, size_t objectCount)
{
return B_OK;
}
void*
object_cache_alloc(object_cache* cache, uint32 flags)
{
void* object = memalign_etc(cache->alignment, cache->object_size, flags);
if (object == NULL)
return NULL;
if (cache->constructor != NULL)
cache->constructor(cache->cookie, object);
return object;
}
void
object_cache_free(object_cache* cache, void* object, uint32 flags)
{
if (cache->destructor != NULL)
cache->destructor(cache->cookie, object);
return free_etc(object, flags);
}
status_t
object_cache_reserve(object_cache* cache, size_t objectCount, uint32 flags)
{
return B_OK;
}
void
object_cache_get_usage(object_cache* cache, size_t* _allocatedMemory)
{
*_allocatedMemory = 0;
}
void
request_memory_manager_maintenance()
{
}
void
slab_init(kernel_args* args)
{
}
void
slab_init_post_area()
{
}
void
slab_init_post_sem()
{
}
void
slab_init_post_thread()
{
}
#endif
@@ -3992,7 +3992,7 @@
#if USE_DEBUG_HEAP_FOR_MALLOC || USE_GUARDED_HEAP_FOR_MALLOC
#if USE_DEBUG_HEAP_FOR_MALLOC
address = (void*)ROUNDDOWN(heapBase, B_PAGE_SIZE);
create_area("kernel heap", &address, B_EXACT_ADDRESS, heapSize,
B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);