void *M_ZeroSize(void *base, U64 size) {
    U8 *bytes = (U8 *) base;
    while (size--) { *bytes++ = 0; }

    return base;
}

void *M_CopySize(void *dst, void *src, U64 size) {
    // Copies backwards so overlapping ranges where dst > src remain correct
    if (size != 0) {
        U8 *se = (U8 *) src + (size - 1);
        U8 *de = (U8 *) dst + (size - 1);

        while (size--) { *de-- = *se--; }
    }

    return dst;
}

M_Arena *_M_ArenaAlloc(U64 limit, M_ArenaOpts *opts) {
    M_Arena *result = 0;

    local_persist M_ArenaOpts _opts;
    if (!opts) { opts = &_opts; }

    U64 page_size   = VM_PageSize();
    U64 granularity = VM_AllocationGranularity();

    U64 reserve = Max(AlignUp(limit, granularity), granularity);
    U64 initial = Clamp(page_size, AlignUp(opts->initial, page_size), reserve);

    void *base = VM_Reserve(reserve);
    if (base != 0) {
        if (VM_Commit(base, initial)) {
            result = cast(M_Arena *) base;

            result->current   = result;
            result->prev      = 0;
            result->base      = 0;
            result->offset    = sizeof(M_Arena);
            result->limit     = reserve;
            result->committed = initial;
            result->increment = Max(AlignUp(opts->increment, page_size), page_size);
            result->flags     = opts->flags;
        } else {
            VM_Release(base, reserve);
        }
    }

    Assert(result != 0);
    return result;
}

void *_M_ArenaPush(M_Arena *arena, U64 esize, M_ArenaPushOpts *opts) {
    void *result = 0;

    local_persist M_ArenaPushOpts _opts = { .count = 1, .align = 8 };
    if (!opts) { opts = &_opts; }

    U64 alignment = Clamp(1, opts->align, 4096);

    M_Arena *current = arena->current;

    U64 total  = esize * opts->count;
    U64 offset = AlignUp(current->offset, alignment);
    U64 end    = offset + total;

    if (end > current->limit) {
        // Not enough space, chain a new arena if flags allow
        if ((arena->flags & M_ARENA_FIXED_SIZE) == 0) {
            // Reserve at least the default chain size, more if this push alone is larger
            U64 reserve = Max(AlignUp(sizeof(M_Arena), alignment) + total, M_ARENA_CHAIN_RESERVE);

            M_Arena *next = M_ArenaAlloc(reserve, .flags = arena->flags);
            next->base = current->base + current->limit;

            SLL_PushN(arena->current, next, prev);

            current = next;
            offset  = AlignUp(current->offset, alignment);
            end     = offset + total;
        }
    }

    if (end > current->committed) {
        // Not enough committed, commit more memory
        U64 commit_offset = AlignUp(end, current->increment);
        U64 commit_limit  = Min(commit_offset, current->limit);

        U8 *commit_base = cast(U8 *) current + current->committed;
        U64 commit_size = commit_limit - current->committed;

        if (VM_Commit(commit_base, commit_size)) {
            current->committed = commit_limit;
        }
    }

    if (end <= current->committed) {
        // Successfully got enough memory, push the allocation
        result = cast(U8 *) current + offset;
        current->offset = end;

        if (((opts->flags | arena->flags) & M_ARENA_NO_ZERO) == 0) {
            M_ZeroSize(result, total);
        }
    }

    Assert(result != 0);
    Assert(((U64) result & (alignment - 1)) == 0);

    return result;
}

void *_M_ArenaPushCopy(M_Arena *arena, void *from, U64 size, M_ArenaPushOpts *opts) {
    void *result = M_CopySize(_M_ArenaPush(arena, size, opts), from, size);
    return result;
}

void M_ArenaReset(M_Arena *arena) {
    // Release every chained block until we are back at the base arena
    M_Arena *base = arena->current;
    while (base->base != 0) {
        M_Arena *prev = base->prev;
        VM_Release(base, base->limit);

        base = prev;
    }

    Assert(arena == base);

    // @Todo: We could decommit some of the memory in the base arena, do we want to give a
    // parameter to choose how much :decommit
    base->offset   = sizeof(M_Arena);
    arena->current = base;
}

void M_ArenaRelease(M_Arena *arena) {
    M_ArenaReset(arena);
    VM_Release(arena, arena->limit);
}

U64 M_ArenaOffset(M_Arena *arena) {
    U64 result = arena->current->base + arena->current->offset;
    return result;
}

void M_ArenaPop(M_Arena *arena, U64 offset) {
    // Release any chained blocks that start beyond the target offset
    M_Arena *base = arena->current;
    while (base->base > offset) {
        M_Arena *prev = base->prev;
        VM_Release(base, base->limit);

        base = prev;
    }

    // :decommit
    arena->current = base;
    base->offset   = Max(offset - base->base, sizeof(M_Arena));
}

void M_ArenaPopSize(M_Arena *arena, U64 size) {
    U64 offset = M_ArenaOffset(arena);
    offset -= Min(offset, size);

    M_ArenaPop(arena, offset);
}

#define M_TEMP_ARENA_LIMIT GB(4)

static thread_var M_Arena *__tls_temp[2];

M_Temp M_TempAcquire(U64 count, M_Arena **conflicts) {
    M_Temp result = { 0 };

    for (U32 it = 0; it < ArraySize(__tls_temp); ++it) {
        if (!__tls_temp[it]) {
            __tls_temp[it] = M_ArenaAlloc(M_TEMP_ARENA_LIMIT, .initial = MB(1), .increment = MB(1));
        }

        result.arena = __tls_temp[it];

        for (U32 c = 0; c < count; ++c) {
            if (__tls_temp[it] == conflicts[c]) {
                result.arena = 0;
                break;
            }
        }

        if (result.arena) {
            result.offset = M_ArenaOffset(result.arena);
            break;
        }
    }

    Assert(result.arena != 0);
    return result;
}

void M_TempRelease(M_Temp temp) {
    M_ArenaPop(temp.arena, temp.offset);
}
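
// Usage sketch (illustrative only, not part of the allocator): shows how the arena and
// scratch APIs above are intended to compose. `ExampleEntity` and `Example_BuildEntities`
// are hypothetical names invented for this example, and the pushes go through
// _M_ArenaPush with an explicit M_ArenaPushOpts because the exact form of any
// convenience push macro is not shown in this file.
typedef struct ExampleEntity ExampleEntity;
struct ExampleEntity {
    U32 id;
    U32 generation;
};

static ExampleEntity *Example_BuildEntities(M_Arena *arena, U64 count) {
    // Persistent result: lives on the caller's arena until it is reset or released
    M_ArenaPushOpts entity_opts = { .count = count, .align = 8 };
    ExampleEntity *entities = cast(ExampleEntity *) _M_ArenaPush(arena, sizeof(ExampleEntity), &entity_opts);

    // Scratch working memory: pass the destination arena as a conflict so the
    // scratch arena we get back is guaranteed to be a different one
    M_Temp scratch = M_TempAcquire(1, &arena);

    M_ArenaPushOpts id_opts = { .count = count, .align = 4 };
    U32 *ids = cast(U32 *) _M_ArenaPush(scratch.arena, sizeof(U32), &id_opts);

    for (U64 it = 0; it < count; ++it) {
        ids[it] = (U32) it;

        entities[it].id         = ids[it];
        entities[it].generation = 0;
    }

    // Pops the scratch arena back to the offset captured at acquire time,
    // freeing everything pushed on it inside this function
    M_TempRelease(scratch);

    return entities;
}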