Commit 385961d

Committed Nov 21, 2024
Reapply "[libc] Use best-fit binary trie to make malloc logarithmic (#117065)"

This reverts commit 93b8364.
- Correct riscv32 assumption about alignment (bit of a hack).
- Fix test case where the largest_small and smallest sizes are the same.
1 parent 4862feb commit 385961d

18 files changed: +1198 −472 lines
 

‎libc/fuzzing/__support/CMakeLists.txt

Lines changed: 8 additions & 0 deletions
@@ -23,3 +23,11 @@ add_libc_fuzzer(
   COMPILE_OPTIONS
     -D__LIBC_EXPLICIT_SIMD_OPT
 )
+
+add_libc_fuzzer(
+  freelist_heap_fuzz
+  SRCS
+    freelist_heap_fuzz.cpp
+  DEPENDS
+    libc.src.__support.freelist_heap
+)
libc/fuzzing/__support/freelist_heap_fuzz.cpp

Lines changed: 227 additions & 0 deletions

@@ -0,0 +1,227 @@
+//===-- freelist_heap_fuzz.cpp --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Fuzzing test for llvm-libc freelist-based heap implementation.
+///
+//===----------------------------------------------------------------------===//
+
+#include "src/__support/CPP/bit.h"
+#include "src/__support/CPP/optional.h"
+#include "src/__support/freelist_heap.h"
+#include "src/string/memory_utils/inline_memcpy.h"
+#include "src/string/memory_utils/inline_memmove.h"
+#include "src/string/memory_utils/inline_memset.h"
+
+using LIBC_NAMESPACE::FreeListHeap;
+using LIBC_NAMESPACE::inline_memset;
+using LIBC_NAMESPACE::cpp::nullopt;
+using LIBC_NAMESPACE::cpp::optional;
+
+// Record of an outstanding allocation.
+struct Alloc {
+  void *ptr;
+  size_t size;
+  size_t alignment;
+  uint8_t canary; // Byte written to the allocation
+};
+
+// A simple vector that tracks allocations using the heap.
+class AllocVec {
+public:
+  AllocVec(FreeListHeap &heap) : heap(&heap), size_(0), capacity(0) {
+    allocs = nullptr;
+  }
+
+  bool empty() const { return !size_; }
+
+  size_t size() const { return size_; }
+
+  bool push_back(Alloc alloc) {
+    if (size_ == capacity) {
+      size_t new_cap = capacity ? capacity * 2 : 1;
+      Alloc *new_allocs = reinterpret_cast<Alloc *>(
+          heap->realloc(allocs, new_cap * sizeof(Alloc)));
+      if (!new_allocs)
+        return false;
+      allocs = new_allocs;
+      capacity = new_cap;
+    }
+    allocs[size_++] = alloc;
+    return true;
+  }
+
+  Alloc &operator[](size_t idx) { return allocs[idx]; }
+
+  void erase_idx(size_t idx) {
+    LIBC_NAMESPACE::inline_memmove(&allocs[idx], &allocs[idx + 1],
+                                   sizeof(Alloc) * (size_ - idx - 1));
+    --size_;
+  }
+
+private:
+  FreeListHeap *heap;
+  Alloc *allocs;
+  size_t size_;
+  size_t capacity;
+};
+
+// Choose a T value by casting libfuzzer data or exit.
+template <typename T>
+optional<T> choose(const uint8_t *&data, size_t &remainder) {
+  if (sizeof(T) > remainder)
+    return nullopt;
+  T out;
+  LIBC_NAMESPACE::inline_memcpy(&out, data, sizeof(T));
+  data += sizeof(T);
+  remainder -= sizeof(T);
+  return out;
+}
+
+// The type of allocation to perform
+enum class AllocType : uint8_t {
+  MALLOC,
+  ALIGNED_ALLOC,
+  REALLOC,
+  CALLOC,
+  NUM_ALLOC_TYPES,
+};
+
+template <>
+optional<AllocType> choose<AllocType>(const uint8_t *&data, size_t &remainder) {
+  auto raw = choose<uint8_t>(data, remainder);
+  if (!raw)
+    return nullopt;
+  return static_cast<AllocType>(
+      *raw % static_cast<uint8_t>(AllocType::NUM_ALLOC_TYPES));
+}
+
+constexpr size_t heap_size = 64 * 1024;
+
+optional<size_t> choose_size(const uint8_t *&data, size_t &remainder) {
+  auto raw = choose<size_t>(data, remainder);
+  if (!raw)
+    return nullopt;
+  return *raw % heap_size;
+}
+
+optional<size_t> choose_alloc_idx(const AllocVec &allocs, const uint8_t *&data,
+                                  size_t &remainder) {
+  if (allocs.empty())
+    return nullopt;
+  auto raw = choose<size_t>(data, remainder);
+  if (!raw)
+    return nullopt;
+  return *raw % allocs.size();
+}
+
+#define ASSIGN_OR_RETURN(TYPE, NAME, EXPR)                                     \
+  auto maybe_##NAME = EXPR;                                                    \
+  if (!maybe_##NAME)                                                           \
+    return 0;                                                                  \
+  TYPE NAME = *maybe_##NAME
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t remainder) {
+  LIBC_NAMESPACE::FreeListHeapBuffer<heap_size> heap;
+  AllocVec allocs(heap);
+
+  uint8_t canary = 0;
+  while (true) {
+    ASSIGN_OR_RETURN(auto, should_alloc, choose<bool>(data, remainder));
+    if (should_alloc) {
+      ASSIGN_OR_RETURN(auto, alloc_type, choose<AllocType>(data, remainder));
+      ASSIGN_OR_RETURN(size_t, alloc_size, choose_size(data, remainder));
+
+      // Perform allocation.
+      void *ptr = nullptr;
+      size_t alignment = alignof(max_align_t);
+      switch (alloc_type) {
+      case AllocType::MALLOC:
+        ptr = heap.allocate(alloc_size);
+        break;
+      case AllocType::ALIGNED_ALLOC: {
+        ASSIGN_OR_RETURN(size_t, alignment, choose_size(data, remainder));
+        alignment = LIBC_NAMESPACE::cpp::bit_ceil(alignment);
+        ptr = heap.aligned_allocate(alignment, alloc_size);
+        break;
+      }
+      case AllocType::REALLOC: {
+        if (!alloc_size)
+          return 0;
+        ASSIGN_OR_RETURN(size_t, idx,
+                         choose_alloc_idx(allocs, data, remainder));
+        Alloc &alloc = allocs[idx];
+        ptr = heap.realloc(alloc.ptr, alloc_size);
+        if (ptr) {
+          // Extend the canary region if necessary.
+          if (alloc_size > alloc.size)
+            inline_memset(static_cast<char *>(ptr) + alloc.size, alloc.canary,
+                          alloc_size - alloc.size);
+          alloc.ptr = ptr;
+          alloc.size = alloc_size;
+          alloc.alignment = alignof(max_align_t);
+        }
+        break;
+      }
+      case AllocType::CALLOC: {
+        ASSIGN_OR_RETURN(size_t, count, choose_size(data, remainder));
+        size_t total;
+        if (__builtin_mul_overflow(count, alloc_size, &total))
+          return 0;
+        ptr = heap.calloc(count, alloc_size);
+        if (ptr)
+          for (size_t i = 0; i < total; ++i)
+            if (static_cast<char *>(ptr)[i] != 0)
+              __builtin_trap();
+        break;
+      }
+      case AllocType::NUM_ALLOC_TYPES:
+        __builtin_unreachable();
+      }
+
+      if (ptr) {
+        // aligned_allocate should automatically apply a minimum alignment.
+        if (alignment < alignof(max_align_t))
+          alignment = alignof(max_align_t);
+        // Check alignment.
+        if (reinterpret_cast<uintptr_t>(ptr) % alignment)
+          __builtin_trap();
+
+        // Reallocation is treated specially above, since we would otherwise
+        // lose the original size.
+        if (alloc_type != AllocType::REALLOC) {
+          // Fill the object with a canary byte.
+          inline_memset(ptr, canary, alloc_size);
+
+          // Track the allocation.
+          if (!allocs.push_back({ptr, alloc_size, alignment, canary}))
+            return 0;
+          ++canary;
+        }
+      }
+    } else {
+      // Select a random allocation.
+      ASSIGN_OR_RETURN(size_t, idx, choose_alloc_idx(allocs, data, remainder));
+      Alloc &alloc = allocs[idx];
+
+      // Check alignment.
+      if (reinterpret_cast<uintptr_t>(alloc.ptr) % alloc.alignment)
+        __builtin_trap();
+
+      // Check the canary.
+      uint8_t *ptr = reinterpret_cast<uint8_t *>(alloc.ptr);
+      for (size_t i = 0; i < alloc.size; ++i)
+        if (ptr[i] != alloc.canary)
+          __builtin_trap();
+
+      // Free the allocation and untrack it.
+      heap.free(alloc.ptr);
+      allocs.erase_idx(idx);
+    }
+  }
+  return 0;
+}

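The fuzzer drives every iteration from the raw libFuzzer bytes via `choose<T>` and the `ASSIGN_OR_RETURN` early exits. A stand-alone sketch of that byte-consuming pattern, using the standard library instead of the llvm-libc internal headers (names here are illustrative only, not code from this commit):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <optional>

// Consume sizeof(T) bytes of fuzz input as a T, advancing the cursor.
// Returning nullopt when the input is exhausted ends the fuzz iteration
// cleanly, mirroring the ASSIGN_OR_RETURN early returns above.
template <typename T>
std::optional<T> consume(const std::uint8_t *&data, std::size_t &remainder) {
  if (sizeof(T) > remainder)
    return std::nullopt;
  T out;
  std::memcpy(&out, data, sizeof(T));
  data += sizeof(T);
  remainder -= sizeof(T);
  return out;
}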
‎libc/src/__support/CMakeLists.txt

Lines changed: 24 additions & 2 deletions
@@ -14,25 +14,47 @@ add_header_library(
     libc.src.__support.CPP.type_traits
 )
 
-add_header_library(
+add_object_library(
   freelist
   HDRS
     freelist.h
+  SRCS
+    freelist.cpp
   DEPENDS
+    .block
    libc.src.__support.fixedvector
    libc.src.__support.CPP.array
    libc.src.__support.CPP.cstddef
    libc.src.__support.CPP.new
    libc.src.__support.CPP.span
 )
 
+add_object_library(
+  freetrie
+  HDRS
+    freetrie.h
+  SRCS
+    freetrie.cpp
+  DEPENDS
+    .block
+    .freelist
+)
+
+add_header_library(
+  freestore
+  HDRS
+    freestore.h
+  DEPENDS
+    .freetrie
+)
+
 add_header_library(
   freelist_heap
   HDRS
     freelist_heap.h
   DEPENDS
     .block
-    .freelist
+    .freestore
     libc.src.__support.CPP.cstddef
     libc.src.__support.CPP.array
     libc.src.__support.CPP.optional

‎libc/src/__support/block.h

Lines changed: 24 additions & 11 deletions
@@ -174,16 +174,32 @@ class Block {
     return inner_size - sizeof(prev_) + BLOCK_OVERHEAD;
   }
 
-  /// @returns The number of usable bytes inside the block.
+  /// @returns The number of usable bytes inside the block were it to be
+  /// allocated.
   size_t inner_size() const {
     if (!next())
       return 0;
     return inner_size(outer_size());
   }
 
+  /// @returns The number of usable bytes inside a block with the given outer
+  /// size were it to be allocated.
   static size_t inner_size(size_t outer_size) {
     // The usable region includes the prev_ field of the next block.
-    return outer_size - BLOCK_OVERHEAD + sizeof(prev_);
+    return inner_size_free(outer_size) + sizeof(prev_);
+  }
+
+  /// @returns The number of usable bytes inside the block if it remains free.
+  size_t inner_size_free() const {
+    if (!next())
+      return 0;
+    return inner_size_free(outer_size());
+  }
+
+  /// @returns The number of usable bytes inside a block with the given outer
+  /// size if it remains free.
+  static size_t inner_size_free(size_t outer_size) {
+    return outer_size - BLOCK_OVERHEAD;
   }
 
   /// @returns A pointer to the usable space inside this block.
@@ -201,14 +217,11 @@ class Block {
 
   /// Attempts to split this block.
   ///
-  /// If successful, the block will have an inner size of `new_inner_size`,
-  /// rounded to ensure that the split point is on an ALIGNMENT boundary. The
-  /// remaining space will be returned as a new block. Note that the prev_ field
-  /// of the next block counts as part of the inner size of the returnd block.
-  ///
-  /// This method may fail if the remaining space is too small to hold a new
-  /// block. If this method fails for any reason, the original block is
-  /// unmodified.
+  /// If successful, the block will have an inner size of at least
+  /// `new_inner_size`, rounded to ensure that the split point is on an
+  /// ALIGNMENT boundary. The remaining space will be returned as a new block.
+  /// Note that the prev_ field of the next block counts as part of the inner
+  /// size of the returnd block.
   optional<Block *> split(size_t new_inner_size);
 
   /// Merges this block with the one that comes after it.
@@ -442,7 +455,7 @@ Block<OffsetType, kAlign>::split(size_t new_inner_size) {
   // The prev_ field of the next block is always available, so there is a
   // minimum size to a block created through splitting.
   if (new_inner_size < sizeof(prev_))
-    return {};
+    new_inner_size = sizeof(prev_);
 
   size_t old_inner_size = inner_size();
   new_inner_size =

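The new `inner_size_free` split above encodes one invariant: a free block cannot hand out the next block's `prev_` field (it is still needed while the block is free), but an allocated block can, so the two views differ by exactly `sizeof(prev_)`. A small numeric illustration; the constants below are made-up stand-ins, not the real `Block<>` values:

#include <cstddef>

// Hypothetical stand-ins for Block<>::BLOCK_OVERHEAD and sizeof(prev_).
constexpr std::size_t OVERHEAD = 8;
constexpr std::size_t PREV_FIELD = 4;

// Usable bytes while the block stays free (cannot borrow the next prev_).
constexpr std::size_t inner_size_free(std::size_t outer) {
  return outer - OVERHEAD;
}
// Usable bytes once allocated (the next block's prev_ becomes usable).
constexpr std::size_t inner_size(std::size_t outer) {
  return inner_size_free(outer) + PREV_FIELD;
}

static_assert(inner_size(64) == inner_size_free(64) + PREV_FIELD,
              "the two views differ by exactly the prev_ field");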
‎libc/src/__support/freelist.cpp

Lines changed: 42 additions & 0 deletions
@@ -0,0 +1,42 @@
+//===-- Implementation for freelist ---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "freelist.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+void FreeList::push(Node *node) {
+  if (begin_) {
+    LIBC_ASSERT(Block<>::from_usable_space(node)->outer_size() ==
+                    begin_->block()->outer_size() &&
+                "freelist entries must have the same size");
+    // Since the list is circular, insert the node immediately before begin_.
+    node->prev = begin_->prev;
+    node->next = begin_;
+    begin_->prev->next = node;
+    begin_->prev = node;
+  } else {
+    begin_ = node->prev = node->next = node;
+  }
+}
+
+void FreeList::remove(Node *node) {
+  LIBC_ASSERT(begin_ && "cannot remove from empty list");
+  if (node == node->next) {
+    LIBC_ASSERT(node == begin_ &&
+                "a self-referential node must be the only element");
+    begin_ = nullptr;
+  } else {
+    node->prev->next = node->next;
+    node->next->prev = node->prev;
+    if (begin_ == node)
+      begin_ = node->next;
+  }
+}
+
+} // namespace LIBC_NAMESPACE_DECL

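Because the list is circular and `begin_` always points at the oldest node, inserting just before `begin_` appends to the back while `pop()` releases the front, giving the FIFO order described in freelist.h. A stand-alone sketch of the same pointer manipulation with simplified types (not the llvm-libc classes):

#include <cassert>

// Minimal circular doubly-linked node; pushing before `begin` appends to the
// back, so `begin` stays the oldest node (FIFO order).
struct Node { Node *prev, *next; };

inline void push_back(Node *&begin, Node *node) {
  if (begin) {
    node->prev = begin->prev;
    node->next = begin;
    begin->prev->next = node;
    begin->prev = node;
  } else {
    begin = node->prev = node->next = node;
  }
}

inline void remove(Node *&begin, Node *node) {
  assert(begin && "cannot remove from empty list");
  if (node == node->next) { // sole element
    begin = nullptr;
  } else {
    node->prev->next = node->next;
    node->next->prev = node->prev;
    if (begin == node)
      begin = node->next;
  }
}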
‎libc/src/__support/freelist.h

Lines changed: 54 additions & 174 deletions
@@ -1,4 +1,4 @@
-//===-- Interface for freelist_malloc -------------------------------------===//
+//===-- Interface for freelist --------------------------------------------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -9,200 +9,80 @@
 #ifndef LLVM_LIBC_SRC___SUPPORT_FREELIST_H
 #define LLVM_LIBC_SRC___SUPPORT_FREELIST_H
 
-#include "src/__support/CPP/array.h"
-#include "src/__support/CPP/cstddef.h"
-#include "src/__support/CPP/new.h"
-#include "src/__support/CPP/span.h"
-#include "src/__support/fixedvector.h"
-#include "src/__support/macros/config.h"
+#include "block.h"
 
 namespace LIBC_NAMESPACE_DECL {
 
-using cpp::span;
-
-/// Basic [freelist](https://en.wikipedia.org/wiki/Free_list) implementation
-/// for an allocator. This implementation buckets by chunk size, with a list
-/// of user-provided buckets. Each bucket is a linked list of storage chunks.
-/// Because this freelist uses the added chunks themselves as list nodes, there
-/// is a lower bound of `sizeof(FreeList.FreeListNode)` bytes for chunks which
-/// can be added to this freelist. There is also an implicit bucket for
-/// "everything else", for chunks which do not fit into a bucket.
-///
-/// Each added chunk will be added to the smallest bucket under which it fits.
-/// If it does not fit into any user-provided bucket, it will be added to the
-/// default bucket.
-///
-/// As an example, assume that the `FreeList` is configured with buckets of
-/// sizes {64, 128, 256, and 512} bytes. The internal state may look like the
-/// following:
+/// A circularly-linked FIFO list storing free Blocks. All Blocks on a list
+/// are the same size. The blocks are referenced by Nodes in the list; the list
+/// refers to these, but it does not own them.
 ///
-/// @code{.unparsed}
-/// bucket[0] (64B) --> chunk[12B] --> chunk[42B] --> chunk[64B] --> NULL
-/// bucket[1] (128B) --> chunk[65B] --> chunk[72B] --> NULL
-/// bucket[2] (256B) --> NULL
-/// bucket[3] (512B) --> chunk[312B] --> chunk[512B] --> chunk[416B] --> NULL
-/// bucket[4] (implicit) --> chunk[1024B] --> chunk[513B] --> NULL
-/// @endcode
-///
-/// Note that added chunks should be aligned to a 4-byte boundary.
-template <size_t NUM_BUCKETS = 6> class FreeList {
+/// Allocating free blocks in FIFO order maximizes the amount of time before a
+/// free block is reused. This in turn maximizes the number of opportunities for
+/// it to be coalesced with an adjacent block, which tends to reduce heap
+/// fragmentation.
+class FreeList {
 public:
-  // Remove copy/move ctors
-  FreeList(const FreeList &other) = delete;
-  FreeList(FreeList &&other) = delete;
-  FreeList &operator=(const FreeList &other) = delete;
-  FreeList &operator=(FreeList &&other) = delete;
-
-  /// Adds a chunk to this freelist.
-  bool add_chunk(cpp::span<cpp::byte> chunk);
-
-  /// Finds an eligible chunk for an allocation of size `size`.
-  ///
-  /// @note This returns the first allocation possible within a given bucket;
-  /// It does not currently optimize for finding the smallest chunk.
-  ///
-  /// @returns
-  /// * On success - A span representing the chunk.
-  /// * On failure (e.g. there were no chunks available for that allocation) -
-  ///   A span with a size of 0.
-  cpp::span<cpp::byte> find_chunk(size_t size) const;
-
-  template <typename Cond> cpp::span<cpp::byte> find_chunk_if(Cond op) const;
-
-  /// Removes a chunk from this freelist.
-  bool remove_chunk(cpp::span<cpp::byte> chunk);
-
-  /// For a given size, find which index into chunks_ the node should be written
-  /// to.
-  constexpr size_t find_chunk_ptr_for_size(size_t size, bool non_null) const;
-
-  struct FreeListNode {
-    FreeListNode *next;
-    size_t size;
-  };
-
-  constexpr void set_freelist_node(FreeListNode &node,
-                                   cpp::span<cpp::byte> chunk);
-
-  constexpr explicit FreeList(const cpp::array<size_t, NUM_BUCKETS> &sizes)
-      : chunks_(NUM_BUCKETS + 1, 0), sizes_(sizes.begin(), sizes.end()) {}
-
-private:
-  FixedVector<FreeList::FreeListNode *, NUM_BUCKETS + 1> chunks_;
-  FixedVector<size_t, NUM_BUCKETS> sizes_;
-};
-
-template <size_t NUM_BUCKETS>
-constexpr void FreeList<NUM_BUCKETS>::set_freelist_node(FreeListNode &node,
-                                                        span<cpp::byte> chunk) {
-  // Add it to the correct list.
-  size_t chunk_ptr = find_chunk_ptr_for_size(chunk.size(), false);
-  node.size = chunk.size();
-  node.next = chunks_[chunk_ptr];
-  chunks_[chunk_ptr] = &node;
-}
-
-template <size_t NUM_BUCKETS>
-bool FreeList<NUM_BUCKETS>::add_chunk(span<cpp::byte> chunk) {
-  // Check that the size is enough to actually store what we need
-  if (chunk.size() < sizeof(FreeListNode))
-    return false;
-
-  FreeListNode *node = ::new (chunk.data()) FreeListNode;
-  set_freelist_node(*node, chunk);
-
-  return true;
-}
-
-template <size_t NUM_BUCKETS>
-template <typename Cond>
-span<cpp::byte> FreeList<NUM_BUCKETS>::find_chunk_if(Cond op) const {
-  for (FreeListNode *node : chunks_) {
-    while (node != nullptr) {
-      span<cpp::byte> chunk(reinterpret_cast<cpp::byte *>(node), node->size);
-      if (op(chunk))
-        return chunk;
-
-      node = node->next;
+  class Node {
+  public:
+    /// @returns The block containing this node.
+    LIBC_INLINE const Block<> *block() const {
+      return Block<>::from_usable_space(this);
    }
-  }
 
-  return {};
-}
+    /// @returns The block containing this node.
+    LIBC_INLINE Block<> *block() { return Block<>::from_usable_space(this); }
 
-template <size_t NUM_BUCKETS>
-span<cpp::byte> FreeList<NUM_BUCKETS>::find_chunk(size_t size) const {
-  if (size == 0)
-    return span<cpp::byte>();
+    /// @returns The inner size of blocks in the list containing this node.
+    LIBC_INLINE size_t size() const { return block()->inner_size(); }
 
-  size_t chunk_ptr = find_chunk_ptr_for_size(size, true);
-
-  // Check that there's data. This catches the case where we run off the
-  // end of the array
-  if (chunks_[chunk_ptr] == nullptr)
-    return span<cpp::byte>();
+  private:
+    // Circularly linked pointers to adjacent nodes.
+    Node *prev;
+    Node *next;
+    friend class FreeList;
+  };
 
-  // Now iterate up the buckets, walking each list to find a good candidate
-  for (size_t i = chunk_ptr; i < chunks_.size(); i++) {
-    FreeListNode *node = chunks_[static_cast<unsigned short>(i)];
+  LIBC_INLINE constexpr FreeList() : FreeList(nullptr) {}
+  LIBC_INLINE constexpr FreeList(Node *begin) : begin_(begin) {}
 
-    while (node != nullptr) {
-      if (node->size >= size)
-        return span<cpp::byte>(reinterpret_cast<cpp::byte *>(node), node->size);
+  LIBC_INLINE bool empty() const { return !begin_; }
 
-      node = node->next;
-    }
+  /// @returns The inner size of blocks in the list.
+  LIBC_INLINE size_t size() const {
+    LIBC_ASSERT(begin_ && "empty lists have no size");
+    return begin_->size();
   }
 
-  // If we get here, we've checked every block in every bucket. There's
-  // nothing that can support this allocation.
-  return span<cpp::byte>();
-}
+  /// @returns The first node in the list.
+  LIBC_INLINE Node *begin() { return begin_; }
 
-template <size_t NUM_BUCKETS>
-bool FreeList<NUM_BUCKETS>::remove_chunk(span<cpp::byte> chunk) {
-  size_t chunk_ptr = find_chunk_ptr_for_size(chunk.size(), true);
+  /// @returns The first block in the list.
+  LIBC_INLINE Block<> *front() { return begin_->block(); }
 
-  // Check head first.
-  if (chunks_[chunk_ptr] == nullptr)
-    return false;
-
-  FreeListNode *node = chunks_[chunk_ptr];
-  if (reinterpret_cast<cpp::byte *>(node) == chunk.data()) {
-    chunks_[chunk_ptr] = node->next;
-    return true;
+  /// Push a block to the back of the list.
+  /// The block must be large enough to contain a node.
+  LIBC_INLINE void push(Block<> *block) {
+    LIBC_ASSERT(!block->used() &&
+                "only free blocks can be placed on free lists");
+    LIBC_ASSERT(block->inner_size_free() >= sizeof(FreeList) &&
+                "block too small to accomodate free list node");
+    push(new (block->usable_space()) Node);
   }
 
-  // No? Walk the nodes.
-  node = chunks_[chunk_ptr];
+  /// Push an already-constructed node to the back of the list.
+  /// This allows pushing derived node types with additional data.
+  void push(Node *node);
 
-  while (node->next != nullptr) {
-    if (reinterpret_cast<cpp::byte *>(node->next) == chunk.data()) {
-      // Found it, remove this node out of the chain
-      node->next = node->next->next;
-      return true;
-    }
+  /// Pop the first node from the list.
+  LIBC_INLINE void pop() { remove(begin_); }
 
-    node = node->next;
-  }
+  /// Remove an arbitrary node from the list.
  void remove(Node *node);
 
-  return false;
-}
-
-template <size_t NUM_BUCKETS>
-constexpr size_t
-FreeList<NUM_BUCKETS>::find_chunk_ptr_for_size(size_t size,
-                                               bool non_null) const {
-  size_t chunk_ptr = 0;
-  for (chunk_ptr = 0u; chunk_ptr < sizes_.size(); chunk_ptr++) {
-    if (sizes_[chunk_ptr] >= size &&
-        (!non_null || chunks_[chunk_ptr] != nullptr)) {
-      break;
-    }
-  }
-
-  return chunk_ptr;
-}
+private:
+  Node *begin_;
+};
 
 } // namespace LIBC_NAMESPACE_DECL
 
‎libc/src/__support/freelist_heap.h

Lines changed: 76 additions & 85 deletions
@@ -12,11 +12,12 @@
 #include <stddef.h>
 
 #include "block.h"
-#include "freelist.h"
+#include "freestore.h"
 #include "src/__support/CPP/optional.h"
 #include "src/__support/CPP/span.h"
 #include "src/__support/libc_assert.h"
 #include "src/__support/macros/config.h"
+#include "src/__support/math_extras.h"
 #include "src/string/memory_utils/inline_memcpy.h"
 #include "src/string/memory_utils/inline_memset.h"
 
@@ -28,23 +29,14 @@ extern "C" cpp::byte __llvm_libc_heap_limit;
 using cpp::optional;
 using cpp::span;
 
-inline constexpr bool IsPow2(size_t x) { return x && (x & (x - 1)) == 0; }
+LIBC_INLINE constexpr bool IsPow2(size_t x) { return x && (x & (x - 1)) == 0; }
 
-static constexpr cpp::array<size_t, 6> DEFAULT_BUCKETS{16, 32, 64,
-                                                       128, 256, 512};
-
-template <size_t NUM_BUCKETS = DEFAULT_BUCKETS.size()> class FreeListHeap {
+class FreeListHeap {
 public:
-  using BlockType = Block<>;
-  using FreeListType = FreeList<NUM_BUCKETS>;
-
-  static constexpr size_t MIN_ALIGNMENT =
-      cpp::max(BlockType::ALIGNMENT, alignof(max_align_t));
-
-  constexpr FreeListHeap() : begin_(&_end), end_(&__llvm_libc_heap_limit) {}
+  constexpr FreeListHeap() : begin(&_end), end(&__llvm_libc_heap_limit) {}
 
   constexpr FreeListHeap(span<cpp::byte> region)
-      : begin_(region.begin()), end_(region.end()) {}
+      : begin(region.begin()), end(region.end()) {}
 
   void *allocate(size_t size);
   void *aligned_allocate(size_t alignment, size_t size);
@@ -54,89 +46,87 @@ template <size_t NUM_BUCKETS = DEFAULT_BUCKETS.size()> class FreeListHeap {
   void *realloc(void *ptr, size_t size);
   void *calloc(size_t num, size_t size);
 
-  cpp::span<cpp::byte> region() const { return {begin_, end_}; }
+  cpp::span<cpp::byte> region() const { return {begin, end}; }
 
 private:
   void init();
 
   void *allocate_impl(size_t alignment, size_t size);
 
-  span<cpp::byte> block_to_span(BlockType *block) {
+  span<cpp::byte> block_to_span(Block<> *block) {
     return span<cpp::byte>(block->usable_space(), block->inner_size());
   }
 
-  bool is_valid_ptr(void *ptr) { return ptr >= begin_ && ptr < end_; }
+  bool is_valid_ptr(void *ptr) { return ptr >= begin && ptr < end; }
 
-  bool is_initialized_ = false;
-  cpp::byte *begin_;
-  cpp::byte *end_;
-  FreeListType freelist_{DEFAULT_BUCKETS};
+  cpp::byte *begin;
+  cpp::byte *end;
+  bool is_initialized = false;
+  FreeStore free_store;
 };
 
-template <size_t BUFF_SIZE, size_t NUM_BUCKETS = DEFAULT_BUCKETS.size()>
-class FreeListHeapBuffer : public FreeListHeap<NUM_BUCKETS> {
-  using parent = FreeListHeap<NUM_BUCKETS>;
-  using FreeListNode = typename parent::FreeListType::FreeListNode;
-
+template <size_t BUFF_SIZE> class FreeListHeapBuffer : public FreeListHeap {
 public:
-  constexpr FreeListHeapBuffer()
-      : FreeListHeap<NUM_BUCKETS>{buffer}, buffer{} {}
+  constexpr FreeListHeapBuffer() : FreeListHeap{buffer}, buffer{} {}
 
 private:
   cpp::byte buffer[BUFF_SIZE];
 };
 
-template <size_t NUM_BUCKETS> void FreeListHeap<NUM_BUCKETS>::init() {
-  LIBC_ASSERT(!is_initialized_ && "duplicate initialization");
-  auto result = BlockType::init(region());
-  BlockType *block = *result;
-  freelist_.add_chunk(block_to_span(block));
-  is_initialized_ = true;
+LIBC_INLINE void FreeListHeap::init() {
+  LIBC_ASSERT(!is_initialized && "duplicate initialization");
+  auto result = Block<>::init(region());
+  Block<> *block = *result;
+  free_store.set_range({0, cpp::bit_ceil(block->inner_size())});
+  free_store.insert(block);
+  is_initialized = true;
 }
 
-template <size_t NUM_BUCKETS>
-void *FreeListHeap<NUM_BUCKETS>::allocate_impl(size_t alignment, size_t size) {
+LIBC_INLINE void *FreeListHeap::allocate_impl(size_t alignment, size_t size) {
   if (size == 0)
     return nullptr;
 
-  if (!is_initialized_)
+  if (!is_initialized)
     init();
 
-  // Find a chunk in the freelist. Split it if needed, then return.
-  auto chunk =
-      freelist_.find_chunk_if([alignment, size](span<cpp::byte> chunk) {
-        BlockType *block = BlockType::from_usable_space(chunk.data());
-        return block->can_allocate(alignment, size);
-      });
+  size_t request_size = size;
+
+  // TODO: usable_space should always be aligned to max_align_t.
+  if (alignment > alignof(max_align_t) ||
+      (Block<>::BLOCK_OVERHEAD % alignof(max_align_t) != 0)) {
+    // TODO: This bound isn't precisely calculated yet. It assumes one extra
+    // Block<>::ALIGNMENT to accomodate the possibility for padding block
+    // overhead. (alignment - 1) ensures that there is an aligned point
+    // somewhere in usable_space, but this isn't tight either, since
+    // usable_space is also already somewhat aligned.
+    if (add_overflow(size, (alignment - 1) + Block<>::ALIGNMENT, request_size))
+      return nullptr;
+  }
 
-  if (chunk.data() == nullptr)
+  Block<> *block = free_store.remove_best_fit(request_size);
+  if (!block)
    return nullptr;
-  freelist_.remove_chunk(chunk);
 
-  BlockType *chunk_block = BlockType::from_usable_space(chunk.data());
-  LIBC_ASSERT(!chunk_block->used());
+  LIBC_ASSERT(block->can_allocate(alignment, size) &&
+              "block should always be large enough to allocate at the correct "
+              "alignment");
 
-  // Split that chunk. If there's a leftover chunk, add it to the freelist
-  auto block_info = BlockType::allocate(chunk_block, alignment, size);
+  auto block_info = Block<>::allocate(block, alignment, size);
   if (block_info.next)
-    freelist_.add_chunk(block_to_span(block_info.next));
+    free_store.insert(block_info.next);
   if (block_info.prev)
-    freelist_.add_chunk(block_to_span(block_info.prev));
-  chunk_block = block_info.block;
-
-  chunk_block->mark_used();
+    free_store.insert(block_info.prev);
 
-  return chunk_block->usable_space();
+  block_info.block->mark_used();
+  return block_info.block->usable_space();
 }
 
-template <size_t NUM_BUCKETS>
-void *FreeListHeap<NUM_BUCKETS>::allocate(size_t size) {
-  return allocate_impl(MIN_ALIGNMENT, size);
+LIBC_INLINE void *FreeListHeap::allocate(size_t size) {
+  return allocate_impl(alignof(max_align_t), size);
 }
 
-template <size_t NUM_BUCKETS>
-void *FreeListHeap<NUM_BUCKETS>::aligned_allocate(size_t alignment,
-                                                  size_t size) {
+LIBC_INLINE void *FreeListHeap::aligned_allocate(size_t alignment,
                                                 size_t size) {
   // The alignment must be an integral power of two.
   if (!IsPow2(alignment))
     return nullptr;
@@ -148,38 +138,37 @@ void *FreeListHeap<NUM_BUCKETS>::aligned_allocate(size_t alignment,
   return allocate_impl(alignment, size);
 }
 
-template <size_t NUM_BUCKETS> void FreeListHeap<NUM_BUCKETS>::free(void *ptr) {
+LIBC_INLINE void FreeListHeap::free(void *ptr) {
   cpp::byte *bytes = static_cast<cpp::byte *>(ptr);
 
   LIBC_ASSERT(is_valid_ptr(bytes) && "Invalid pointer");
 
-  BlockType *chunk_block = BlockType::from_usable_space(bytes);
-  LIBC_ASSERT(chunk_block->next() && "sentinel last block cannot be freed");
-  LIBC_ASSERT(chunk_block->used() && "The block is not in-use");
-  chunk_block->mark_free();
+  Block<> *block = Block<>::from_usable_space(bytes);
+  LIBC_ASSERT(block->next() && "sentinel last block cannot be freed");
+  LIBC_ASSERT(block->used() && "double free");
+  block->mark_free();
 
   // Can we combine with the left or right blocks?
-  BlockType *prev_free = chunk_block->prev_free();
-  BlockType *next = chunk_block->next();
+  Block<> *prev_free = block->prev_free();
+  Block<> *next = block->next();
 
   if (prev_free != nullptr) {
-    // Remove from freelist and merge
-    freelist_.remove_chunk(block_to_span(prev_free));
-    chunk_block = prev_free;
-    chunk_block->merge_next();
+    // Remove from free store and merge.
+    free_store.remove(prev_free);
+    block = prev_free;
+    block->merge_next();
   }
   if (!next->used()) {
-    freelist_.remove_chunk(block_to_span(next));
-    chunk_block->merge_next();
+    free_store.remove(next);
+    block->merge_next();
   }
   // Add back to the freelist
-  freelist_.add_chunk(block_to_span(chunk_block));
+  free_store.insert(block);
 }
 
 // Follows constract of the C standard realloc() function
 // If ptr is free'd, will return nullptr.
-template <size_t NUM_BUCKETS>
-void *FreeListHeap<NUM_BUCKETS>::realloc(void *ptr, size_t size) {
+LIBC_INLINE void *FreeListHeap::realloc(void *ptr, size_t size) {
   if (size == 0) {
     free(ptr);
     return nullptr;
@@ -194,10 +183,10 @@ void *FreeListHeap<NUM_BUCKETS>::realloc(void *ptr, size_t size) {
   if (!is_valid_ptr(bytes))
     return nullptr;
 
-  BlockType *chunk_block = BlockType::from_usable_space(bytes);
-  if (!chunk_block->used())
+  Block<> *block = Block<>::from_usable_space(bytes);
+  if (!block->used())
     return nullptr;
-  size_t old_size = chunk_block->inner_size();
+  size_t old_size = block->inner_size();
 
   // Do nothing and return ptr if the required memory size is smaller than
   // the current size.
@@ -214,15 +203,17 @@ void *FreeListHeap<NUM_BUCKETS>::realloc(void *ptr, size_t size) {
   return new_ptr;
 }
 
-template <size_t NUM_BUCKETS>
-void *FreeListHeap<NUM_BUCKETS>::calloc(size_t num, size_t size) {
-  void *ptr = allocate(num * size);
+LIBC_INLINE void *FreeListHeap::calloc(size_t num, size_t size) {
+  size_t bytes;
+  if (__builtin_mul_overflow(num, size, &bytes))
+    return nullptr;
+  void *ptr = allocate(bytes);
   if (ptr != nullptr)
-    LIBC_NAMESPACE::inline_memset(ptr, 0, num * size);
+    LIBC_NAMESPACE::inline_memset(ptr, 0, bytes);
   return ptr;
 }
 
-extern FreeListHeap<> *freelist_heap;
+extern FreeListHeap *freelist_heap;
 
 } // namespace LIBC_NAMESPACE_DECL
 
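For over-aligned requests, `allocate_impl` above asks the free store for a deliberately padded size so that an aligned pointer is guaranteed to fit somewhere in the returned block. A simplified sketch of that padding computation; `BLOCK_ALIGNMENT` is a stand-in for `Block<>::ALIGNMENT`, and the bound is conservative, as the TODO in the diff notes:

#include <cstddef>

constexpr std::size_t BLOCK_ALIGNMENT = alignof(std::max_align_t); // stand-in

// Returns false on overflow, in which case the allocation should fail.
inline bool padded_request(std::size_t size, std::size_t alignment,
                           std::size_t &request) {
  // (alignment - 1) guarantees an aligned point inside the usable space; one
  // extra BLOCK_ALIGNMENT absorbs whatever padding becomes block overhead.
  return !__builtin_add_overflow(size, (alignment - 1) + BLOCK_ALIGNMENT,
                                 &request);
}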
‎libc/src/__support/freestore.h

Lines changed: 114 additions & 0 deletions
@@ -0,0 +1,114 @@
+//===-- Interface for freestore ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC___SUPPORT_FREESTORE_H
+#define LLVM_LIBC_SRC___SUPPORT_FREESTORE_H
+
+#include "freetrie.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+/// A best-fit store of variously-sized free blocks. Blocks can be inserted and
+/// removed in logarithmic time.
+class FreeStore {
+public:
+  FreeStore() = default;
+  FreeStore(const FreeStore &other) = delete;
+  FreeStore &operator=(const FreeStore &other) = delete;
+
+  /// Sets the range of possible block sizes. This can only be called when the
+  /// trie is empty.
+  LIBC_INLINE void set_range(FreeTrie::SizeRange range) {
+    large_trie.set_range(range);
+  }
+
+  /// Insert a free block. If the block is too small to be tracked, nothing
+  /// happens.
+  void insert(Block<> *block);
+
+  /// Remove a free block. If the block is too small to be tracked, nothing
+  /// happens.
+  void remove(Block<> *block);
+
+  /// Remove a best-fit free block that can contain the given size when
+  /// allocated. Returns nullptr if there is no such block.
+  Block<> *remove_best_fit(size_t size);
+
+private:
+  static constexpr size_t ALIGNMENT = alignof(max_align_t);
+  static constexpr size_t MIN_OUTER_SIZE =
+      align_up(Block<>::BLOCK_OVERHEAD + sizeof(FreeList::Node), ALIGNMENT);
+  static constexpr size_t MIN_LARGE_OUTER_SIZE =
+      align_up(Block<>::BLOCK_OVERHEAD + sizeof(FreeTrie::Node), ALIGNMENT);
+  static constexpr size_t NUM_SMALL_SIZES =
+      (MIN_LARGE_OUTER_SIZE - MIN_OUTER_SIZE) / ALIGNMENT;
+
+  LIBC_INLINE static bool too_small(Block<> *block) {
+    return block->outer_size() < MIN_OUTER_SIZE;
+  }
+  LIBC_INLINE static bool is_small(Block<> *block) {
+    return block->outer_size() < MIN_LARGE_OUTER_SIZE;
+  }
+
+  FreeList &small_list(Block<> *block);
+  FreeList *find_best_small_fit(size_t size);
+
+  cpp::array<FreeList, NUM_SMALL_SIZES> small_lists;
+  FreeTrie large_trie;
+};
+
+LIBC_INLINE void FreeStore::insert(Block<> *block) {
+  if (too_small(block))
+    return;
+  if (is_small(block))
+    small_list(block).push(block);
+  else
+    large_trie.push(block);
+}
+
+LIBC_INLINE void FreeStore::remove(Block<> *block) {
+  if (too_small(block))
+    return;
+  if (is_small(block)) {
+    small_list(block).remove(
+        reinterpret_cast<FreeList::Node *>(block->usable_space()));
+  } else {
+    large_trie.remove(
+        reinterpret_cast<FreeTrie::Node *>(block->usable_space()));
+  }
+}
+
+LIBC_INLINE Block<> *FreeStore::remove_best_fit(size_t size) {
+  if (FreeList *list = find_best_small_fit(size)) {
+    Block<> *block = list->front();
+    list->pop();
+    return block;
+  }
+  if (FreeTrie::Node *best_fit = large_trie.find_best_fit(size)) {
+    Block<> *block = best_fit->block();
+    large_trie.remove(best_fit);
+    return block;
+  }
+  return nullptr;
+}
+
+LIBC_INLINE FreeList &FreeStore::small_list(Block<> *block) {
+  LIBC_ASSERT(is_small(block) && "only legal for small blocks");
+  return small_lists[(block->outer_size() - MIN_OUTER_SIZE) / ALIGNMENT];
+}
+
+LIBC_INLINE FreeList *FreeStore::find_best_small_fit(size_t size) {
+  for (FreeList &list : small_lists)
+    if (!list.empty() && list.size() >= size)
+      return &list;
+  return nullptr;
+}
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC___SUPPORT_FREESTORE_H

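FreeStore above routes blocks either to exact-size small lists or to the trie for everything larger; a small block's list index is just its distance from the minimum trackable outer size in ALIGNMENT steps. An illustration with made-up constants (the real values are computed from the Block<> overhead and node sizes, so the numbers below are only an assumption for the example):

#include <cstddef>

constexpr std::size_t ALIGNMENT = 16;            // hypothetical
constexpr std::size_t MIN_OUTER_SIZE = 32;       // smallest trackable block
constexpr std::size_t MIN_LARGE_OUTER_SIZE = 64; // first trie-managed size
constexpr std::size_t NUM_SMALL_SIZES =
    (MIN_LARGE_OUTER_SIZE - MIN_OUTER_SIZE) / ALIGNMENT;

// Index of the exact-size list holding blocks of this outer size.
constexpr std::size_t small_index(std::size_t outer_size) {
  return (outer_size - MIN_OUTER_SIZE) / ALIGNMENT;
}

static_assert(small_index(MIN_OUTER_SIZE) == 0, "first small list");
static_assert(small_index(MIN_LARGE_OUTER_SIZE - ALIGNMENT) ==
                  NUM_SMALL_SIZES - 1,
              "largest small size lands in the last list");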
‎libc/src/__support/freetrie.cpp

Lines changed: 64 additions & 0 deletions
@@ -0,0 +1,64 @@
+//===-- Implementation for freetrie ---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "freetrie.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+void FreeTrie::remove(Node *node) {
+  LIBC_ASSERT(!empty() && "cannot remove from empty trie");
+  FreeList list = node;
+  list.pop();
+  Node *new_node = static_cast<Node *>(list.begin());
+  if (!new_node) {
+    // The freelist is empty. Replace the subtrie root with an arbitrary leaf.
+    // This is legal because there is no relationship between the size of the
+    // root and its children.
+    Node *leaf = node;
+    while (leaf->lower || leaf->upper)
+      leaf = leaf->lower ? leaf->lower : leaf->upper;
+    if (leaf == node) {
+      // If the root is a leaf, then removing it empties the subtrie.
+      replace_node(node, nullptr);
+      return;
+    }
+
+    replace_node(leaf, nullptr);
+    new_node = leaf;
+  }
+
+  if (!is_head(node))
+    return;
+
+  // Copy the trie links to the new head.
+  new_node->lower = node->lower;
+  new_node->upper = node->upper;
+  new_node->parent = node->parent;
+  replace_node(node, new_node);
+}
+
+void FreeTrie::replace_node(Node *node, Node *new_node) {
+  LIBC_ASSERT(is_head(node) && "only head nodes contain trie links");
+
+  if (node->parent) {
+    Node *&parent_child =
+        node->parent->lower == node ? node->parent->lower : node->parent->upper;
+    LIBC_ASSERT(parent_child == node &&
+                "no reference to child node found in parent");
+    parent_child = new_node;
+  } else {
+    LIBC_ASSERT(root == node && "non-root node had no parent");
+    root = new_node;
+  }
+  if (node->lower)
+    node->lower->parent = new_node;
+  if (node->upper)
+    node->upper->parent = new_node;
+}
+
+} // namespace LIBC_NAMESPACE_DECL

‎libc/src/__support/freetrie.h

Lines changed: 237 additions & 0 deletions
@@ -0,0 +1,237 @@
+//===-- Interface for freetrie --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC___SUPPORT_FREETRIE_H
+#define LLVM_LIBC_SRC___SUPPORT_FREETRIE_H
+
+#include "freelist.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+/// A trie of free lists.
+///
+/// This is an unusual little data structure originally from Doug Lea's malloc.
+/// Finding the best fit from a set of differently-sized free list typically
+/// required some kind of ordered map, and these are typically implemented using
+/// a self-balancing binary search tree. Those are notorious for having a
+/// relatively large number of special cases, while this trie has relatively
+/// few, which helps with code size.
+///
+/// Operations on the trie are logarithmic not on the number of nodes within it,
+/// but rather the fixed range of possible sizes that the trie can contain. This
+/// means that the data structure would likely actually perform worse than an
+/// e.g. red-black tree, but its implementation is still much simpler.
+///
+/// Each trie node's children subdivide the range of possible sizes into two
+/// halves: a lower and an upper. The node itself holds a free list of some size
+/// within its range. This makes it possible to summarily replace any node with
+/// any leaf within its subtrie, which makes it very straightforward to remove a
+/// node. Insertion is also simple; the only real complexity lies with finding
+/// the best fit. This can still be done in logarithmic time with only a few
+/// cases to consider.
+///
+/// The trie refers to, but does not own, the Nodes that comprise it.
+class FreeTrie {
+public:
+  /// A trie node that is also a free list. Only the head node of each list is
+  /// actually part of the trie. The subtrie contains a continous SizeRange of
+  /// free lists. The lower and upper subtrie's contain the lower and upper half
+  /// of the subtries range. There is no direct relationship between the size of
+  /// this node's free list and the contents of the lower and upper subtries.
+  class Node : public FreeList::Node {
+    /// The child subtrie covering the lower half of this subtrie's size range.
+    /// Undefined if this is not the head of the list.
+    Node *lower;
+    /// The child subtrie covering the upper half of this subtrie's size range.
+    /// Undefined if this is not the head of the list.
+    Node *upper;
+    /// The parent subtrie. nullptr if this is the root or not the head of the
+    /// list.
+    Node *parent;
+
+    friend class FreeTrie;
+  };
+
+  /// Power-of-two range of sizes covered by a subtrie.
+  struct SizeRange {
+    size_t min;
+    size_t width;
+
+    LIBC_INLINE constexpr SizeRange(size_t min, size_t width)
+        : min(min), width(width) {
+      LIBC_ASSERT(!(width & (width - 1)) && "width must be a power of two");
+    }
+
+    /// @returns The lower half of the size range.
+    LIBC_INLINE SizeRange lower() const { return {min, width / 2}; }
+
+    /// @returns The upper half of the size range.
+    LIBC_INLINE SizeRange upper() const { return {min + width / 2, width / 2}; }
+
+    /// @returns The largest size in this range.
+    LIBC_INLINE size_t max() const { return min + (width - 1); }
+
+    /// @returns Whether the range contains the given size.
+    LIBC_INLINE bool contains(size_t size) const {
+      return min <= size && size < min + width;
+    }
+  };
+
+  LIBC_INLINE constexpr FreeTrie() : FreeTrie(SizeRange{0, 0}) {}
+  LIBC_INLINE constexpr FreeTrie(SizeRange range) : range(range) {}
+
+  /// Sets the range of possible block sizes. This can only be called when the
+  /// trie is empty.
+  LIBC_INLINE void set_range(FreeTrie::SizeRange range) {
+    LIBC_ASSERT(empty() && "cannot change the range of a preexisting trie");
+    this->range = range;
+  }
+
+  /// @returns Whether the trie contains any blocks.
+  LIBC_INLINE bool empty() const { return !root; }
+
+  /// Push a block to the trie.
+  void push(Block<> *block);
+
+  /// Remove a node from this trie node's free list.
+  void remove(Node *node);
+
+  /// @returns A smallest node that can allocate the given size; otherwise
+  /// nullptr.
+  Node *find_best_fit(size_t size);
+
+private:
+  /// @returns Whether a node is the head of its containing freelist.
+  bool is_head(Node *node) const { return node->parent || node == root; }
+
+  /// Replaces references to one node with another (or nullptr) in all adjacent
+  /// parent and child nodes.
+  void replace_node(Node *node, Node *new_node);
+
+  Node *root = nullptr;
+  SizeRange range;
+};
+
+LIBC_INLINE void FreeTrie::push(Block<> *block) {
+  LIBC_ASSERT(block->inner_size_free() >= sizeof(Node) &&
+              "block too small to accomodate free trie node");
+  size_t size = block->inner_size();
+  LIBC_ASSERT(range.contains(size) && "requested size out of trie range");
+
+  // Find the position in the tree to push to.
+  Node **cur = &root;
+  Node *parent = nullptr;
+  SizeRange cur_range = range;
+  while (*cur && (*cur)->size() != size) {
+    LIBC_ASSERT(cur_range.contains(size) && "requested size out of trie range");
+    parent = *cur;
+    if (size <= cur_range.lower().max()) {
+      cur = &(*cur)->lower;
+      cur_range = cur_range.lower();
+    } else {
+      cur = &(*cur)->upper;
+      cur_range = cur_range.upper();
+    }
+  }
+
+  Node *node = new (block->usable_space()) Node;
+  FreeList list = *cur;
+  if (list.empty()) {
+    node->parent = parent;
+    node->lower = node->upper = nullptr;
+  } else {
+    node->parent = nullptr;
+  }
+  list.push(node);
+  *cur = static_cast<Node *>(list.begin());
+}
+
+LIBC_INLINE FreeTrie::Node *FreeTrie::find_best_fit(size_t size) {
+  if (empty() || range.max() < size)
+    return nullptr;
+
+  Node *cur = root;
+  SizeRange cur_range = range;
+  Node *best_fit = nullptr;
+  Node *deferred_upper_trie = nullptr;
+  FreeTrie::SizeRange deferred_upper_range{0, 0};
+
+  while (true) {
+    LIBC_ASSERT(cur_range.contains(cur->size()) &&
+                "trie node size out of range");
+    LIBC_ASSERT(cur_range.max() >= size &&
+                "range could not fit requested size");
+    LIBC_ASSERT((!best_fit || cur_range.min < best_fit->size()) &&
+                "range could not contain a best fit");
+
+    // If the current node is an exact fit, it is a best fit.
+    if (cur->size() == size)
+      return cur;
+
+    if (cur->size() > size && (!best_fit || cur->size() < best_fit->size())) {
+      // The current node is a better fit.
+      best_fit = cur;
+
+      // If there is a deferred upper subtrie, then the current node is
+      // somewhere in its lower sibling subtrie. That means that the new best
+      // fit is better than the best fit in the deferred subtrie.
+      LIBC_ASSERT(
+          (!deferred_upper_trie ||
+           deferred_upper_range.min > best_fit->size()) &&
+          "deferred upper subtrie should be outclassed by new best fit");
+      deferred_upper_trie = nullptr;
+    }
+
+    // Determine which subtries might contain the best fit.
+    bool lower_impossible = !cur->lower || cur_range.lower().max() < size;
+    bool upper_impossible =
+        !cur->upper ||
+        // If every node in the lower trie fits
+        (!lower_impossible && cur_range.min >= size) ||
+        // If every node in the upper trie is worse than the current best
+        (best_fit && cur_range.upper().min >= best_fit->size());
+
+    if (lower_impossible && upper_impossible) {
+      if (!deferred_upper_trie)
+        return best_fit;
+      // Scan the deferred upper subtrie and consider whether any element within
+      // provides a better fit.
+      //
+      // This can only ever be reached once. In a deferred upper subtrie, every
+      // node fits, so the higher of two subtries can never contain a best fit.
+      cur = deferred_upper_trie;
+      cur_range = deferred_upper_range;
+      deferred_upper_trie = nullptr;
+      continue;
+    }
+
+    if (lower_impossible) {
+      cur = cur->upper;
+      cur_range = cur_range.upper();
+    } else if (upper_impossible) {
+      cur = cur->lower;
+      cur_range = cur_range.lower();
+    } else {
+      // Both subtries might contain a better fit. Any fit in the lower subtrie
+      // is better than the any fit in the upper subtrie, so scan the lower
+      // and return to the upper only if no better fits were found. (Any better
+      // fit found clears the deferred upper subtrie.)
+      LIBC_ASSERT((!deferred_upper_trie ||
+                   cur_range.upper().max() < deferred_upper_range.min) &&
+                  "old deferred upper subtrie should be outclassed by new");
+      deferred_upper_trie = cur->upper;
+      deferred_upper_range = cur_range.upper();
+      cur = cur->lower;
+      cur_range = cur_range.lower();
+    }
+  }
+}
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC___SUPPORT_FREETRIE_H

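The trie's depth is bounded by its configured SizeRange, not by the number of stored lists: each child halves `width`, so a range of width W is exhausted after at most log2(W) bisections, which is what makes insert, remove, and best-fit lookups logarithmic. A cut-down copy of the SizeRange arithmetic above, with a few checks of how a range bisects:

#include <cstddef>

// Same bisection rule as FreeTrie::SizeRange, reduced to a plain aggregate.
struct Range {
  std::size_t min, width;
  constexpr Range lower() const { return {min, width / 2}; }
  constexpr Range upper() const { return {min + width / 2, width / 2}; }
  constexpr std::size_t max() const { return min + (width - 1); }
  constexpr bool contains(std::size_t s) const {
    return min <= s && s < min + width;
  }
};

static_assert(Range{0, 1024}.lower().max() == 511, "lower half covers [0, 511]");
static_assert(Range{0, 1024}.upper().min == 512, "upper half starts at 512");
static_assert(Range{0, 1024}.upper().contains(700), "700 lands in the upper half");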
‎libc/src/stdlib/freelist_malloc.cpp

Lines changed: 2 additions & 2 deletions
@@ -18,8 +18,8 @@
 
 namespace LIBC_NAMESPACE_DECL {
 
-static LIBC_CONSTINIT FreeListHeap<> freelist_heap_symbols;
-FreeListHeap<> *freelist_heap = &freelist_heap_symbols;
+static LIBC_CONSTINIT FreeListHeap freelist_heap_symbols;
+FreeListHeap *freelist_heap = &freelist_heap_symbols;
 
 LLVM_LIBC_FUNCTION(void *, malloc, (size_t size)) {
   return freelist_heap->allocate(size);

‎libc/test/src/__support/CMakeLists.txt

Lines changed: 27 additions & 0 deletions
@@ -24,7 +24,34 @@ if(NOT LIBC_TARGET_OS_IS_GPU)
     DEPENDS
       libc.src.__support.CPP.array
       libc.src.__support.CPP.span
+      libc.src.__support.block
+      libc.src.__support.freelist
+  )
+
+  add_libc_test(
+    freetrie_test
+    SUITE
+      libc-support-tests
+    SRCS
+      freetrie_test.cpp
+    DEPENDS
+      libc.src.__support.CPP.optional
+      libc.src.__support.block
+      libc.src.__support.freetrie
+  )
+
+  add_libc_test(
+    freestore_test
+    SUITE
+      libc-support-tests
+    SRCS
+      freestore_test.cpp
+    DEPENDS
+      libc.src.__support.CPP.optional
+      libc.src.__support.block
       libc.src.__support.freelist
+      libc.src.__support.freestore
+      libc.src.__support.freetrie
   )
 endif()
 
‎libc/test/src/__support/block_test.cpp

Lines changed: 0 additions & 14 deletions
@@ -238,20 +238,6 @@ TEST_FOR_EACH_BLOCK_TYPE(CannotMakeSecondBlockLargerInSplit) {
   ASSERT_FALSE(result.has_value());
 }
 
-TEST_FOR_EACH_BLOCK_TYPE(CannotMakeZeroSizeFirstBlock) {
-  // This block doesn't support splitting with zero payload size, since the
-  // prev_ field of the next block is always available.
-  constexpr size_t kN = 1024;
-
-  alignas(BlockType::ALIGNMENT) array<byte, kN> bytes;
-  auto result = BlockType::init(bytes);
-  ASSERT_TRUE(result.has_value());
-  BlockType *block = *result;
-
-  result = block->split(0);
-  EXPECT_FALSE(result.has_value());
-}
-
 TEST_FOR_EACH_BLOCK_TYPE(CanMakeMinimalSizeFirstBlock) {
   // This block does support splitting with minimal payload size.
   constexpr size_t kN = 1024;

‎libc/test/src/__support/freelist_heap_test.cpp

Lines changed: 25 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -13,9 +13,12 @@
1313
#include "src/string/memcpy.h"
1414
#include "test/UnitTest/Test.h"
1515

16-
namespace LIBC_NAMESPACE_DECL {
17-
16+
using LIBC_NAMESPACE::Block;
1817
using LIBC_NAMESPACE::freelist_heap;
18+
using LIBC_NAMESPACE::FreeListHeap;
19+
using LIBC_NAMESPACE::FreeListHeapBuffer;
20+
using LIBC_NAMESPACE::cpp::byte;
21+
using LIBC_NAMESPACE::cpp::span;
1922

2023
// Similar to `LlvmLibcBlockTest` in block_test.cpp, we'd like to run the same
2124
// tests independently for different parameters. In this case, we'd like to test
@@ -28,23 +31,23 @@ using LIBC_NAMESPACE::freelist_heap;
2831
// made in tests leak and aren't free'd. This is fine for the purposes of this
2932
// test file.
3033
#define TEST_FOR_EACH_ALLOCATOR(TestCase, BufferSize) \
31-
class LlvmLibcFreeListHeapTest##TestCase : public testing::Test { \
34+
class LlvmLibcFreeListHeapTest##TestCase \
35+
: public LIBC_NAMESPACE::testing::Test { \
3236
public: \
3337
FreeListHeapBuffer<BufferSize> fake_global_buffer; \
3438
void SetUp() override { \
3539
freelist_heap = \
3640
new (&fake_global_buffer) FreeListHeapBuffer<BufferSize>; \
3741
} \
38-
void RunTest(FreeListHeap<> &allocator, [[maybe_unused]] size_t N); \
42+
void RunTest(FreeListHeap &allocator, [[maybe_unused]] size_t N); \
3943
}; \
4044
TEST_F(LlvmLibcFreeListHeapTest##TestCase, TestCase) { \
41-
alignas(FreeListHeap<>::BlockType) \
42-
cpp::byte buf[BufferSize] = {cpp::byte(0)}; \
43-
FreeListHeap<> allocator(buf); \
45+
alignas(Block<>) byte buf[BufferSize] = {byte(0)}; \
46+
FreeListHeap allocator(buf); \
4447
RunTest(allocator, BufferSize); \
4548
RunTest(*freelist_heap, freelist_heap->region().size()); \
4649
} \
47-
void LlvmLibcFreeListHeapTest##TestCase::RunTest(FreeListHeap<> &allocator, \
50+
void LlvmLibcFreeListHeapTest##TestCase::RunTest(FreeListHeap &allocator, \
4851
size_t N)
4952

5053
TEST_FOR_EACH_ALLOCATOR(CanAllocate, 2048) {
@@ -92,14 +95,13 @@ TEST_FOR_EACH_ALLOCATOR(ReturnsNullWhenAllocationTooLarge, 2048) {
9295
// is used for other test cases and we don't explicitly free them.
9396
TEST(LlvmLibcFreeListHeap, ReturnsNullWhenFull) {
9497
constexpr size_t N = 2048;
95-
alignas(FreeListHeap<>::BlockType) cpp::byte buf[N] = {cpp::byte(0)};
98+
alignas(Block<>) byte buf[N] = {byte(0)};
9699

97-
FreeListHeap<> allocator(buf);
100+
FreeListHeap allocator(buf);
98101

99102
// Use aligned_allocate so we don't need to worry about ensuring the `buf`
100103
// being aligned to max_align_t.
101-
EXPECT_NE(allocator.aligned_allocate(
102-
1, N - 2 * FreeListHeap<>::BlockType::BLOCK_OVERHEAD),
104+
EXPECT_NE(allocator.aligned_allocate(1, N - 2 * Block<>::BLOCK_OVERHEAD),
103105
static_cast<void *>(nullptr));
104106
EXPECT_EQ(allocator.allocate(1), static_cast<void *>(nullptr));
105107
}
@@ -134,9 +136,9 @@ TEST_FOR_EACH_ALLOCATOR(ReallocHasSameContent, 2048) {
134136
constexpr size_t ALLOC_SIZE = sizeof(int);
135137
constexpr size_t kNewAllocSize = sizeof(int) * 2;
136138
// Data inside the allocated block.
137-
cpp::byte data1[ALLOC_SIZE];
139+
byte data1[ALLOC_SIZE];
138140
// Data inside the reallocated block.
139-
cpp::byte data2[ALLOC_SIZE];
141+
byte data2[ALLOC_SIZE];
140142

141143
int *ptr1 = reinterpret_cast<int *>(allocator.allocate(ALLOC_SIZE));
142144
*ptr1 = 42;
@@ -188,10 +190,9 @@ TEST_FOR_EACH_ALLOCATOR(CanCalloc, 2048) {
188190
constexpr size_t ALLOC_SIZE = 128;
189191
constexpr size_t NUM = 4;
190192
constexpr int size = NUM * ALLOC_SIZE;
191-
constexpr cpp::byte zero{0};
193+
constexpr byte zero{0};
192194

193-
cpp::byte *ptr1 =
194-
reinterpret_cast<cpp::byte *>(allocator.calloc(NUM, ALLOC_SIZE));
195+
byte *ptr1 = reinterpret_cast<byte *>(allocator.calloc(NUM, ALLOC_SIZE));
195196

196197
// calloc'd content is zero.
197198
for (int i = 0; i < size; i++) {
@@ -203,10 +204,9 @@ TEST_FOR_EACH_ALLOCATOR(CanCallocWeirdSize, 2048) {
203204
constexpr size_t ALLOC_SIZE = 143;
204205
constexpr size_t NUM = 3;
205206
constexpr int size = NUM * ALLOC_SIZE;
206-
constexpr cpp::byte zero{0};
207+
constexpr byte zero{0};
207208

208-
cpp::byte *ptr1 =
209-
reinterpret_cast<cpp::byte *>(allocator.calloc(NUM, ALLOC_SIZE));
209+
byte *ptr1 = reinterpret_cast<byte *>(allocator.calloc(NUM, ALLOC_SIZE));
210210

211211
// calloc'd content is zero.
212212
for (int i = 0; i < size; i++) {
@@ -241,17 +241,16 @@ TEST_FOR_EACH_ALLOCATOR(AlignedAlloc, 2048) {
241241

242242
// This test is not part of the TEST_FOR_EACH_ALLOCATOR since we want to
243243
// explicitly ensure that the buffer can still return aligned allocations even
244-
// if the underlying buffer is at most aligned to the BlockType alignment. This
244+
// if the underlying buffer is at most aligned to the Block<> alignment. This
245245
// is so we can check that we can still get aligned allocations even if the
246246
// underlying buffer is not aligned to the alignments we request.
247-
TEST(LlvmLibcFreeListHeap, AlignedAllocOnlyBlockTypeAligned) {
247+
TEST(LlvmLibcFreeListHeap, AlignedAllocOnlyBlockAligned) {
248248
constexpr size_t BUFFER_SIZE = 4096;
249-
constexpr size_t BUFFER_ALIGNMENT = alignof(FreeListHeap<>::BlockType) * 2;
250-
alignas(BUFFER_ALIGNMENT) cpp::byte buf[BUFFER_SIZE] = {cpp::byte(0)};
249+
constexpr size_t BUFFER_ALIGNMENT = alignof(Block<>) * 2;
250+
alignas(BUFFER_ALIGNMENT) byte buf[BUFFER_SIZE] = {byte(0)};
251251

252252
// Ensure the underlying buffer is at most aligned to the block type.
253-
FreeListHeap<> allocator(
254-
span<cpp::byte>(buf).subspan(alignof(FreeListHeap<>::BlockType)));
253+
FreeListHeap allocator(span<byte>(buf).subspan(alignof(Block<>)));
255254

256255
constexpr size_t ALIGNMENTS[] = {1, 2, 4, 8, 16, 32, 64, 128, 256};
257256
constexpr size_t SIZE_SCALES[] = {1, 2, 3, 4, 5};
@@ -289,5 +288,3 @@ TEST_FOR_EACH_ALLOCATOR(InvalidAlignedAllocAlignment, 2048) {
289288
ptr = allocator.aligned_allocate(0, 8);
290289
EXPECT_EQ(ptr, static_cast<void *>(nullptr));
291290
}
292-
293-
} // namespace LIBC_NAMESPACE_DECL
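The heap tests above drop the FreeListHeap<> template, use Block<> directly, and (per the comments in the test bodies) lean on aligned_allocate so the backing buffer only needs Block alignment rather than max_align_t. As a quick orientation for readers of the diff, here is a minimal usage sketch in that same spirit; the include path and the free() calls are assumptions drawn from the surrounding test suite, not lines in this commit:

#include "src/__support/freelist_heap.h" // assumed to provide FreeListHeap and Block<>

using LIBC_NAMESPACE::Block;
using LIBC_NAMESPACE::FreeListHeap;
using LIBC_NAMESPACE::cpp::byte;

void freelist_heap_sketch() {
  // A Block-aligned backing buffer; the heap carves Block<>s out of it.
  alignas(Block<>) byte buf[2048] = {byte(0)};
  FreeListHeap allocator(buf);

  // Plain allocation.
  void *p = allocator.allocate(128);

  // Aligned allocation succeeds even though buf is only Block-aligned.
  void *q = allocator.aligned_allocate(/*alignment=*/64, /*size=*/256);

  // Assumed: FreeListHeap exposes free(), since it backs malloc/free.
  allocator.free(p);
  allocator.free(q);
}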

‎libc/test/src/__support/freelist_malloc_test.cpp

Lines changed: 5 additions & 6 deletions
@@ -13,6 +13,7 @@
1313
#include "src/stdlib/malloc.h"
1414
#include "test/UnitTest/Test.h"
1515

16+
using LIBC_NAMESPACE::Block;
1617
using LIBC_NAMESPACE::freelist_heap;
1718
using LIBC_NAMESPACE::FreeListHeap;
1819
using LIBC_NAMESPACE::FreeListHeapBuffer;
@@ -22,15 +23,13 @@ TEST(LlvmLibcFreeListMalloc, Malloc) {
2223
constexpr size_t kCallocNum = 4;
2324
constexpr size_t kCallocSize = 64;
2425

25-
typedef FreeListHeap<>::BlockType Block;
26-
2726
void *ptr1 = LIBC_NAMESPACE::malloc(kAllocSize);
28-
auto *block = Block::from_usable_space(ptr1);
27+
auto *block = Block<>::from_usable_space(ptr1);
2928
EXPECT_GE(block->inner_size(), kAllocSize);
3029

3130
LIBC_NAMESPACE::free(ptr1);
32-
ASSERT_NE(block->next(), static_cast<Block *>(nullptr));
33-
ASSERT_EQ(block->next()->next(), static_cast<Block *>(nullptr));
31+
ASSERT_NE(block->next(), static_cast<Block<> *>(nullptr));
32+
ASSERT_EQ(block->next()->next(), static_cast<Block<> *>(nullptr));
3433
size_t heap_size = block->inner_size();
3534

3635
void *ptr2 = LIBC_NAMESPACE::calloc(kCallocNum, kCallocSize);
@@ -47,7 +46,7 @@ TEST(LlvmLibcFreeListMalloc, Malloc) {
4746
void *ptr3 = LIBC_NAMESPACE::aligned_alloc(ALIGN, kAllocSize);
4847
EXPECT_NE(ptr3, static_cast<void *>(nullptr));
4948
EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr3) % ALIGN, size_t(0));
50-
auto *aligned_block = reinterpret_cast<Block *>(ptr3);
49+
auto *aligned_block = reinterpret_cast<Block<> *>(ptr3);
5150
EXPECT_GE(aligned_block->inner_size(), kAllocSize);
5251

5352
LIBC_NAMESPACE::free(ptr3);

‎libc/test/src/__support/freelist_test.cpp

Lines changed: 37 additions & 150 deletions
@@ -8,159 +8,46 @@
88

99
#include <stddef.h>
1010

11-
#include "src/__support/CPP/array.h"
12-
#include "src/__support/CPP/span.h"
1311
#include "src/__support/freelist.h"
1412
#include "test/UnitTest/Test.h"
1513

14+
using LIBC_NAMESPACE::Block;
1615
using LIBC_NAMESPACE::FreeList;
17-
using LIBC_NAMESPACE::cpp::array;
1816
using LIBC_NAMESPACE::cpp::byte;
19-
using LIBC_NAMESPACE::cpp::span;
20-
21-
static constexpr size_t SIZE = 8;
22-
static constexpr array<size_t, SIZE> example_sizes = {64, 128, 256, 512,
23-
1024, 2048, 4096, 8192};
24-
25-
TEST(LlvmLibcFreeList, EmptyListHasNoMembers) {
26-
FreeList<SIZE> list(example_sizes);
27-
28-
auto item = list.find_chunk(4);
29-
EXPECT_EQ(item.size(), static_cast<size_t>(0));
30-
item = list.find_chunk(128);
31-
EXPECT_EQ(item.size(), static_cast<size_t>(0));
32-
}
33-
34-
TEST(LlvmLibcFreeList, CanRetrieveAddedMember) {
35-
FreeList<SIZE> list(example_sizes);
36-
constexpr size_t N = 512;
37-
38-
byte data[N] = {byte(0)};
39-
40-
bool ok = list.add_chunk(span<byte>(data, N));
41-
EXPECT_TRUE(ok);
42-
43-
auto item = list.find_chunk(N);
44-
EXPECT_EQ(item.size(), N);
45-
EXPECT_EQ(item.data(), data);
46-
}
47-
48-
TEST(LlvmLibcFreeList, CanRetrieveAddedMemberForSmallerSize) {
49-
FreeList<SIZE> list(example_sizes);
50-
constexpr size_t N = 512;
51-
52-
byte data[N] = {byte(0)};
53-
54-
ASSERT_TRUE(list.add_chunk(span<byte>(data, N)));
55-
auto item = list.find_chunk(N / 2);
56-
EXPECT_EQ(item.size(), N);
57-
EXPECT_EQ(item.data(), data);
58-
}
59-
60-
TEST(LlvmLibcFreeList, CanRemoveItem) {
61-
FreeList<SIZE> list(example_sizes);
62-
constexpr size_t N = 512;
63-
64-
byte data[N] = {byte(0)};
65-
66-
ASSERT_TRUE(list.add_chunk(span<byte>(data, N)));
67-
EXPECT_TRUE(list.remove_chunk(span<byte>(data, N)));
68-
69-
auto item = list.find_chunk(N);
70-
EXPECT_EQ(item.size(), static_cast<size_t>(0));
71-
}
72-
73-
TEST(LlvmLibcFreeList, FindReturnsSmallestChunk) {
74-
FreeList<SIZE> list(example_sizes);
75-
constexpr size_t kN1 = 512;
76-
constexpr size_t kN2 = 1024;
77-
78-
byte data1[kN1] = {byte(0)};
79-
byte data2[kN2] = {byte(0)};
80-
81-
ASSERT_TRUE(list.add_chunk(span<byte>(data1, kN1)));
82-
ASSERT_TRUE(list.add_chunk(span<byte>(data2, kN2)));
83-
84-
auto chunk = list.find_chunk(kN1 / 2);
85-
EXPECT_EQ(chunk.size(), kN1);
86-
EXPECT_EQ(chunk.data(), data1);
87-
88-
chunk = list.find_chunk(kN1);
89-
EXPECT_EQ(chunk.size(), kN1);
90-
EXPECT_EQ(chunk.data(), data1);
91-
92-
chunk = list.find_chunk(kN1 + 1);
93-
EXPECT_EQ(chunk.size(), kN2);
94-
EXPECT_EQ(chunk.data(), data2);
95-
}
96-
97-
TEST(LlvmLibcFreeList, FindReturnsCorrectChunkInSameBucket) {
98-
// If we have two values in the same bucket, ensure that the allocation will
99-
// pick an appropriately sized one.
100-
FreeList<SIZE> list(example_sizes);
101-
constexpr size_t kN1 = 512;
102-
constexpr size_t kN2 = 257;
103-
104-
byte data1[kN1] = {byte(0)};
105-
byte data2[kN2] = {byte(0)};
106-
107-
// List should now be 257 -> 512 -> NULL
108-
ASSERT_TRUE(list.add_chunk(span<byte>(data1, kN1)));
109-
ASSERT_TRUE(list.add_chunk(span<byte>(data2, kN2)));
110-
111-
auto chunk = list.find_chunk(kN2 + 1);
112-
EXPECT_EQ(chunk.size(), kN1);
113-
}
114-
115-
TEST(LlvmLibcFreeList, FindCanMoveUpThroughBuckets) {
116-
// Ensure that finding a chunk will move up through buckets if no appropriate
117-
// chunks were found in a given bucket
118-
FreeList<SIZE> list(example_sizes);
119-
constexpr size_t kN1 = 257;
120-
constexpr size_t kN2 = 513;
121-
122-
byte data1[kN1] = {byte(0)};
123-
byte data2[kN2] = {byte(0)};
124-
125-
// List should now be:
126-
// bkt[3] (257 bytes up to 512 bytes) -> 257 -> NULL
127-
// bkt[4] (513 bytes up to 1024 bytes) -> 513 -> NULL
128-
ASSERT_TRUE(list.add_chunk(span<byte>(data1, kN1)));
129-
ASSERT_TRUE(list.add_chunk(span<byte>(data2, kN2)));
130-
131-
// Request a 300 byte chunk. This should return the 513 byte one
132-
auto chunk = list.find_chunk(kN1 + 1);
133-
EXPECT_EQ(chunk.size(), kN2);
134-
}
135-
136-
TEST(LlvmLibcFreeList, RemoveUnknownChunkReturnsNotFound) {
137-
FreeList<SIZE> list(example_sizes);
138-
constexpr size_t N = 512;
139-
140-
byte data[N] = {byte(0)};
141-
byte data2[N] = {byte(0)};
142-
143-
ASSERT_TRUE(list.add_chunk(span<byte>(data, N)));
144-
EXPECT_FALSE(list.remove_chunk(span<byte>(data2, N)));
145-
}
146-
147-
TEST(LlvmLibcFreeList, CanStoreMultipleChunksPerBucket) {
148-
FreeList<SIZE> list(example_sizes);
149-
constexpr size_t N = 512;
150-
151-
byte data1[N] = {byte(0)};
152-
byte data2[N] = {byte(0)};
153-
154-
ASSERT_TRUE(list.add_chunk(span<byte>(data1, N)));
155-
ASSERT_TRUE(list.add_chunk(span<byte>(data2, N)));
156-
157-
auto chunk1 = list.find_chunk(N);
158-
ASSERT_TRUE(list.remove_chunk(chunk1));
159-
auto chunk2 = list.find_chunk(N);
160-
ASSERT_TRUE(list.remove_chunk(chunk2));
161-
162-
// Ordering of the chunks doesn't matter
163-
EXPECT_TRUE(chunk1.data() != chunk2.data());
164-
EXPECT_TRUE(chunk1.data() == data1 || chunk1.data() == data2);
165-
EXPECT_TRUE(chunk2.data() == data1 || chunk2.data() == data2);
17+
using LIBC_NAMESPACE::cpp::optional;
18+
19+
TEST(LlvmLibcFreeList, FreeList) {
20+
byte mem[1024];
21+
optional<Block<> *> maybeBlock = Block<>::init(mem);
22+
ASSERT_TRUE(maybeBlock.has_value());
23+
Block<> *block1 = *maybeBlock;
24+
25+
maybeBlock = block1->split(128);
26+
ASSERT_TRUE(maybeBlock.has_value());
27+
Block<> *block2 = *maybeBlock;
28+
29+
maybeBlock = block2->split(128);
30+
ASSERT_TRUE(maybeBlock.has_value());
31+
32+
FreeList list;
33+
list.push(block1);
34+
ASSERT_FALSE(list.empty());
35+
EXPECT_EQ(list.front(), block1);
36+
37+
list.push(block2);
38+
EXPECT_EQ(list.front(), block1);
39+
40+
list.pop();
41+
ASSERT_FALSE(list.empty());
42+
EXPECT_EQ(list.front(), block2);
43+
44+
list.pop();
45+
ASSERT_TRUE(list.empty());
46+
47+
list.push(block1);
48+
list.push(block2);
49+
list.remove(reinterpret_cast<FreeList::Node *>(block2->usable_space()));
50+
EXPECT_EQ(list.front(), block1);
51+
list.pop();
52+
ASSERT_TRUE(list.empty());
16653
}
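The rewritten freelist test pins down the new, much smaller FreeList contract: it is an intrusive list of Block<>s, where push/pop/front operate on blocks and remove takes the FreeList::Node stored in a block's usable space. A condensed sketch limited to those calls, mirroring the include set of the test above:

#include "src/__support/freelist.h"

using LIBC_NAMESPACE::Block;
using LIBC_NAMESPACE::FreeList;
using LIBC_NAMESPACE::cpp::byte;
using LIBC_NAMESPACE::cpp::optional;

void freelist_sketch() {
  byte mem[512];
  optional<Block<> *> maybe_block = Block<>::init(mem);
  if (!maybe_block.has_value())
    return;
  Block<> *block = *maybe_block;

  FreeList list;
  list.push(block); // The list node is written into block->usable_space().
  Block<> *front = list.front();
  list.pop(); // Removal from the front...
  (void)front;

  list.push(block);
  // ...or targeted removal via the node embedded in the block.
  list.remove(reinterpret_cast<FreeList::Node *>(block->usable_space()));
}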
libc/test/src/__support/freestore_test.cpp

Lines changed: 107 additions & 0 deletions
@@ -0,0 +1,107 @@
1+
//===-- Unittests for a freestore -------------------------------*- C++ -*-===//
2+
//
3+
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4+
// See https://llvm.org/LICENSE.txt for license information.
5+
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6+
//
7+
//===----------------------------------------------------------------------===//
8+
9+
#include <stddef.h>
10+
11+
#include "src/__support/freestore.h"
12+
#include "test/UnitTest/Test.h"
13+
14+
using LIBC_NAMESPACE::Block;
15+
using LIBC_NAMESPACE::FreeList;
16+
using LIBC_NAMESPACE::FreeStore;
17+
using LIBC_NAMESPACE::FreeTrie;
18+
using LIBC_NAMESPACE::cpp::byte;
19+
using LIBC_NAMESPACE::cpp::optional;
20+
21+
// Inserting or removing blocks too small to be tracked does nothing.
22+
TEST(LlvmLibcFreeStore, TooSmall) {
23+
byte mem[1024];
24+
optional<Block<> *> maybeBlock = Block<>::init(mem);
25+
ASSERT_TRUE(maybeBlock.has_value());
26+
Block<> *too_small = *maybeBlock;
27+
maybeBlock = too_small->split(sizeof(Block<>::offset_type));
28+
ASSERT_TRUE(maybeBlock.has_value());
29+
Block<> *remainder = *maybeBlock;
30+
31+
FreeStore store;
32+
store.set_range({0, 4096});
33+
store.insert(too_small);
34+
store.insert(remainder);
35+
36+
EXPECT_EQ(store.remove_best_fit(too_small->inner_size()), remainder);
37+
store.remove(too_small);
38+
}
39+
40+
TEST(LlvmLibcFreeStore, RemoveBestFit) {
41+
byte mem[1024];
42+
optional<Block<> *> maybeBlock = Block<>::init(mem);
43+
ASSERT_TRUE(maybeBlock.has_value());
44+
45+
Block<> *smallest = *maybeBlock;
46+
maybeBlock =
47+
smallest->split(sizeof(FreeList::Node) + sizeof(Block<>::offset_type));
48+
ASSERT_TRUE(maybeBlock.has_value());
49+
50+
Block<> *largest_small = *maybeBlock;
51+
maybeBlock =
52+
largest_small->split(sizeof(FreeTrie::Node) +
53+
sizeof(Block<>::offset_type) - alignof(max_align_t));
54+
ASSERT_TRUE(maybeBlock.has_value());
55+
if (largest_small->inner_size() == smallest->inner_size())
56+
largest_small = smallest;
57+
ASSERT_GE(largest_small->inner_size(), smallest->inner_size());
58+
59+
Block<> *remainder = *maybeBlock;
60+
61+
FreeStore store;
62+
store.set_range({0, 4096});
63+
store.insert(smallest);
64+
if (largest_small != smallest)
65+
store.insert(largest_small);
66+
store.insert(remainder);
67+
68+
// Find exact match for smallest.
69+
ASSERT_EQ(store.remove_best_fit(smallest->inner_size()), smallest);
70+
store.insert(smallest);
71+
72+
// Find exact match for largest.
73+
ASSERT_EQ(store.remove_best_fit(largest_small->inner_size()), largest_small);
74+
store.insert(largest_small);
75+
76+
// Search smallest for best fit.
77+
ASSERT_EQ(store.remove_best_fit(smallest->inner_size() + 1), largest_small);
78+
store.insert(largest_small);
79+
80+
// Continue search for best fit to large blocks.
81+
EXPECT_EQ(store.remove_best_fit(largest_small->inner_size() + 1), remainder);
82+
}
83+
84+
TEST(LlvmLibcFreeStore, Remove) {
85+
byte mem[1024];
86+
optional<Block<> *> maybeBlock = Block<>::init(mem);
87+
ASSERT_TRUE(maybeBlock.has_value());
88+
89+
Block<> *small = *maybeBlock;
90+
maybeBlock =
91+
small->split(sizeof(FreeList::Node) + sizeof(Block<>::offset_type));
92+
ASSERT_TRUE(maybeBlock.has_value());
93+
94+
Block<> *remainder = *maybeBlock;
95+
96+
FreeStore store;
97+
store.set_range({0, 4096});
98+
store.insert(small);
99+
store.insert(remainder);
100+
101+
store.remove(remainder);
102+
ASSERT_EQ(store.remove_best_fit(remainder->inner_size()),
103+
static_cast<Block<> *>(nullptr));
104+
store.remove(small);
105+
ASSERT_EQ(store.remove_best_fit(small->inner_size()),
106+
static_cast<Block<> *>(nullptr));
107+
}
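Taken together, the FreeStore tests above describe the allocator-facing flow: declare the size range with set_range, insert freed blocks, and pull the best fit back out at allocation time. A short sketch using only the calls exercised in these tests; the sizes are illustrative:

#include "src/__support/freestore.h"

using LIBC_NAMESPACE::Block;
using LIBC_NAMESPACE::FreeStore;
using LIBC_NAMESPACE::cpp::byte;
using LIBC_NAMESPACE::cpp::optional;

void freestore_sketch() {
  byte mem[1024];
  optional<Block<> *> maybe_block = Block<>::init(mem);
  if (!maybe_block.has_value())
    return;
  Block<> *small = *maybe_block;
  optional<Block<> *> maybe_rest = small->split(128);
  if (!maybe_rest.has_value())
    return;
  Block<> *remainder = *maybe_rest;

  FreeStore store;
  store.set_range({0, 4096}); // Same literal size range as the tests above.
  store.insert(small);
  store.insert(remainder);

  // Best fit for a 100-byte request is the ~128-byte block, not the large
  // remainder; the store returns it already removed from its containers.
  Block<> *fit = store.remove_best_fit(100);
  if (fit)
    store.insert(fit); // A real allocator would return fit->usable_space().
}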
libc/test/src/__support/freetrie_test.cpp

Lines changed: 125 additions & 0 deletions
@@ -0,0 +1,125 @@
1+
//===-- Unittests for a freetrie --------------------------------*- C++ -*-===//
2+
//
3+
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4+
// See https://llvm.org/LICENSE.txt for license information.
5+
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6+
//
7+
//===----------------------------------------------------------------------===//
8+
9+
#include <stddef.h>
10+
11+
#include "src/__support/freetrie.h"
12+
#include "test/UnitTest/Test.h"
13+
14+
using LIBC_NAMESPACE::Block;
15+
using LIBC_NAMESPACE::FreeTrie;
16+
using LIBC_NAMESPACE::cpp::byte;
17+
using LIBC_NAMESPACE::cpp::optional;
18+
19+
TEST(LlvmLibcFreeTrie, FindBestFitRoot) {
20+
FreeTrie trie({0, 4096});
21+
EXPECT_EQ(trie.find_best_fit(123), static_cast<FreeTrie::Node *>(nullptr));
22+
23+
byte mem[1024];
24+
optional<Block<> *> maybeBlock = Block<>::init(mem);
25+
ASSERT_TRUE(maybeBlock.has_value());
26+
Block<> *block = *maybeBlock;
27+
trie.push(block);
28+
29+
FreeTrie::Node *root = trie.find_best_fit(0);
30+
ASSERT_EQ(root->block(), block);
31+
EXPECT_EQ(trie.find_best_fit(block->inner_size() - 1), root);
32+
EXPECT_EQ(trie.find_best_fit(block->inner_size()), root);
33+
EXPECT_EQ(trie.find_best_fit(block->inner_size() + 1),
34+
static_cast<FreeTrie::Node *>(nullptr));
35+
EXPECT_EQ(trie.find_best_fit(4095), static_cast<FreeTrie::Node *>(nullptr));
36+
}
37+
38+
TEST(LlvmLibcFreeTrie, FindBestFitLower) {
39+
byte mem[4096];
40+
optional<Block<> *> maybeBlock = Block<>::init(mem);
41+
ASSERT_TRUE(maybeBlock.has_value());
42+
Block<> *lower = *maybeBlock;
43+
maybeBlock = lower->split(512);
44+
ASSERT_TRUE(maybeBlock.has_value());
45+
Block<> *root = *maybeBlock;
46+
47+
FreeTrie trie({0, 4096});
48+
trie.push(root);
49+
trie.push(lower);
50+
51+
EXPECT_EQ(trie.find_best_fit(0)->block(), lower);
52+
}
53+
54+
TEST(LlvmLibcFreeTrie, FindBestFitUpper) {
55+
byte mem[4096];
56+
optional<Block<> *> maybeBlock = Block<>::init(mem);
57+
ASSERT_TRUE(maybeBlock.has_value());
58+
Block<> *root = *maybeBlock;
59+
maybeBlock = root->split(512);
60+
ASSERT_TRUE(maybeBlock.has_value());
61+
Block<> *upper = *maybeBlock;
62+
63+
FreeTrie trie({0, 4096});
64+
trie.push(root);
65+
trie.push(upper);
66+
67+
EXPECT_EQ(trie.find_best_fit(root->inner_size() + 1)->block(), upper);
68+
// The upper subtrie should be skipped if it could not contain a better fit.
69+
EXPECT_EQ(trie.find_best_fit(root->inner_size() - 1)->block(), root);
70+
}
71+
72+
TEST(LlvmLibcFreeTrie, FindBestFitLowerAndUpper) {
73+
byte mem[4096];
74+
optional<Block<> *> maybeBlock = Block<>::init(mem);
75+
ASSERT_TRUE(maybeBlock.has_value());
76+
Block<> *root = *maybeBlock;
77+
maybeBlock = root->split(1024);
78+
ASSERT_TRUE(maybeBlock.has_value());
79+
Block<> *lower = *maybeBlock;
80+
maybeBlock = lower->split(128);
81+
ASSERT_TRUE(maybeBlock.has_value());
82+
Block<> *upper = *maybeBlock;
83+
84+
FreeTrie trie({0, 4096});
85+
trie.push(root);
86+
trie.push(lower);
87+
trie.push(upper);
88+
89+
// The lower subtrie is examined first.
90+
EXPECT_EQ(trie.find_best_fit(0)->block(), lower);
91+
// The upper subtrie is examined if there are no fits found in the lower
92+
// subtrie.
93+
EXPECT_EQ(trie.find_best_fit(2048)->block(), upper);
94+
}
95+
96+
TEST(LlvmLibcFreeTrie, Remove) {
97+
byte mem[4096];
98+
optional<Block<> *> maybeBlock = Block<>::init(mem);
99+
ASSERT_TRUE(maybeBlock.has_value());
100+
Block<> *small1 = *maybeBlock;
101+
maybeBlock = small1->split(512);
102+
ASSERT_TRUE(maybeBlock.has_value());
103+
Block<> *small2 = *maybeBlock;
104+
maybeBlock = small2->split(512);
105+
ASSERT_TRUE(maybeBlock.has_value());
106+
ASSERT_EQ(small1->inner_size(), small2->inner_size());
107+
Block<> *large = *maybeBlock;
108+
109+
// Removing the root empties the trie.
110+
FreeTrie trie({0, 4096});
111+
trie.push(large);
112+
FreeTrie::Node *large_node = trie.find_best_fit(0);
113+
ASSERT_EQ(large_node->block(), large);
114+
trie.remove(large_node);
115+
ASSERT_TRUE(trie.empty());
116+
117+
// Removing the head of a trie list preserves the trie structure.
118+
trie.push(small1);
119+
trie.push(small2);
120+
trie.push(large);
121+
trie.remove(trie.find_best_fit(small1->inner_size()));
122+
EXPECT_EQ(trie.find_best_fit(large->inner_size())->block(), large);
123+
trie.remove(trie.find_best_fit(small1->inner_size()));
124+
EXPECT_EQ(trie.find_best_fit(large->inner_size())->block(), large);
125+
}
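Finally, the FreeTrie tests spell out the search contract for the new best-fit trie: find_best_fit returns the node of the smallest free block whose inner size covers the request, visiting the lower subtrie first and descending into the upper subtrie only when it could hold a better fit. A hedged usage sketch restricted to the operations shown above:

#include "src/__support/freetrie.h"

using LIBC_NAMESPACE::Block;
using LIBC_NAMESPACE::FreeTrie;
using LIBC_NAMESPACE::cpp::byte;
using LIBC_NAMESPACE::cpp::optional;

void freetrie_sketch() {
  byte mem[4096];
  optional<Block<> *> maybe_block = Block<>::init(mem);
  if (!maybe_block.has_value())
    return;
  Block<> *small = *maybe_block;
  optional<Block<> *> maybe_large = small->split(512);
  if (!maybe_large.has_value())
    return;
  Block<> *large = *maybe_large;

  FreeTrie trie({0, 4096}); // Size range the trie covers, as in the tests.
  trie.push(small);
  trie.push(large);

  // A 600-byte request skips the ~512-byte block and lands on the larger one.
  FreeTrie::Node *node = trie.find_best_fit(600);
  if (node) {
    Block<> *block = node->block();
    trie.remove(node); // Detach the node before handing out the block.
    (void)block;
  }
}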
