1 /// Fast slab-entry based memory allocator with simple defragmentation.
2 module tern.experimental.heap_allocator;
3 
4 import std.experimental.allocator.mmap_allocator;
5 import core.sync.mutex;
6 import tanya.container.list;
7 import tanya.container.hashtable;
8 import tern.memory;
9 
10 private enum SLAB_SIZE = 1_048_576;
11 private enum ALIGN = size_t.sizeof * 4;
12 private static immutable MmapAllocator os;
13 private shared static Mutex mutex;
14 private static SList!Slab slabs;
15 
shared static this()
{
    // Create the lock first, then seed the allocator with its initial slab
    // so the module is immediately usable after construction.
    mutex = new shared Mutex();
    slabs.insertFront(Slab(os.allocate(SLAB_SIZE).ptr, SLAB_SIZE));
}
21 
/// A single mmap-backed region that entries are bump-allocated out of.
private struct Slab
{
public:
final:
@nogc:
    void* baseAddress;                // start of the backing memory region
    size_t offset;                    // bump offset where the next fresh entry begins
    size_t size;                      // total capacity of the region in bytes
    HashTable!(void*, Entry) entries; // every entry ever inserted, keyed by base pointer
    SList!Entry available;            // entries released by free(), eligible for reuse

    pure this(void* baseAddress, size_t size)
    {
        this.baseAddress = baseAddress;
        this.size = size;
    }

    /// True when every inserted entry has been freed, i.e. the slab holds
    /// no live allocations. O(n) in the free list.
    pragma(inline)
    bool empty()
    {
        size_t count;
        foreach (entry; available)
            count++;
        return count == entries.length;
    }

    /// Releases the entry at `ptr` back onto the free list. When the slab
    /// becomes entirely free, all bookkeeping is dropped so the region can
    /// be reused from scratch (simple defragmentation).
    ///
    /// Params:
    ///  ptr = Base address of the entry to release.
    ///
    /// Returns:
    ///  True if `ptr` was an entry of this slab.
    ///
    /// NOTE(review): a double free is not detected — the entry is pushed
    /// onto `available` again and `true` is returned; confirm callers
    /// never double-free.
    pragma(inline)
    bool free(void* ptr)
    {
        if (ptr !in entries)
            return false;
        
        available.insertFront(entries[ptr]);
        if (empty)
        {
            available.clear();
            entries.clear();
        }
        return true;
    }

    /// Reserves a fresh entry of at least `size` bytes at the current
    /// bump offset and records it in `entries`.
    ///
    /// Returns:
    ///  Base pointer of the new entry.
    pragma(inline)
    void* insertEntry(size_t size)
    {
        void* ptr = baseAddress + offset;
        // Round the reserved size up to a multiple of ALIGN so every
        // subsequent entry pointer stays ALIGN-aligned (the mmap'd base is
        // page-aligned and offset only grows by multiples of ALIGN).
        // The previous formula, ALIGN - (ptr & (ALIGN - 1)), burned a full
        // ALIGN on already-aligned pointers and did not actually keep
        // later entries aligned.
        size = (size + ALIGN - 1) & ~(ALIGN - 1);
        entries[ptr] = Entry(ptr, size);
        offset += size;
        return ptr;
    }
}
73 
/// A single allocation record inside a slab.
public struct Entry
{
public:
final:
    // Base address of the entry within its owning slab.
    void* ptr;
    // Reserved size in bytes; includes the alignment padding added by
    // Slab.insertEntry, so it may exceed the size originally requested.
    size_t size;
}
81 
82 public:
83 final:
84 @nogc:
85 static:
/**
 * Allocates an entry of `size` 
 *
 * Params:
 *  threadSafe = Should this operation be thread safe? Default false.
 *  size = Size to be allocated.
 *
 * Returns:
 *  Pointer to the allocated entry.
 */
pragma(inline)
@trusted void* malloc(bool threadSafe = false)(size_t size)
{
    static if (threadSafe)
    {
        synchronized (mutex)
            return allocateEntry(size);
    }
    else
    {
        return allocateEntry(size);
    }
}

// Shared implementation of malloc; the threadSafe instantiation holds the
// module mutex around this call.
pragma(inline)
private void* allocateEntry(size_t size)
{
    foreach (ref slab; slabs)
    {
        // NOTE(review): freed entries inside a slab whose bump region is
        // exhausted are never considered for reuse — confirm intended.
        if (slab.offset + size <= slab.size)
        {
            // Prefer reusing a freed entry that is large enough. The reused
            // entry is removed from the free list so it cannot be handed out
            // twice (previously it was returned but left on the list,
            // allowing double allocation of the same address).
            SList!Entry rest;
            Entry match;
            bool reused;
            foreach (entry; slab.available)
            {
                if (!reused && entry.size >= size)
                {
                    reused = true;
                    match = entry;
                }
                else
                {
                    rest.insertFront(entry);
                }
            }
            if (reused)
            {
                slab.available = rest;
                return match.ptr;
            }
            return slab.insertEntry(size);
        }
    }

    // No existing slab can hold the request: map a new slab, sized to the
    // request itself when it exceeds the standard slab size.
    immutable size_t slabSize = size > SLAB_SIZE ? size : SLAB_SIZE;
    slabs.insertFront(Slab(os.allocate(slabSize).ptr, slabSize));
    return slabs.front.insertEntry(size);
}
151 
/**
 * Allocates an entry of `size` and zero-initializes its contents.
 *
 * Params:
 *  threadSafe = Should this operation be thread safe? Default false.
 *  size = Size of the new entry.
 *
 * Returns:
 *  Pointer to the allocated, zeroed entry.
 */
pragma(inline)
@trusted void* calloc(bool threadSafe = false)(size_t size)
{
    static if (threadSafe)
    {
        synchronized (mutex)
        {
            // malloc here is the non-locking instantiation; the mutex is
            // already held for the whole allocate-and-clear sequence.
            void* entry = malloc(size);
            memzero(entry, size);
            return entry;
        }
    }
    else
    {
        void* entry = malloc(size);
        memzero(entry, size);
        return entry;
    }
}
181 
/**
 * Reallocates `ptr` with `size`  
 * Tries to avoid actually doing a new allocation if possible.
 *
 * Params:
 *  threadSafe = Should this operation be thread safe? Default false.
 *  ptr = Pointer to entry to be reallocated; rewritten in place only when
 *        the entry has to move. Left untouched when the entry can stay
 *        put or is not found in any slab.
 *  size = Size of the new entry.
 */
pragma(inline)
@trusted void realloc(bool threadSafe = false)(ref void* ptr, size_t size)
{
    static if (threadSafe)
    {
        synchronized (mutex)
        {
            foreach (ref slab; slabs)
            {
                // Only the slab whose used region [baseAddress, baseAddress + offset)
                // contains ptr can own the entry.
                if (ptr >= slab.baseAddress && ptr < slab.baseAddress + slab.offset)
                {
                    // ptr is inside the slab but is not an entry base address;
                    // silently ignore.
                    if (ptr !in slab.entries)
                        return;

                    Entry entry = slab.entries[ptr];
                    // Stay in place when the entry is already large enough, or
                    // when there is room past it inside the slab and no recorded
                    // entry starts immediately behind it.
                    // NOTE(review): this grow-in-place path never writes the new
                    // size back into slab.entries, so later wake/realloc calls
                    // still see the old size — looks like a bug; confirm.
                    if (entry.size >= size || (cast(size_t)entry.ptr + size < slab.offset + slab.size && entry.ptr + entry.size !in slab.entries))
                        return;

                    // A freed entry sits directly behind this one; bail out.
                    // NOTE(review): the adjacent free entry is not merged or
                    // claimed here — presumably deferred to defragmentation;
                    // verify.
                    foreach (_entry; slab.available)
                    {
                        if (_entry.ptr == entry.ptr + entry.size)
                            return;
                    }

                    if (entry.ptr == ptr)
                    {
                        // Move path: allocate a new entry, copy the old
                        // contents, release the old entry, publish the new
                        // address through the ref parameter.
                        // assumes tern.memory.memcpy takes (src, dst, len) —
                        // TODO confirm against tern.memory.
                        void* dest = malloc(size);
                        memcpy(ptr, dest, entry.size);
                        slab.free(ptr);
                        ptr = dest;
                    }
                }
            }
        }
    }
    else
    {
        foreach (ref slab; slabs)
        {
            // Only the slab whose used region [baseAddress, baseAddress + offset)
            // contains ptr can own the entry.
            if (ptr >= slab.baseAddress && ptr < slab.baseAddress + slab.offset)
            {
                // ptr is inside the slab but is not an entry base address;
                // silently ignore.
                if (ptr !in slab.entries)
                    return;

                Entry entry = slab.entries[ptr];
                // Stay in place when the entry is already large enough, or
                // when there is room past it inside the slab and no recorded
                // entry starts immediately behind it.
                // NOTE(review): this grow-in-place path never writes the new
                // size back into slab.entries, so later wake/realloc calls
                // still see the old size — looks like a bug; confirm.
                if (entry.size >= size || (cast(size_t)entry.ptr + size < slab.offset + slab.size && entry.ptr + entry.size !in slab.entries))
                    return;

                // A freed entry sits directly behind this one; bail out.
                // NOTE(review): the adjacent free entry is not merged or
                // claimed here — presumably deferred to defragmentation;
                // verify.
                foreach (_entry; slab.available)
                {
                    if (_entry.ptr == entry.ptr + entry.size)
                        return;
                }

                if (entry.ptr == ptr)
                {
                    // Move path: allocate a new entry, copy the old contents,
                    // release the old entry, publish the new address through
                    // the ref parameter.
                    // assumes tern.memory.memcpy takes (src, dst, len) —
                    // TODO confirm against tern.memory.
                    void* dest = malloc(size);
                    memcpy(ptr, dest, entry.size);
                    slab.free(ptr);
                    ptr = dest;
                }
            }
        }
    }
}
256 
/**
 * Zeroes the entry pointed to by `ptr`.
 *
 * Params:
 *  threadSafe = Should this operation be thread safe? Default false.
 *  ptr = Pointer to entry to be zeroed.
 */
pragma(inline)
@trusted void wake(bool threadSafe = false)(void* ptr)
{
    static if (threadSafe)
    {
        synchronized (mutex)
            zeroEntry(ptr);
    }
    else
    {
        zeroEntry(ptr);
    }
}

// Finds the slab whose used region contains `ptr` and zeroes the full
// reserved size of the entry starting there, if any.
pragma(inline)
private void zeroEntry(void* ptr)
{
    foreach (ref slab; slabs)
    {
        // Skip slabs whose bump-allocated region does not contain ptr.
        if (ptr < slab.baseAddress || ptr >= slab.baseAddress + slab.offset)
            continue;

        // Inside this slab but not an entry base address: nothing can be
        // zeroed safely.
        if (ptr !in slab.entries)
            return;

        memzero(ptr, slab.entries[ptr].size);
    }
}
299 
/**
 * Frees the entry pointed to by `ptr`.
 *
 * Params:
 *  threadSafe = Should this operation be thread safe? Default false.
 *  ptr = Pointer to entry to be freed.
 *
 * Returns:
 *  True if this succeeded, otherwise false.
 */
pragma(inline)
@trusted bool free(bool threadSafe = false)(void* ptr)
{
    static if (threadSafe)
    {
        synchronized (mutex)
            return releaseEntry(ptr);
    }
    else
    {
        return releaseEntry(ptr);
    }
}

// Offers `ptr` to each slab in turn; only the owning slab will accept it.
pragma(inline)
private bool releaseEntry(void* ptr)
{
    foreach (ref slab; slabs)
    {
        if (slab.free(ptr))
            return true;
    }
    return false;
}
335 
/**
 * Returns an entire slab's memory to the operating system.
 *
 * Params:
 *  threadSafe = Should this operation be thread safe? Default false.
 *  ptr = Base address of the slab to deallocate (must equal a slab's
 *        `baseAddress`, not an entry pointer).
 *
 * Returns:
 *  True if a slab with that base address was found and unmapped,
 *  otherwise false.
 */
pragma(inline)
@trusted bool deallocate(bool threadSafe = false)(void* ptr)
{
    static if (threadSafe)
    {
        synchronized (mutex)
            return deallocateSlab(ptr);
    }
    else
    {
        return deallocateSlab(ptr);
    }
}

// Shared implementation of deallocate; the threadSafe instantiation holds
// the module mutex around this call.
pragma(inline)
private bool deallocateSlab(void* ptr)
{
    foreach (ref slab; slabs)
    {
        if (ptr != slab.baseAddress)
            continue;

        // Rebuild the slice that os.allocate originally returned via
        // pointer slicing, instead of hand-assembling a void[] by casting
        // into its ABI layout as before — same slice, defined behavior.
        if (!os.deallocate(ptr[0 .. slab.size]))
            return false;

        // Zero the capacity so malloc can no longer place entries here.
        // NOTE(review): the slab stays in the list with a dangling
        // baseAddress — consider unlinking it during defragmentation.
        slab.size = 0;
        return true;
    }
    return false;
}