/*
 *    Stack-less Just-In-Time compiler
 *
 *    Copyright 2009-2010 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification, are
 * permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice, this list of
 *      conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright notice, this list
 *      of conditions and the following disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
   This file contains a simple executable memory allocator.

   It is assumed that executable code blocks are usually medium (or sometimes
   large) memory blocks, and that the allocator is called relatively rarely
   (so it is less optimized than general purpose allocators). Thus, using it
   as a generic allocator is not suggested.

   How does it work:
     Memory is allocated in contiguous memory areas called chunks by alloc_chunk().
     Chunk format:
     [ block ][ block ] ... [ block ][ block terminator ]

   Every block and the block terminator starts with a block_header. The block
   header stores the size of the block itself and the size of the previous
   block. These sizes can also hold special values.
     Block size:
       0 - The block is a free_block; its real size is stored in the
           free_block's own size member.
       1 - The block is a block terminator.
       n - The block is currently in use, and the value is its size.
     Previous block size:
       0 - This is the first block of the memory chunk.
       n - The size of the previous block.

   Using these size values we can walk forward or backward along the block
   chain. The unused blocks are stored in a linked list pointed to by
   free_blocks. This list is used to find a suitable memory area when the
   allocator is called.

   When a block is freed, the new free block is merged with its adjacent free
   blocks if possible. For example, when "used block" in the layout

     [ free block ][ used block ][ free block ]

   is freed, the three blocks are merged into a single one:

     [           one big free block           ]
*/

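/*
   Illustrative example (invented numbers, assuming a 32-bit build where
   sizeof(struct block_header) == 8): after a single 256 byte request is
   served from a fresh 64 KByte chunk, the request is rounded up to
   ALIGN_SIZE(256) == 264 and the chunk looks like this:

     offset     0 : used block  { size = 264, prev_size = 0 }
     offset   264 : free block  { header.size = 0, header.prev_size = 264,
                                  size = 65264 }
     offset 65528 : terminator  { size = 1, prev_size = 65264 }

   The caller gets the address at offset 8 (MEM_START of the first block),
   and 264 + 65264 + 8 == 65536 == CHUNK_SIZE.
*/
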
/* --------------------------------------------------------------------- */
/* System (OS) functions */
/* --------------------------------------------------------------------- */

/* 64 KByte. */
#define CHUNK_SIZE 0x10000

/*
   alloc_chunk / free_chunk :
     * allocate executable system memory chunks
     * the size is always divisible by CHUNK_SIZE
   allocator_grab_lock / allocator_release_lock :
     * make the allocator thread-safe
     * can be empty if the OS (or the application) does not support threading
     * only the allocator requires this lock; sljit itself is fully
       thread-safe as it only uses local variables
*/

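/*
   A minimal sketch of how the lock hooks could look on a POSIX system
   (illustrative only; this is not part of the allocator itself, and real
   builds normally obtain allocator_grab_lock / allocator_release_lock from
   the surrounding project):

     #include <pthread.h>

     static pthread_mutex_t allocator_mutex = PTHREAD_MUTEX_INITIALIZER;

     static void allocator_grab_lock(void)
     {
         pthread_mutex_lock(&allocator_mutex);
     }

     static void allocator_release_lock(void)
     {
         pthread_mutex_unlock(&allocator_mutex);
     }
*/
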
#ifdef _WIN32

static SLJIT_INLINE void* alloc_chunk(sljit_uw size)
{
	return VirtualAlloc(0, size, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE);
}

static SLJIT_INLINE void free_chunk(void* chunk, sljit_uw size)
{
	SLJIT_UNUSED_ARG(size);
	VirtualFree(chunk, 0, MEM_RELEASE);
}

#else

#include <sys/mman.h>

static SLJIT_INLINE void* alloc_chunk(sljit_uw size)
{
	void* retval = mmap(0, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANON, -1, 0);
	return (retval != MAP_FAILED) ? retval : NULL;
}

static SLJIT_INLINE void free_chunk(void* chunk, sljit_uw size)
{
	munmap(chunk, size);
}

#endif

/* --------------------------------------------------------------------- */
/* Common functions */
/* --------------------------------------------------------------------- */

#define CHUNK_MASK (~(CHUNK_SIZE - 1))

struct block_header {
	sljit_uw size;
	sljit_uw prev_size;
};

struct free_block {
	struct block_header header;
	struct free_block *next;
	struct free_block *prev;
	sljit_uw size;
};

#define AS_BLOCK_HEADER(base, offset) \
	((struct block_header*)(((sljit_ub*)base) + offset))
#define AS_FREE_BLOCK(base, offset) \
	((struct free_block*)(((sljit_ub*)base) + offset))
#define MEM_START(base) ((void*)(((sljit_ub*)base) + sizeof(struct block_header)))
#define ALIGN_SIZE(size) (((size) + sizeof(struct block_header) + 7) & ~7)

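/* Navigation in practice: for a block_header *h that is in use,
   AS_BLOCK_HEADER(h, h->size) is the next block in the chunk and
   AS_BLOCK_HEADER(h, -(sljit_w)h->prev_size) is the previous one.
   MEM_START(h) is the address returned to the caller, and ALIGN_SIZE()
   rounds the requested size (plus the header) up to a multiple of 8. */
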
/* Head of the doubly linked list of free blocks. */
static struct free_block* free_blocks;
/* Total size of the blocks currently in use (block headers included). */
static sljit_uw allocated_size;
/* Total usable size of the chunks obtained from the OS
   (the block terminator of each chunk is not counted). */
static sljit_uw total_size;

/* Link a new free block at the head of the free_blocks list. */
static SLJIT_INLINE void sljit_insert_free_block(struct free_block *free_block, sljit_uw size)
{
	free_block->header.size = 0;
	free_block->size = size;

	free_block->next = free_blocks;
	free_block->prev = 0;
	if (free_blocks)
		free_blocks->prev = free_block;
	free_blocks = free_block;
}

/* Unlink a free block from the free_blocks list. */
static SLJIT_INLINE void sljit_remove_free_block(struct free_block *free_block)
{
	if (free_block->next)
		free_block->next->prev = free_block->prev;

	if (free_block->prev)
		free_block->prev->next = free_block->next;
	else {
		SLJIT_ASSERT(free_blocks == free_block);
		free_blocks = free_block->next;
	}
}

SLJIT_API_FUNC_ATTRIBUTE void* sljit_malloc_exec(sljit_uw size)
{
	struct block_header *header;
	struct block_header *next_header;
	struct free_block *free_block;
	sljit_uw chunk_size;

	allocator_grab_lock();
	if (size < sizeof(struct free_block))
		size = sizeof(struct free_block);
	size = ALIGN_SIZE(size);

	/* First-fit search in the free block list. */
	free_block = free_blocks;
	while (free_block) {
		if (free_block->size >= size) {
			chunk_size = free_block->size;
			if (chunk_size > size + 64) {
				/* We just cut a block from the end of the free block. */
				chunk_size -= size;
				free_block->size = chunk_size;
				header = AS_BLOCK_HEADER(free_block, chunk_size);
				header->prev_size = chunk_size;
				AS_BLOCK_HEADER(header, size)->prev_size = size;
			}
			else {
				/* Not worth splitting; hand out the whole free block. */
				sljit_remove_free_block(free_block);
				header = (struct block_header*)free_block;
				size = chunk_size;
			}
			allocated_size += size;
			header->size = size;
			allocator_release_lock();
			return MEM_START(header);
		}
		free_block = free_block->next;
	}

	/* No suitable free block; allocate a new chunk from the OS. */
	chunk_size = (size + sizeof(struct block_header) + CHUNK_SIZE - 1) & CHUNK_MASK;
	header = (struct block_header*)alloc_chunk(chunk_size);
	if (!header) {
		/* Release the lock before failing, otherwise a failed
		   allocation would leave the allocator locked. */
		allocator_release_lock();
		return NULL;
	}

	chunk_size -= sizeof(struct block_header);
	total_size += chunk_size;

	header->prev_size = 0;
	if (chunk_size > size + 64) {
		/* Cut the allocated space into a free and a used block. */
		allocated_size += size;
		header->size = size;
		chunk_size -= size;

		free_block = AS_FREE_BLOCK(header, size);
		free_block->header.prev_size = size;
		sljit_insert_free_block(free_block, chunk_size);
		next_header = AS_BLOCK_HEADER(free_block, chunk_size);
	}
	else {
		/* All space belongs to this allocation. */
		allocated_size += chunk_size;
		header->size = chunk_size;
		next_header = AS_BLOCK_HEADER(header, chunk_size);
	}
	next_header->size = 1;
	next_header->prev_size = chunk_size;
	allocator_release_lock();
	return MEM_START(header);
}

SLJIT_API_FUNC_ATTRIBUTE void sljit_free_exec(void* ptr)
{
	struct block_header *header;
	struct free_block* free_block;

	allocator_grab_lock();
	header = AS_BLOCK_HEADER(ptr, -(sljit_w)sizeof(struct block_header));
	allocated_size -= header->size;

	/* Connect adjacent free blocks together if possible. */

	/* If header->prev_size == 0, free_block will be equal to header.
	   In this case, free_block->header.size will be > 0. */
	free_block = AS_FREE_BLOCK(header, -(sljit_w)header->prev_size);
	if (SLJIT_UNLIKELY(!free_block->header.size)) {
		/* The previous block is free: extend it with the freed block. */
		free_block->size += header->size;
		header = AS_BLOCK_HEADER(free_block, free_block->size);
		header->prev_size = free_block->size;
	}
	else {
		/* The previous block is in use: turn the freed block itself
		   into a free block. */
		free_block = (struct free_block*)header;
		sljit_insert_free_block(free_block, header->size);
	}

	/* If the following block is also free, merge it as well. */
	header = AS_BLOCK_HEADER(free_block, free_block->size);
	if (SLJIT_UNLIKELY(!header->size)) {
		free_block->size += ((struct free_block*)header)->size;
		sljit_remove_free_block((struct free_block*)header);
		header = AS_BLOCK_HEADER(free_block, free_block->size);
		header->prev_size = free_block->size;
	}

	/* The whole chunk is free now. */
	if (SLJIT_UNLIKELY(!free_block->header.prev_size && header->size == 1)) {
		/* Return the chunk to the OS, but only if more than
		   (allocated_size / 2) bytes of free space would still remain. */
		if (total_size - free_block->size > (allocated_size * 3 / 2)) {
			total_size -= free_block->size;
			sljit_remove_free_block(free_block);
			free_chunk(free_block, free_block->size + sizeof(struct block_header));
		}
	}

	allocator_release_lock();
}
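
/*
   A minimal usage sketch (illustrative only; "generated_bytes" and
   "code_size" are hypothetical names, not part of this file). The buffer
   returned by sljit_malloc_exec() is mapped writable and executable, so
   generated machine code can be copied into it and called directly:

     void *code = sljit_malloc_exec(code_size);
     if (code) {
         memcpy(code, generated_bytes, code_size);   // memcpy from <string.h>
         ((void (*)(void))code)();                   // jump to the generated code
         sljit_free_exec(code);
     }
*/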