/[pcre]/code/trunk/sljit/sljitExecAllocator.c
ViewVC logotype

Contents of /code/trunk/sljit/sljitExecAllocator.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 1757 - (show annotations)
Tue Nov 19 12:54:59 2019 UTC (6 months, 1 week ago) by zherczeg
File MIME type: text/plain
File size: 11709 byte(s)
JIT compiler update.
1 /*
2 * Stack-less Just-In-Time compiler
3 *
4 * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without modification, are
7 * permitted provided that the following conditions are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright notice, this list of
10 * conditions and the following disclaimer.
11 *
12 * 2. Redistributions in binary form must reproduce the above copyright notice, this list
13 * of conditions and the following disclaimer in the documentation and/or other materials
14 * provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
17 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
19 * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
21 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
22 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 /*
28 This file contains a simple executable memory allocator
29
30 It is assumed, that executable code blocks are usually medium (or sometimes
31 large) memory blocks, and the allocator is not too frequently called (less
32 optimized than other allocators). Thus, using it as a generic allocator is
33 not suggested.
34
35 How does it work:
36 Memory is allocated in continuous memory areas called chunks by alloc_chunk()
37 Chunk format:
38 [ block ][ block ] ... [ block ][ block terminator ]
39
40 All blocks and the block terminator start with a block_header. The block
41 header contains the size of the previous and the next block. These sizes
42 can also contain special values.
43 Block size:
44 0 - The block is a free_block, with a different size member.
45 1 - The block is a block terminator.
46 n - The block is used at the moment, and the value contains its size.
47 Previous block size:
48 0 - This is the first block of the memory chunk.
49 n - The size of the previous block.
50
51 Using these size values we can go forward or backward on the block chain.
52 The unused blocks are stored in a chain list pointed by free_blocks. This
53 list is useful if we need to find a suitable memory area when the allocator
54 is called.
55
56 When a block is freed, the new free block is connected to its adjacent free
57 blocks if possible.
58
59 [ free block ][ used block ][ free block ]
60 and the "used block" is freed, the three blocks are merged into one:
61 [ one big free block ]
62 */
63
64 /* --------------------------------------------------------------------- */
65 /* System (OS) functions */
66 /* --------------------------------------------------------------------- */
67
/* 64 KByte. Must be a power of two, since CHUNK_MASK is derived from it. */
#define CHUNK_SIZE 0x10000
70
71 /*
72 alloc_chunk / free_chunk :
73 * allocate executable system memory chunks
74 * the size is always divisible by CHUNK_SIZE
75 allocator_grab_lock / allocator_release_lock :
76 * make the allocator thread safe
77 * can be empty if the OS (or the application) does not support threading
78 * only the allocator requires this lock, sljit is fully thread safe
79 as it only uses local variables
80 */
81
82 #ifdef _WIN32
83
/* Allocates a committed read/write/execute memory chunk of the given size.
   Returns NULL on failure. */
static SLJIT_INLINE void* alloc_chunk(sljit_uw size)
{
	/* NOTE(review): PAGE_EXECUTE_READWRITE maps the chunk writable and
	   executable at the same time; no W^X scheme is used on Windows here. */
	return VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE);
}
88
/* Returns a chunk obtained from alloc_chunk() to the system. */
static SLJIT_INLINE void free_chunk(void *chunk, sljit_uw size)
{
	SLJIT_UNUSED_ARG(size);
	/* VirtualFree with MEM_RELEASE requires a zero size argument;
	   the whole reservation is released at once. */
	VirtualFree(chunk, 0, MEM_RELEASE);
}
94
95 #else
96
97 #ifdef __APPLE__
98 /* Configures TARGET_OS_OSX when appropriate */
99 #include <TargetConditionals.h>
100
101 #if TARGET_OS_OSX && defined(MAP_JIT)
102 #include <sys/utsname.h>
103 #endif /* TARGET_OS_OSX && MAP_JIT */
104
105 #ifdef MAP_JIT
106
107 static SLJIT_INLINE int get_map_jit_flag()
108 {
109 #if TARGET_OS_OSX
110 /* On macOS systems, returns MAP_JIT if it is defined _and_ we're running on a version
111 of macOS where it's OK to have more than one JIT block. On non-macOS systems, returns
112 MAP_JIT if it is defined. */
113 static int map_jit_flag = -1;
114
115 /* The following code is thread safe because multiple initialization
116 sets map_jit_flag to the same value and the code has no side-effects.
117 Changing the kernel version witout system restart is (very) unlikely. */
118 if (map_jit_flag == -1) {
119 struct utsname name;
120
121 map_jit_flag = 0;
122 uname(&name);
123
124 /* Kernel version for 10.14.0 (Mojave) */
125 if (atoi(name.release) >= 18) {
126 /* Only use MAP_JIT if a hardened runtime is used, because MAP_JIT is incompatible with fork(). */
127 void *ptr = mmap(NULL, getpagesize(), PROT_WRITE|PROT_EXEC, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
128
129 if (ptr == MAP_FAILED) {
130 map_jit_flag = MAP_JIT;
131 } else {
132 munmap(ptr, getpagesize());
133 }
134 }
135 }
136
137 return map_jit_flag;
138 #else /* !TARGET_OS_OSX */
139 return MAP_JIT;
140 #endif /* TARGET_OS_OSX */
141 }
142
143 #endif /* MAP_JIT */
144
145 #endif /* __APPLE__ */
146
/* Allocates a read/write/execute memory chunk of the given size with mmap().
   Falls back to mapping /dev/zero on systems without anonymous mappings.
   Returns NULL on failure. */
static SLJIT_INLINE void* alloc_chunk(sljit_uw size)
{
	void *retval;
	const int prot = PROT_READ | PROT_WRITE | PROT_EXEC;

#ifdef MAP_ANON

	int flags = MAP_PRIVATE | MAP_ANON;

#ifdef MAP_JIT
	/* On Apple targets MAP_JIT may be required for executable pages. */
	flags |= get_map_jit_flag();
#endif

	retval = mmap(NULL, size, prot, flags, -1, 0);
#else /* !MAP_ANON */
	/* No anonymous mapping support: map /dev/zero instead.
	   dev_zero / open_dev_zero() are defined elsewhere in sljit. */
	if (dev_zero < 0) {
		if (open_dev_zero())
			return NULL;
	}
	retval = mmap(NULL, size, prot, MAP_PRIVATE, dev_zero, 0);
#endif /* MAP_ANON */

	if (retval == MAP_FAILED)
		retval = NULL;
	else {
		/* NOTE(review): mmap was already asked for rwx; this mprotect
		   presumably rejects systems that silently drop PROT_EXEC from
		   the mapping — confirm before removing. */
		if (mprotect(retval, size, prot) < 0) {
			munmap(retval, size);
			retval = NULL;
		}
	}

	return retval;
}
180
/* Returns a chunk obtained from alloc_chunk() to the system. */
static SLJIT_INLINE void free_chunk(void *chunk, sljit_uw size)
{
	munmap(chunk, size);
}
185
186 #endif
187
188 /* --------------------------------------------------------------------- */
189 /* Common functions */
190 /* --------------------------------------------------------------------- */
191
/* Mask used to round allocation sizes up/down to a multiple of CHUNK_SIZE. */
#define CHUNK_MASK (~(CHUNK_SIZE - 1))

/* Header placed in front of every block (used, free, and terminator).
   See the block format description at the top of this file. */
struct block_header {
	sljit_uw size;      /* 0: free block, 1: chunk terminator, n: size of this used block. */
	sljit_uw prev_size; /* 0: first block of the chunk, n: size of the previous block. */
};

/* Bookkeeping record stored inside the memory of a free block. */
struct free_block {
	struct block_header header; /* header.size is always 0 for a free block. */
	struct free_block *next;    /* Next entry in the free_blocks list. */
	struct free_block *prev;    /* Previous entry in the free_blocks list. */
	sljit_uw size;              /* Size of this free block (header.size cannot hold it). */
};

/* Pointer arithmetic helpers: offset is in bytes. */
#define AS_BLOCK_HEADER(base, offset) \
	((struct block_header*)(((sljit_u8*)base) + offset))
#define AS_FREE_BLOCK(base, offset) \
	((struct free_block*)(((sljit_u8*)base) + offset))
/* Payload address of a block: just past its header. */
#define MEM_START(base) ((void*)(((sljit_u8*)base) + sizeof(struct block_header)))
/* Requested payload size plus header, rounded up to 8 bytes. */
#define ALIGN_SIZE(size) (((size) + sizeof(struct block_header) + 7) & ~7)

/* Head of the doubly linked list of free blocks.
   Guarded by allocator_grab_lock() / allocator_release_lock(). */
static struct free_block* free_blocks;
static sljit_uw allocated_size; /* Total size handed out to callers. */
static sljit_uw total_size;     /* Total size obtained from the system (minus terminators). */
216
217 static SLJIT_INLINE void sljit_insert_free_block(struct free_block *free_block, sljit_uw size)
218 {
219 free_block->header.size = 0;
220 free_block->size = size;
221
222 free_block->next = free_blocks;
223 free_block->prev = NULL;
224 if (free_blocks)
225 free_blocks->prev = free_block;
226 free_blocks = free_block;
227 }
228
229 static SLJIT_INLINE void sljit_remove_free_block(struct free_block *free_block)
230 {
231 if (free_block->next)
232 free_block->next->prev = free_block->prev;
233
234 if (free_block->prev)
235 free_block->prev->next = free_block->next;
236 else {
237 SLJIT_ASSERT(free_blocks == free_block);
238 free_blocks = free_block->next;
239 }
240 }
241
/* Allocates executable memory of at least the requested size.
   Returns the payload address (just past the block header), or NULL if
   the system could not supply a new chunk. Thread safe via the
   allocator lock. */
SLJIT_API_FUNC_ATTRIBUTE void* sljit_malloc_exec(sljit_uw size)
{
	struct block_header *header;
	struct block_header *next_header;
	struct free_block *free_block;
	sljit_uw chunk_size;

	allocator_grab_lock();
	/* Enforce a minimum block size of 64 bytes (header included), so a
	   freed block is always large enough to hold a struct free_block,
	   then round up to 8-byte alignment. */
	if (size < (64 - sizeof(struct block_header)))
		size = (64 - sizeof(struct block_header));
	size = ALIGN_SIZE(size);

	/* First-fit search of the free block list. */
	free_block = free_blocks;
	while (free_block) {
		if (free_block->size >= size) {
			chunk_size = free_block->size;
			/* Only split when at least 64 bytes would remain free. */
			if (chunk_size > size + 64) {
				/* We just cut a block from the end of the free block. */
				chunk_size -= size;
				free_block->size = chunk_size;
				header = AS_BLOCK_HEADER(free_block, chunk_size);
				header->prev_size = chunk_size;
				AS_BLOCK_HEADER(header, size)->prev_size = size;
			}
			else {
				/* Not worth splitting: hand out the whole free block. */
				sljit_remove_free_block(free_block);
				header = (struct block_header*)free_block;
				size = chunk_size;
			}
			allocated_size += size;
			header->size = size;
			allocator_release_lock();
			return MEM_START(header);
		}
		free_block = free_block->next;
	}

	/* No suitable free block: get a new chunk, rounded up to CHUNK_SIZE. */
	chunk_size = (size + sizeof(struct block_header) + CHUNK_SIZE - 1) & CHUNK_MASK;
	header = (struct block_header*)alloc_chunk(chunk_size);
	if (!header) {
		allocator_release_lock();
		return NULL;
	}

	/* Reserve room for the block terminator at the end of the chunk. */
	chunk_size -= sizeof(struct block_header);
	total_size += chunk_size;

	header->prev_size = 0; /* First block of the chunk. */
	if (chunk_size > size + 64) {
		/* Cut the allocated space into a free and a used block. */
		allocated_size += size;
		header->size = size;
		chunk_size -= size;

		free_block = AS_FREE_BLOCK(header, size);
		free_block->header.prev_size = size;
		sljit_insert_free_block(free_block, chunk_size);
		next_header = AS_BLOCK_HEADER(free_block, chunk_size);
	}
	else {
		/* All space belongs to this allocation. */
		allocated_size += chunk_size;
		header->size = chunk_size;
		next_header = AS_BLOCK_HEADER(header, chunk_size);
	}
	/* Write the block terminator (size == 1) at the end of the chunk. */
	next_header->size = 1;
	next_header->prev_size = chunk_size;
	allocator_release_lock();
	return MEM_START(header);
}
312
/* Returns a block obtained from sljit_malloc_exec() to the allocator,
   merging it with adjacent free blocks when possible, and releases the
   whole chunk back to the system when it becomes empty and enough free
   space remains elsewhere. Thread safe via the allocator lock. */
SLJIT_API_FUNC_ATTRIBUTE void sljit_free_exec(void* ptr)
{
	struct block_header *header;
	struct free_block* free_block;

	allocator_grab_lock();
	/* Step back from the payload pointer to its block header. */
	header = AS_BLOCK_HEADER(ptr, -(sljit_sw)sizeof(struct block_header));
	allocated_size -= header->size;

	/* Connecting free blocks together if possible. */

	/* If header->prev_size == 0, free_block will equal to header.
	   In this case, free_block->header.size will be > 0. */
	free_block = AS_FREE_BLOCK(header, -(sljit_sw)header->prev_size);
	if (SLJIT_UNLIKELY(!free_block->header.size)) {
		/* Preceding block is free: absorb this block into it. */
		free_block->size += header->size;
		header = AS_BLOCK_HEADER(free_block, free_block->size);
		header->prev_size = free_block->size;
	}
	else {
		/* No free predecessor: this block becomes a new free block. */
		free_block = (struct free_block*)header;
		sljit_insert_free_block(free_block, header->size);
	}

	/* Absorb the following block as well if it is free. */
	header = AS_BLOCK_HEADER(free_block, free_block->size);
	if (SLJIT_UNLIKELY(!header->size)) {
		free_block->size += ((struct free_block*)header)->size;
		sljit_remove_free_block((struct free_block*)header);
		header = AS_BLOCK_HEADER(free_block, free_block->size);
		header->prev_size = free_block->size;
	}

	/* The whole chunk is free (first block, followed by the terminator). */
	if (SLJIT_UNLIKELY(!free_block->header.prev_size && header->size == 1)) {
		/* If this block is freed, we still have (allocated_size / 2) free space. */
		if (total_size - free_block->size > (allocated_size * 3 / 2)) {
			total_size -= free_block->size;
			sljit_remove_free_block(free_block);
			free_chunk(free_block, free_block->size + sizeof(struct block_header));
		}
	}

	allocator_release_lock();
}
357
358 SLJIT_API_FUNC_ATTRIBUTE void sljit_free_unused_memory_exec(void)
359 {
360 struct free_block* free_block;
361 struct free_block* next_free_block;
362
363 allocator_grab_lock();
364
365 free_block = free_blocks;
366 while (free_block) {
367 next_free_block = free_block->next;
368 if (!free_block->header.prev_size &&
369 AS_BLOCK_HEADER(free_block, free_block->size)->size == 1) {
370 total_size -= free_block->size;
371 sljit_remove_free_block(free_block);
372 free_chunk(free_block, free_block->size + sizeof(struct block_header));
373 }
374 free_block = next_free_block;
375 }
376
377 SLJIT_ASSERT((total_size && free_blocks) || (!total_size && !free_blocks));
378 allocator_release_lock();
379 }

  ViewVC Help
Powered by ViewVC 1.1.5