tramp-heap.c
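/* tramp-heap.c -- a small heap of trampolines carved out of code/data page
   pairs.  Each allocation hands out one trampoline slot in a code page; the
   slot's parameters (target function address and chain value) are stored at
   the same offset in the paired data page, one PAGE_SIZE away.  */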
#define _GNU_SOURCE
#include <limits.h>
#include <pthread.h>
#include <stdint.h>

#include "tramp.h"

#define BITS_PER_INT (CHAR_BIT * sizeof(int))
#define MASK_SIZE ((TRAMP_COUNT + BITS_PER_INT - 1) / BITS_PER_INT)

struct tramp_heap_page;

/* Bookkeeping for one code/data page pair.  It lives at the start of the
   data page and links the page onto the allocator's lists.  */
struct tramp_heap_data
{
  struct tramp_heap_page *prev, *next;
  unsigned int inuse;                   /* Number of slots handed out.  */
  unsigned int inuse_mask[MASK_SIZE];   /* One bit per trampoline slot.  */
};

struct tramp_heap_page
{
  char code[PAGE_SIZE];
  struct tramp_heap_data data __attribute__((aligned(PAGE_SIZE)));
};
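
/* The bookkeeping structure occupies the first TRAMP_HEAP_RESERVE trampoline
   slots of the data page, so usable trampolines start at slot
   TRAMP_HEAP_RESERVE; the corresponding slots at the start of the code page
   are left unused by this allocator.  */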

#define TRAMP_HEAP_RESERVE \
  ((sizeof (struct tramp_heap_data) + TRAMP_SIZE - 1) / TRAMP_SIZE)
#define TRAMP_HEAP_COUNT \
  (TRAMP_COUNT - TRAMP_HEAP_RESERVE)

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* The page currently being filled, if any.  */
static struct tramp_heap_page *cur_page;

/* Doubly-linked list of pages that are neither full nor current.  */
static struct tramp_heap_page *notfull_page_list;

void *
__tramp_heap_alloc (uintptr_t fnaddr, uintptr_t chain_value)
{
  struct tramp_heap_page *page;
  unsigned int index;

  pthread_mutex_lock (&lock);

  /* Find a page with unused entries.  */
  page = cur_page;
  if (page == NULL)
    {
      page = notfull_page_list;
      if (page != NULL)
        {
          /* Pop the first not-full page off the list.  */
          notfull_page_list = page->data.next;
          if (page->data.next)
            {
              page->data.next->data.prev = NULL;
              page->data.next = NULL;
            }
        }
      else
        page = __tramp_alloc_pair ();
    }

  /* Increment the use count on this page.  */
  index = page->data.inuse++;
  cur_page = (index == TRAMP_HEAP_COUNT - 1 ? NULL : page);

  /* Find a free entry in the page.  Try index first.  */
  {
    unsigned int iofs, bofs, mask, old;

    iofs = index / BITS_PER_INT;
    bofs = index % BITS_PER_INT;
    mask = 1u << bofs;

    old = page->data.inuse_mask[iofs];
    if (old & mask)
      {
        /* Slot INDEX is already taken; skip over full mask words until we
           find one with a clear bit, then take its lowest clear bit.  */
        while (old == ~0u)
          {
            if (++iofs == MASK_SIZE)
              iofs = 0;
            old = page->data.inuse_mask[iofs];
          }
        mask = ~old & -~old;
        bofs = __builtin_ctz (mask);
        index = iofs * BITS_PER_INT + bofs;
      }
    page->data.inuse_mask[iofs] = old | mask;
  }

  /* Locate the code slot and its paired data slot one page away, and fill
     in the values the trampoline will consume.  */
  {
    void *tramp_code;
    uintptr_t *tramp_data;

    tramp_code = page->code + (index + TRAMP_HEAP_RESERVE) * TRAMP_SIZE;
    tramp_data = tramp_code + PAGE_SIZE;

    tramp_data[TRAMP_FUNCADDR_FIRST ? 0 : 1] = fnaddr;
    tramp_data[TRAMP_FUNCADDR_FIRST ? 1 : 0] = chain_value;

    pthread_mutex_unlock (&lock);
    return tramp_code;
  }
}

void
__tramp_heap_free (void *tramp)
{
  struct tramp_heap_page *page;
  unsigned int index;

  page = (void *)((uintptr_t)tramp & -PAGE_SIZE);
  index = ((uintptr_t)tramp & (PAGE_SIZE - 1)) / TRAMP_SIZE;
  index -= TRAMP_HEAP_RESERVE;

  pthread_mutex_lock (&lock);

  /* Decrement the inuse counter on the page.  Shuffle the page around
     to the proper list while we're at it.  */
  {
    unsigned int inuse = --page->data.inuse;

    if (page != cur_page)
      {
        /* If the page had been full, it isn't on any lists.  Now that it
           has a free slot again, push it onto the not-full list.  */
        if (inuse == TRAMP_HEAP_COUNT - 1)
          {
            struct tramp_heap_page *next = notfull_page_list;
            page->data.next = next;
            if (next)
              next->data.prev = page;
            notfull_page_list = page;
          }
        /* If the page is now empty, remove it from the notfull list.
           Then either pop it into the cur_page slot or free it.  */
        else if (inuse == 0)
          {
            struct tramp_heap_page *next, *prev;

            next = page->data.next;
            prev = page->data.prev;
            if (next)
              next->data.prev = prev;
            if (prev)
              prev->data.next = next;
            else
              notfull_page_list = next;
            page->data.next = page->data.prev = NULL;

            if (cur_page == NULL)
              cur_page = page;
            else
              {
                __tramp_free_pair (page);
                goto egress;
              }
          }
      }
  }

  /* Clear the inuse bit in the mask.  */
  {
    unsigned int iofs, bofs, mask;

    iofs = index / BITS_PER_INT;
    bofs = index % BITS_PER_INT;
    mask = 1u << bofs;
    page->data.inuse_mask[iofs] &= ~mask;
  }

 egress:
  pthread_mutex_unlock (&lock);
}
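
/* Usage sketch (an illustration only; target_fn and closure_data are
   hypothetical names): a caller building a closure would obtain a
   trampoline with

       void *tramp = __tramp_heap_alloc ((uintptr_t) target_fn,
                                         (uintptr_t) closure_data);

   hand out TRAMP as the callable address, and later release it with
   __tramp_heap_free (tramp).  The trampoline instructions that load the
   chain value and jump to the target are assumed to be installed by the
   page setup behind __tramp_alloc_pair.  */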