/*
* Copyright (c) 2000-2005 by Hewlett-Packard Company. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
#include "private/gc_priv.h"
#if defined(THREAD_LOCAL_ALLOC)
#ifndef THREADS
# error "invalid config - THREAD_LOCAL_ALLOC requires GC_THREADS"
#endif
#include "private/thread_local_alloc.h"
#include <stdlib.h>
#ifdef SHENANGO_THREADS
#include "private/shenango_support.h"
struct padded_tlfs {
struct thread_local_freelists tlfs;
} __aligned(128);
static struct padded_tlfs all_tlfs[NCPU];
static unsigned int registered_tlfs;
static DEFINE_SPINLOCK(tlfsinit);
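/* Hand out the next unused tlfs slot and initialize it for the */
/* calling kthread. */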
static void GC_alloc_tlfs(void)
{
struct thread_local_freelists *tlfs;
spin_lock(&tlfsinit);
tlfs = &all_tlfs[registered_tlfs++].tlfs;
spin_unlock(&tlfsinit);
GC_init_thread_local(tlfs);
}
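/* Mark every kthread's local free lists. maxks is assumed to come */
/* from the Shenango runtime and to bound the slots handed out above. */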
GC_INNER void GC_mark_thread_local_free_lists(void)
{
unsigned int i;
for (i = 0; i < maxks; i++)
GC_mark_thread_local_fls_for(&all_tlfs[i].tlfs);
}
#endif
#if defined(USE_COMPILER_TLS)
__thread GC_ATTR_TLS_FAST
#elif defined(USE_WIN32_COMPILER_TLS)
__declspec(thread) GC_ATTR_TLS_FAST
#endif
GC_key_t GC_thread_key;
#ifndef SHENANGO_THREADS
static GC_bool keys_initialized;
#endif
/* Return a single nonempty freelist fl to the global one pointed to */
/* by gfl. */
static void return_single_freelist(void *fl, void **gfl)
{
if (*gfl == 0) {
*gfl = fl;
} else {
void *q, **qptr;
GC_ASSERT(GC_size(fl) == GC_size(*gfl));
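/* A free-list chain ends with a word below HBLKSIZE (here it must */
/* be NULL, as asserted below). */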
/* Concatenate: */
qptr = &(obj_link(fl));
while ((word)(q = *qptr) >= HBLKSIZE)
qptr = &(obj_link(q));
GC_ASSERT(0 == q);
*qptr = *gfl;
*gfl = fl;
}
}
/* Recover the contents of the freelist array fl into the global one gfl.*/
/* We hold the allocator lock. */
static void return_freelists(void **fl, void **gfl)
{
int i;
for (i = 1; i < TINY_FREELISTS; ++i) {
if ((word)(fl[i]) >= HBLKSIZE) {
return_single_freelist(fl[i], &gfl[i]);
}
/* Clear fl[i], since the thread structure may hang around. */
/* Do it in a way that is likely to trap if we access it. */
fl[i] = (ptr_t)HBLKSIZE;
}
/* The 0 granule freelist really contains 1 granule objects. */
# ifdef GC_GCJ_SUPPORT
if (fl[0] == ERROR_FL) return;
# endif
if ((word)(fl[0]) >= HBLKSIZE) {
return_single_freelist(fl[0], &gfl[1]);
}
}
#ifdef USE_PTHREAD_SPECIFIC
/* Re-set the TLS value on thread cleanup to allow thread-local */
/* allocations to happen in the TLS destructors. */
/* GC_unregister_my_thread (and similar routines) will finally set */
/* the GC_thread_key to NULL preventing this destructor from being */
/* called repeatedly. */
static void reset_thread_key(void* v) {
pthread_setspecific(GC_thread_key, v);
}
#else
# define reset_thread_key 0
#endif
/* Each thread structure must be initialized. */
/* This call must be made from the new thread. */
GC_INNER void GC_init_thread_local(GC_tlfs p)
{
int i, j, res;
#ifndef SHENANGO_THREADS
GC_ASSERT(I_HOLD_LOCK());
if (!EXPECT(keys_initialized, TRUE)) {
GC_ASSERT((word)&GC_thread_key % sizeof(word) == 0);
res = GC_key_create(&GC_thread_key, reset_thread_key);
if (COVERT_DATAFLOW(res) != 0) {
ABORT("Failed to create key for local allocator");
}
keys_initialized = TRUE;
}
#endif
res = GC_setspecific(GC_thread_key, p);
#ifndef SHENANGO_THREADS
if (COVERT_DATAFLOW(res) != 0) {
ABORT("Failed to set thread specific allocation pointers");
}
#endif
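/* Each list starts out as the small counter value 1: the first few */
/* allocations of a given size are served by the global allocator */
/* before a batch refill of the local list pays off. */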
for (j = 0; j < TINY_FREELISTS; ++j) {
for (i = 0; i < THREAD_FREELISTS_KINDS; ++i) {
p -> _freelists[i][j] = (void *)(word)1;
}
# ifdef GC_GCJ_SUPPORT
p -> gcj_freelists[j] = (void *)(word)1;
# endif
}
/* The size 0 free lists are handled like the regular free lists, */
/* to ensure that the explicit deallocation works. However, */
/* allocation of a size 0 "gcj" object is always an error. */
# ifdef GC_GCJ_SUPPORT
p -> gcj_freelists[0] = ERROR_FL;
# endif
}
/* We hold the allocator lock. */
GC_INNER void GC_destroy_thread_local(GC_tlfs p)
{
int k;
/* We currently only do this from the thread itself. */
GC_STATIC_ASSERT(THREAD_FREELISTS_KINDS <= MAXOBJKINDS);
for (k = 0; k < THREAD_FREELISTS_KINDS; ++k) {
if (k == (int)GC_n_kinds)
break; /* kind is not created */
return_freelists(p -> _freelists[k], GC_obj_kinds[k].ok_freelist);
}
# ifdef GC_GCJ_SUPPORT
return_freelists(p -> gcj_freelists, (void **)GC_gcjobjfreelist);
# endif
}
#if defined(THREAD_LOCAL_STAT_DEBUG) && defined(SHENANGO_THREADS)
static volatile unsigned long global_fills[256 * 8] __aligned(64);
static volatile unsigned long local_fills[256 * 8] __aligned(64);
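/* The counters are strided by 8 words (one 64-byte cache line per */
/* CPU, assuming 8-byte longs) so that per-CPU increments do not */
/* cause false sharing. */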
void get_counts(unsigned long *glob, unsigned long *loc)
{
static unsigned long last_global;
static unsigned long last_local;
unsigned long new_global = 0;
unsigned long new_local = 0;
for (unsigned int i = 0; i < maxks; i++) {
new_global += global_fills[i * 8];
new_local += local_fills[i * 8];
}
*glob = new_global - last_global;
*loc = new_local - last_local;
last_global = new_global;
last_local = new_local;
}
static inline void increment_global(void)
{
global_fills[get_current_affinity() * 8]++;
}
static inline void increment_local(void)
{
local_fills[get_current_affinity() * 8]++;
}
#else
static inline void increment_global(void) {}
static inline void increment_local(void) {}
#endif
#ifndef SHENANGO_THREADS
/* Preemption control and CPU affinity are Shenango notions; stub */
/* them out so the code below compiles unchanged. */
#define preempt_enable() ((void)0)
#define preempt_disable() ((void)0)
#define get_current_affinity() (0)
#endif
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_kind(size_t bytes, int kind)
{
size_t granules;
void *tsd;
void *result;
increment_local();
# if MAXOBJKINDS > THREAD_FREELISTS_KINDS
if (EXPECT(kind >= THREAD_FREELISTS_KINDS, FALSE)) {
increment_global();
return GC_malloc_kind_global(bytes, kind);
}
# endif
preempt_disable();
# if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_WIN32_SPECIFIC)
{
GC_key_t k = GC_thread_key;
if (EXPECT(0 == k, FALSE)) {
GC_alloc_tlfs();
k = GC_thread_key;
if (EXPECT(0 == k, FALSE)) {
preempt_enable();
/* We haven't yet run GC_init_parallel. That means */
/* we also aren't locking, so this is fairly cheap. */
return GC_malloc_kind_global(bytes, kind);
}
}
tsd = GC_getspecific(k);
}
# else
if (!EXPECT(keys_initialized, TRUE))
return GC_malloc_kind_global(bytes, kind);
tsd = GC_getspecific(GC_thread_key);
# endif
# if !defined(USE_COMPILER_TLS) && !defined(USE_WIN32_COMPILER_TLS)
if (EXPECT(0 == tsd, FALSE)) {
return GC_malloc_kind_global(bytes, kind);
}
# endif
GC_ASSERT(GC_is_initialized);
GC_ASSERT(GC_is_thread_tsd_valid(tsd));
granules = ROUNDED_UP_GRANULES(bytes);
if (GC_EXPECT((granules) >= GC_TINY_FREELISTS, 0)) {
increment_global();
preempt_enable();
result = GC_malloc_kind_global(bytes, kind);
} else {
void **my_fl = (((GC_tlfs)tsd) -> _freelists[kind]) + (granules);
void *my_entry = *my_fl;
void *next;
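/* my_entry is either a real object pointer (above the sentinel */
/* range tested below), a small counter meaning "keep allocating */
/* globally", or NULL/an exhausted counter meaning "refill the */
/* local list in a batch". */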
for (;;) {
if (GC_EXPECT((GC_word)my_entry
> (DIRECT_GRANULES) + GC_TINY_FREELISTS + 1, 1)) {
next = *(void **)(my_entry);
result = (void *)my_entry;
GC_FAST_M_AO_STORE(my_fl, next);
(void)(kind == PTRFREE ? NULL : (obj_link(result) = 0));
GC_PREFETCH_FOR_WRITE(next);
if ((kind) != GC_I_PTRFREE) {
GC_end_stubborn_change(my_fl);
GC_reachable_here(next);
}
GC_ASSERT(GC_size(result) >= (granules)*GC_GRANULE_BYTES);
GC_ASSERT((kind) == GC_I_PTRFREE || ((GC_word *)result)[1] == 0);
preempt_enable();
break;
}
/* Entry contains counter or NULL */
if ((GC_signed_word)my_entry - (GC_signed_word)(DIRECT_GRANULES) <= 0
/* (GC_word)my_entry <= (num_direct) */
&& my_entry != 0 /* NULL */) {
/* Small counter value, not NULL */
GC_FAST_M_AO_STORE(my_fl, (char *)my_entry
+ (granules) + 1);
increment_global();
preempt_enable();
result = GC_malloc_kind_global(bytes, kind);
break;
} else {
/* Large counter or NULL */
unsigned int prevaff = get_current_affinity();
void *ptrout;
preempt_enable();
GC_generic_malloc_many(((granules) == 0? GC_GRANULE_BYTES :
GC_RAW_BYTES_FROM_INDEX(granules)),
kind, &ptrout);
if (GC_EXPECT(ptrout == 0, 0)) {
result = (*GC_get_oom_fn())((granules)*GC_GRANULE_BYTES);
break;
}
preempt_disable();
// Preemption was enabled around the refill above, so the uthread
// may have migrated to another kthread; if so, my_fl points into
// another kthread's tlfs and must not be written.
if (GC_EXPECT(prevaff != get_current_affinity(), 0)) {
// Take the first object from the refilled list and abandon the
// rest; the abandoned objects are reclaimed at the next collection.
result = ptrout;
*(void **)ptrout = 0;
preempt_enable();
break;
}
// possibly overwrite existing fl
*my_fl = ptrout;
my_entry = ptrout;
}
}
}
# ifdef LOG_ALLOCS
GC_log_printf("GC_malloc_kind(%lu, %d) returned %p, recent GC #%lu\n",
(unsigned long)bytes, kind, result,
(unsigned long)GC_gc_no);
# endif
return result;
}
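/* Note: the public entry points funnel through here; for instance, */
/* GC_malloc(b) is GC_malloc_kind(b, NORMAL) and GC_malloc_atomic(b) */
/* is GC_malloc_kind(b, PTRFREE). */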
#ifdef GC_GCJ_SUPPORT
# include "gc_gcj.h"
/* Gcj-style allocation without locks is extremely tricky. The */
/* fundamental issue is that we may end up marking a free list, which */
/* has freelist links instead of "vtable" pointers. That is usually */
/* OK, since the next object on the free list will be cleared, and */
/* will thus be interpreted as containing a zero descriptor. That's */
/* fine if the object has not yet been initialized. But there are */
/* interesting potential races. */
/* In the case of incremental collection, this seems hopeless, since */
/* the marker may run asynchronously, and may pick up the pointer to */
/* the next freelist entry (which it thinks is a vtable pointer), get */
/* suspended for a while, and then see an allocated object instead */
/* of the vtable. This may be avoidable with either a handshake with */
/* the collector or, probably more easily, by moving the free list */
/* links to the second word of each object. The latter isn't a */
/* universal win, since on architectures like Itanium, nonzero offsets */
/* are not necessarily free. And there may be cache fill order issues. */
/* For now, we punt with incremental GC. This probably means that */
/* incremental GC should be enabled before we fork a second thread. */
/* Unlike the other thread local allocation calls, we assume that the */
/* collector has been explicitly initialized. */
GC_API GC_ATTR_MALLOC void * GC_CALL GC_gcj_malloc(size_t bytes,
void * ptr_to_struct_containing_descr)
{
if (EXPECT(GC_incremental, FALSE)) {
return GC_core_gcj_malloc(bytes, ptr_to_struct_containing_descr);
} else {
size_t granules = ROUNDED_UP_GRANULES(bytes);
void *result;
void **tiny_fl;
GC_ASSERT(GC_gcj_malloc_initialized);
tiny_fl = ((GC_tlfs)GC_getspecific(GC_thread_key))->gcj_freelists;
GC_FAST_MALLOC_GRANS(result, granules, tiny_fl, DIRECT_GRANULES,
GC_gcj_kind,
GC_core_gcj_malloc(bytes,
ptr_to_struct_containing_descr),
{AO_compiler_barrier();
*(void **)result = ptr_to_struct_containing_descr;});
/* This forces the initialization of the "method ptr". */
/* This is necessary to ensure some very subtle properties */
/* required if a GC is run in the middle of such an allocation. */
/* Here we implicitly also assume atomicity for the free-list */
/* and method-pointer assignments. */
/* We must update the freelist before we store the pointer. */
/* Otherwise a GC at this point would see a corrupted */
/* free list. */
/* A real memory barrier is not needed, since the */
/* action of stopping this thread will cause prior writes */
/* to complete. */
/* We assert that any concurrent marker will stop us. */
/* Thus it is impossible for a mark procedure to see the */
/* allocation of the next object, but to see this object */
/* still containing a free list pointer. Otherwise the */
/* marker, by misinterpreting the freelist link as a vtable */
/* pointer, might find a random "mark descriptor" in the next */
/* object. */
return result;
}
}
#endif /* GC_GCJ_SUPPORT */
/* The thread support layer must arrange to mark thread-local */
/* free lists explicitly, since the link field is often */
/* invisible to the marker. It knows how to find all threads; */
/* we take care of an individual thread freelist structure. */
GC_INNER void GC_mark_thread_local_fls_for(GC_tlfs p)
{
ptr_t q;
int i, j;
for (j = 0; j < TINY_FREELISTS; ++j) {
for (i = 0; i < THREAD_FREELISTS_KINDS; ++i) {
/* Load the pointer atomically as it might be updated */
/* concurrently by GC_FAST_MALLOC_GRANS. */
q = (ptr_t)AO_load((volatile AO_t *)&p->_freelists[i][j]);
if ((word)q > HBLKSIZE)
GC_set_fl_marks(q);
}
# ifdef GC_GCJ_SUPPORT
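/* Skip gcj_freelists[0]: it holds ERROR_FL rather than a list */
/* (see GC_init_thread_local). */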
if (EXPECT(j > 0, TRUE)) {
q = (ptr_t)AO_load((volatile AO_t *)&p->gcj_freelists[j]);
if ((word)q > HBLKSIZE)
GC_set_fl_marks(q);
}
# endif
}
}
#if defined(GC_ASSERTIONS)
/* Check that all thread-local free-lists in p are completely marked. */
void GC_check_tls_for(GC_tlfs p)
{
int i, j;
for (j = 1; j < TINY_FREELISTS; ++j) {
for (i = 0; i < THREAD_FREELISTS_KINDS; ++i) {
GC_check_fl_marks(&p->_freelists[i][j]);
}
# ifdef GC_GCJ_SUPPORT
GC_check_fl_marks(&p->gcj_freelists[j]);
# endif
}
}
#endif /* GC_ASSERTIONS */
#endif /* THREAD_LOCAL_ALLOC */