-
Notifications
You must be signed in to change notification settings - Fork 0
/
jsgc.c
3201 lines (2883 loc) · 103 KB
/
jsgc.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sw=4 et tw=78:
*
* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Mozilla Communicator client code, released
* March 31, 1998.
*
* The Initial Developer of the Original Code is
* Netscape Communications Corporation.
* Portions created by the Initial Developer are Copyright (C) 1998
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
*
* Alternatively, the contents of this file may be used under the terms of
* either of the GNU General Public License Version 2 or later (the "GPL"),
* or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
/*
* JS Mark-and-Sweep Garbage Collector.
*
* This GC allocates fixed-sized things with sizes up to GC_NBYTES_MAX (see
* jsgc.h). It allocates from a special GC arena pool with each arena allocated
* using malloc. It uses an ideally parallel array of flag bytes to hold the
* mark bit, finalizer type index, etc.
*
* XXX swizzle page to freelist for better locality of reference
*/
#include "jsstddef.h"
#include <stdlib.h> /* for free */
#include <string.h> /* for memset used when DEBUG */
#include "jstypes.h"
#include "jsutil.h" /* Added by JSIFY */
#include "jshash.h" /* Added by JSIFY */
#include "jsapi.h"
#include "jsatom.h"
#include "jsbit.h"
#include "jsclist.h"
#include "jscntxt.h"
#include "jsconfig.h"
#include "jsdbgapi.h"
#include "jsexn.h"
#include "jsfun.h"
#include "jsgc.h"
#include "jsinterp.h"
#include "jsiter.h"
#include "jslock.h"
#include "jsnum.h"
#include "jsobj.h"
#include "jsscope.h"
#include "jsscript.h"
#include "jsstr.h"
#if JS_HAS_XML_SUPPORT
#include "jsxml.h"
#endif
/*
* GC arena sizing depends on amortizing arena overhead using a large number
* of things per arena, and on the thing/flags ratio of 8:1 on most platforms.
*
* On 64-bit platforms, we would have half as many things per arena because
* pointers are twice as big, so we double the bytes for things per arena.
* This preserves the 1024 byte flags sub-arena size, which relates to the
* GC_PAGE_SIZE (see below for why).
*/
#if JS_BYTES_PER_WORD == 8
# define GC_THINGS_SHIFT 14 /* 16KB for things on Alpha, etc. */
#else
# define GC_THINGS_SHIFT 13 /* 8KB for things on most platforms */
#endif
#define GC_THINGS_SIZE JS_BIT(GC_THINGS_SHIFT)
#define GC_FLAGS_SIZE (GC_THINGS_SIZE / sizeof(JSGCThing))
/*
* A GC arena contains one flag byte for each thing in its heap, and supports
* O(1) lookup of a flag given its thing's address.
*
* To implement this, we take advantage of the thing/flags numerology: given
* the 8K bytes worth of GC-things, there are 1K flag bytes. Within each 9K
* allocation for things+flags there are always 8 consecutive 1K-pages each
* aligned on 1K boundary. We use these pages to allocate things and the
* remaining 1K of space before and after the aligned pages to store flags.
* If we are really lucky and things+flags starts on a 1K boundary, then
* flags would consist of a single 1K chunk that comes after 8K of things.
* Otherwise there are 2 chunks of flags, one before and one after things.
*
* To be able to find the flag byte for a particular thing, we put a
* JSGCPageInfo record at the beginning of each 1K-aligned page to hold that
* page's offset from the beginning of things+flags allocation and we allocate
* things after this record. Thus for each thing |thing_address & ~1023|
* gives the address of a JSGCPageInfo record from which we read page_offset.
* Due to page alignment
* (page_offset & ~1023) + (thing_address & 1023)
* gives thing_offset from the beginning of 8K paged things. We then divide
* thing_offset by sizeof(JSGCThing) to get thing_index.
*
* Now |page_address - page_offset| is things+flags arena_address and
* (page_offset & 1023) is the offset of the first page from the start of
* things+flags area. Thus if
* thing_index < (page_offset & 1023)
* then
* allocation_start_address + thing_index < address_of_the_first_page
* and we use
* allocation_start_address + thing_index
* as the address to store thing's flags. If
* thing_index >= (page_offset & 1023),
* then we use the chunk of flags that comes after the pages with things
* and calculate the address for the flag byte as
* address_of_the_first_page + 8K + (thing_index - (page_offset & 1023))
* which is just
* allocation_start_address + thing_index + 8K.
*
* When we allocate things with size equal to sizeof(JSGCThing), the overhead
* of this scheme for 32 bit platforms is (8+8*(8+1))/(8+9K) or 0.87%
* (assuming 4 bytes for each JSGCArena header, and 8 bytes for each
* JSGCThing and JSGCPageInfo). When thing_size > 8, the scheme wastes the
* flag byte for each extra 8 bytes beyond sizeof(JSGCThing) in thing_size
* and the overhead is close to 1/8 or 12.5%.
* FIXME: How can we avoid this overhead?
*
* Here's some ASCII art showing an arena:
*
* split or the first 1-K aligned address.
* |
* V
* +--+-------+-------+-------+-------+-------+-------+-------+-------+-----+
* |fB| tp0 | tp1 | tp2 | tp3 | tp4 | tp5 | tp6 | tp7 | fA |
* +--+-------+-------+-------+-------+-------+-------+-------+-------+-----+
* ^ ^
* tI ---------+ |
* tJ -------------------------------------------+
*
* - fB are the "before split" flags, fA are the "after split" flags
* - tp0-tp7 are the 8 thing pages
* - thing tI points into tp1, whose flags are below the split, in fB
* - thing tJ points into tp5, clearly above the split
*
* In general, one of the thing pages will have some of its things' flags on
* the low side of the split, and the rest of its things' flags on the high
* side. All the other pages have flags only below or only above.
*
* (If we need to implement card-marking for an incremental GC write barrier,
* we can replace word-sized offsetInArena in JSGCPageInfo by pair of
* uint8 card_mark and uint16 offsetInArena fields as the offset can not exceed
* GC_THINGS_SIZE. This would gives an extremely efficient write barrier:
* when mutating an object obj, just store a 1 byte at
* (uint8 *) ((jsuword)obj & ~1023) on 32-bit platforms.)
*/
#define GC_PAGE_SHIFT 10
#define GC_PAGE_MASK ((jsuword) JS_BITMASK(GC_PAGE_SHIFT))
#define GC_PAGE_SIZE JS_BIT(GC_PAGE_SHIFT)
#define GC_PAGE_COUNT (1 << (GC_THINGS_SHIFT - GC_PAGE_SHIFT))
/*
 * Header stored at the start of each 1K-aligned thing page within an arena
 * (see the big layout comment above).  Things are allocated immediately
 * after this record.
 */
typedef struct JSGCPageInfo {
    jsuword     offsetInArena;          /* offset from the arena start */
    jsuword     unscannedBitmap;        /* bitset for fast search of marked
                                           but not yet scanned GC things */
} JSGCPageInfo;
/*
 * A GC arena: one malloc'ed block holding GC_THINGS_SIZE bytes of thing
 * pages plus GC_FLAGS_SIZE flag bytes in the base[] area, laid out as in
 * the comment above.  base[1] is the old-style trailing-array idiom; the
 * real size is fixed by GC_ARENA_SIZE below.
 */
struct JSGCArena {
    JSGCArenaList   *list;              /* allocation list for the arena */
    JSGCArena       *prev;              /* link field for allocation list */
    JSGCArena       *prevUnscanned;     /* link field for the list of arenas
                                           with marked but not yet scanned
                                           things */
    jsuword         unscannedPages;     /* bitset for fast search of pages
                                           with marked but not yet scanned
                                           things */
    uint8           base[1];            /* things+flags allocation area */
};
#define GC_ARENA_SIZE \
(offsetof(JSGCArena, base) + GC_THINGS_SIZE + GC_FLAGS_SIZE)
#define FIRST_THING_PAGE(a) \
(((jsuword)(a)->base + GC_FLAGS_SIZE - 1) & ~GC_PAGE_MASK)
#define PAGE_TO_ARENA(pi) \
((JSGCArena *)((jsuword)(pi) - (pi)->offsetInArena \
- offsetof(JSGCArena, base)))
#define PAGE_INDEX(pi) \
((size_t)((pi)->offsetInArena >> GC_PAGE_SHIFT))
#define THING_TO_PAGE(thing) \
((JSGCPageInfo *)((jsuword)(thing) & ~GC_PAGE_MASK))
/*
* Given a thing size n, return the size of the gap from the page start before
* the first thing. We know that any n not a power of two packs from
* the end of the page leaving at least enough room for one JSGCPageInfo, but
* not for another thing, at the front of the page (JS_ASSERTs below insist
* on this).
*
* This works because all allocations are a multiple of sizeof(JSGCThing) ==
* sizeof(JSGCPageInfo) in size.
*/
#define PAGE_THING_GAP(n) (((n) & ((n) - 1)) ? (GC_PAGE_SIZE % (n)) : (n))
#ifdef JS_THREADSAFE
/*
* The maximum number of things to put to the local free list by taking
* several things from the global free list or from the tail of the last
* allocated arena to amortize the cost of rt->gcLock.
*
* We use number 8 based on benchmarks from bug 312238.
*/
#define MAX_THREAD_LOCAL_THINGS 8
#endif
JS_STATIC_ASSERT(sizeof(JSGCThing) == sizeof(JSGCPageInfo));
JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(JSObject));
JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(JSString));
JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(jsdouble));
JS_STATIC_ASSERT(GC_FLAGS_SIZE >= GC_PAGE_SIZE);
JS_STATIC_ASSERT(sizeof(JSStackHeader) >= 2 * sizeof(jsval));
/*
* JSPtrTable capacity growth descriptor. The table grows by powers of two
* starting from capacity JSPtrTableInfo.minCapacity, but switching to linear
* growth when capacity reaches JSPtrTableInfo.linearGrowthThreshold.
*/
typedef struct JSPtrTableInfo {
uint16 minCapacity;
uint16 linearGrowthThreshold;
} JSPtrTableInfo;
#define GC_ITERATOR_TABLE_MIN 4
#define GC_ITERATOR_TABLE_LINEAR 1024
static const JSPtrTableInfo iteratorTableInfo = {
GC_ITERATOR_TABLE_MIN,
GC_ITERATOR_TABLE_LINEAR
};
/*
 * Compute the allocated capacity implied by a JSPtrTable element count:
 * zero for an empty table, then powers of two from info->minCapacity up to
 * info->linearGrowthThreshold, then multiples of the threshold.
 */
static size_t
PtrTableCapacity(size_t count, const JSPtrTableInfo *info)
{
    size_t threshold, capacity, ceilLog;

    threshold = info->linearGrowthThreshold;
    JS_ASSERT(info->minCapacity <= threshold);

    if (count == 0) {
        capacity = 0;
    } else if (count >= threshold) {
        /* Past the threshold the table grows linearly. */
        capacity = JS_ROUNDUP(count, threshold);
    } else {
        /* Below the threshold: round count up to a power of two. */
        ceilLog = JS_CEILING_LOG2W(count);
        JS_ASSERT(ceilLog != JS_BITS_PER_WORD);
        capacity = (size_t)1 << ceilLog;
        if (capacity < info->minCapacity)
            capacity = info->minCapacity;
    }
    JS_ASSERT(capacity >= count);
    return capacity;
}
/* Release a JSPtrTable's backing array and reset it to the empty state. */
static void
FreePtrTable(JSPtrTable *table, const JSPtrTableInfo *info)
{
    void **elems = table->array;

    if (elems) {
        JS_ASSERT(table->count > 0);
        free(elems);
        table->array = NULL;
        table->count = 0;
    }
    JS_ASSERT(table->count == 0);
}
/*
 * Append ptr to the table, growing the backing array when count reaches the
 * current capacity.  Returns JS_FALSE (with an OOM report on cx) if the new
 * capacity would overflow size_t or realloc fails; the table is unchanged
 * on failure.
 */
static JSBool
AddToPtrTable(JSContext *cx, JSPtrTable *table, const JSPtrTableInfo *info,
              void *ptr)
{
    size_t count, capacity;
    void **grown;

    count = table->count;
    capacity = PtrTableCapacity(count, info);

    if (count == capacity) {
        if (capacity < info->minCapacity) {
            /* First allocation: start at the minimum capacity. */
            JS_ASSERT(capacity == 0);
            JS_ASSERT(!table->array);
            capacity = info->minCapacity;
        } else {
            /*
             * Simplify the overflow detection assuming pointer is bigger
             * than byte.
             */
            JS_STATIC_ASSERT(2 <= sizeof table->array[0]);
            if (capacity < info->linearGrowthThreshold)
                capacity += capacity;
            else
                capacity += info->linearGrowthThreshold;
            if (capacity > (size_t)-1 / sizeof table->array[0]) {
                JS_ReportOutOfMemory(cx);
                return JS_FALSE;
            }
        }
        grown = (void **) realloc(table->array,
                                  capacity * sizeof table->array[0]);
        if (!grown) {
            JS_ReportOutOfMemory(cx);
            return JS_FALSE;
        }
#ifdef DEBUG
        /* Poison the unused tail so stale reads are visible. */
        memset(grown + count, JS_FREE_PATTERN,
               (capacity - count) * sizeof table->array[0]);
#endif
        table->array = grown;
    }

    table->array[count] = ptr;
    table->count = count + 1;
    return JS_TRUE;
}
/*
 * Shrink the table to newCount elements, freeing or shrinking the backing
 * array when the capacity class changes.  newCount must not exceed the
 * current count.
 */
static void
ShrinkPtrTable(JSPtrTable *table, const JSPtrTableInfo *info,
               size_t newCount)
{
    size_t oldCapacity, capacity;
    void **array;

    JS_ASSERT(newCount <= table->count);
    if (newCount == table->count)
        return;

    oldCapacity = PtrTableCapacity(table->count, info);
    table->count = newCount;
    capacity = PtrTableCapacity(newCount, info);

    if (oldCapacity != capacity) {
        array = table->array;
        JS_ASSERT(array);
        if (capacity == 0) {
            free(array);
            table->array = NULL;
            return;
        }
        /*
         * Shrinking realloc: if it fails, keep the old (larger) array —
         * the table is still correct, just over-allocated.
         */
        array = (void **) realloc(array, capacity * sizeof array[0]);
        if (array)
            table->array = array;
    }
#ifdef DEBUG
    /* Poison the now-unused tail to catch use of removed entries. */
    memset(table->array + newCount, JS_FREE_PATTERN,
           (capacity - newCount) * sizeof table->array[0]);
#endif
}
#ifdef JS_GCMETER
# define METER(x) x
#else
# define METER(x) ((void) 0)
#endif
/*
 * Allocate a new GC arena and push it onto arenaList, initializing the
 * JSGCPageInfo header of every 1K-aligned thing page.  Fails (JS_FALSE)
 * when the GC byte budget is exhausted or malloc fails.
 */
static JSBool
NewGCArena(JSRuntime *rt, JSGCArenaList *arenaList)
{
    JSGCArena *arena;
    jsuword pageOffset;
    JSGCPageInfo *pageInfo;
    uint32 *bytesCounter;

    /* Refuse to grow beyond the configured GC heap limit. */
    if (rt->gcBytes >= rt->gcMaxBytes)
        return JS_FALSE;

    arena = (JSGCArena *)malloc(GC_ARENA_SIZE);
    if (!arena)
        return JS_FALSE;

    /* Stamp a JSGCPageInfo at the start of every thing page. */
    pageOffset = (GC_PAGE_SIZE - ((jsuword)arena->base & GC_PAGE_MASK))
                 & GC_PAGE_MASK;
    JS_ASSERT((jsuword)arena->base + pageOffset == FIRST_THING_PAGE(arena));
    do {
        pageInfo = (JSGCPageInfo *) (arena->base + pageOffset);
        pageInfo->offsetInArena = pageOffset;
        pageInfo->unscannedBitmap = 0;
        pageOffset += GC_PAGE_SIZE;
    } while (pageOffset < GC_THINGS_SIZE);

    METER(++arenaList->stats.narenas);
    METER(arenaList->stats.maxarenas
          = JS_MAX(arenaList->stats.maxarenas, arenaList->stats.narenas));

    /* Link the arena at the head of its allocation list. */
    arena->list = arenaList;
    arena->prev = arenaList->last;
    arena->prevUnscanned = NULL;
    arena->unscannedPages = 0;
    arenaList->last = arena;
    arenaList->lastLimit = 0;

    /* List 0 is accounted as public gcBytes, the rest as private bytes. */
    bytesCounter = (arenaList == &rt->gcArenaList[0])
                   ? &rt->gcBytes
                   : &rt->gcPrivateBytes;
    *bytesCounter += GC_ARENA_SIZE;
    return JS_TRUE;
}
/*
 * Unlink the arena *ap from arenaList, update the byte accounting, and free
 * it.  *ap is left pointing at the previous arena on the list.
 */
static void
DestroyGCArena(JSRuntime *rt, JSGCArenaList *arenaList, JSGCArena **ap)
{
    JSGCArena *arena = *ap;
    uint32 *bytesCounter;

    JS_ASSERT(arena);
    bytesCounter = (arenaList == &rt->gcArenaList[0])
                   ? &rt->gcBytes
                   : &rt->gcPrivateBytes;
    JS_ASSERT(*bytesCounter >= GC_ARENA_SIZE);
    *bytesCounter -= GC_ARENA_SIZE;

    METER(rt->gcStats.afree++);
    METER(--arenaList->stats.narenas);

    /* If we are destroying the newest arena, reset the bump allocator. */
    if (arena == arenaList->last)
        arenaList->lastLimit = (uint16)(arena->prev ? GC_THINGS_SIZE : 0);
    *ap = arena->prev;

#ifdef DEBUG
    /* Poison the freed arena to make dangling references crash loudly. */
    memset(arena, JS_FREE_PATTERN, GC_ARENA_SIZE);
#endif
    free(arena);
}
/* Reset every size-class arena list in rt to the empty state. */
static void
InitGCArenaLists(JSRuntime *rt)
{
    uintN i, nbytes;
    JSGCArenaList *list;

    for (i = 0; i < GC_NUM_FREELISTS; i++) {
        list = &rt->gcArenaList[i];
        nbytes = GC_FREELIST_NBYTES(i);
        /* thingSize is stored in a uint16; the size class must fit. */
        JS_ASSERT((size_t)(uint16)nbytes == nbytes);
        list->last = NULL;
        list->lastLimit = 0;
        list->thingSize = (uint16)nbytes;
        list->freeList = NULL;
        METER(memset(&list->stats, 0, sizeof list->stats));
    }
}
/* Destroy every remaining arena on every size-class list of rt. */
static void
FinishGCArenaLists(JSRuntime *rt)
{
    uintN i;
    JSGCArenaList *list;

    for (i = 0; i < GC_NUM_FREELISTS; i++) {
        list = &rt->gcArenaList[i];
        while (list->last)
            DestroyGCArena(rt, list, &list->last);
        list->freeList = NULL;
    }
}
/*
 * Return the address of the flag byte for a GC thing, using the arena
 * layout documented above: recover the thing's page header, compute its
 * index within the 8K things area, then pick the flag chunk below or above
 * the split.
 */
uint8 *
js_GetGCThingFlags(void *thing)
{
    JSGCPageInfo *pi;
    jsuword offsetInArena, thingIndex;

    pi = THING_TO_PAGE(thing);
    offsetInArena = pi->offsetInArena;
    JS_ASSERT(offsetInArena < GC_THINGS_SIZE);
    /*
     * (offsetInArena & ~GC_PAGE_MASK) is the page's offset in the things
     * area; OR-ing in the thing's offset within its page and dividing by
     * sizeof(JSGCThing) yields the thing's index.
     */
    thingIndex = ((offsetInArena & ~GC_PAGE_MASK) |
                  ((jsuword)thing & GC_PAGE_MASK)) / sizeof(JSGCThing);
    JS_ASSERT(thingIndex < GC_PAGE_SIZE);
    /*
     * Flags for indexes below (offsetInArena & GC_PAGE_MASK) live before
     * the first page; the rest live in the chunk after the 8K of things.
     */
    if (thingIndex >= (offsetInArena & GC_PAGE_MASK))
        thingIndex += GC_THINGS_SIZE;
    return (uint8 *)pi - offsetInArena + thingIndex;
}
/*
 * Recover the owning JSRuntime from a GC-allocated string: walk from the
 * string's page to its arena's list pointer, then subtract the list's
 * offset inside JSRuntime.
 */
JSRuntime*
js_GetGCStringRuntime(JSString *str)
{
    JSGCPageInfo *pi;
    JSGCArenaList *list;

    pi = THING_TO_PAGE(str);
    list = PAGE_TO_ARENA(pi)->list;

    /* Strings are allocated from list 0, the sizeof(JSGCThing) class. */
    JS_ASSERT(list->thingSize == sizeof(JSGCThing));
    JS_ASSERT(GC_FREELIST_INDEX(sizeof(JSGCThing)) == 0);
    return (JSRuntime *)((uint8 *)list - offsetof(JSRuntime, gcArenaList));
}
/*
 * A thing is about to be finalized when it is neither marked, locked, nor
 * already being finalized.
 */
JSBool
js_IsAboutToBeFinalized(JSContext *cx, void *thing)
{
    uint8 flagByte;

    flagByte = *js_GetGCThingFlags(thing);
    return (flagByte & (GCF_MARK | GCF_LOCK | GCF_FINAL)) == 0;
}
typedef void (*GCFinalizeOp)(JSContext *cx, JSGCThing *thing);
#ifndef DEBUG
# define js_FinalizeDouble NULL
#endif
#if !JS_HAS_XML_SUPPORT
# define js_FinalizeXMLNamespace NULL
# define js_FinalizeXMLQName NULL
# define js_FinalizeXML NULL
#endif
/*
 * Finalizer table indexed by GC thing type.  Entries from
 * GCX_EXTERNAL_STRING onward are external-string slots, settable at
 * runtime via js_ChangeExternalStringFinalizer below.
 */
static GCFinalizeOp gc_finalizers[GCX_NTYPES] = {
    (GCFinalizeOp) js_FinalizeObject,           /* GCX_OBJECT */
    (GCFinalizeOp) js_FinalizeString,           /* GCX_STRING */
    (GCFinalizeOp) js_FinalizeDouble,           /* GCX_DOUBLE */
    (GCFinalizeOp) js_FinalizeString,           /* GCX_MUTABLE_STRING */
    NULL,                                       /* GCX_PRIVATE */
    (GCFinalizeOp) js_FinalizeXMLNamespace,     /* GCX_NAMESPACE */
    (GCFinalizeOp) js_FinalizeXMLQName,         /* GCX_QNAME */
    (GCFinalizeOp) js_FinalizeXML,              /* GCX_XML */
    NULL,                                       /* GCX_EXTERNAL_STRING */
    NULL,                                       /* external string slots, */
    NULL,                                       /* filled lazily by */
    NULL,                                       /* js_ChangeExternalString- */
    NULL,                                       /* Finalizer */
    NULL,
    NULL,
    NULL
};
#ifdef GC_MARK_DEBUG
static const char newborn_external_string[] = "newborn external string";
static const char *gc_typenames[GCX_NTYPES] = {
"newborn object",
"newborn string",
"newborn double",
"newborn mutable string",
"newborn private",
"newborn Namespace",
"newborn QName",
"newborn XML",
newborn_external_string,
newborn_external_string,
newborn_external_string,
newborn_external_string,
newborn_external_string,
newborn_external_string,
newborn_external_string,
newborn_external_string
};
#endif
/*
 * Replace the first external-string finalizer slot holding oldop with
 * newop and return the slot's type index, or -1 when no slot matches.
 * Passing oldop == NULL thus claims a free slot.
 */
intN
js_ChangeExternalStringFinalizer(JSStringFinalizeOp oldop,
                                 JSStringFinalizeOp newop)
{
    uintN slot;

    for (slot = GCX_EXTERNAL_STRING; slot < GCX_NTYPES; slot++) {
        if (gc_finalizers[slot] != (GCFinalizeOp) oldop)
            continue;
        gc_finalizers[slot] = (GCFinalizeOp) newop;
        return (intN) slot;
    }
    return -1;
}
/*
 * Entry of the gcRootsHash table mapping a rooted address to its debug
 * name.  This is compatible with JSDHashEntryStub.
 */
typedef struct JSGCRootHashEntry {
    JSDHashEntryHdr hdr;
    void            *root;      /* rooted address (key) */
    const char      *name;      /* debug name, may be NULL */
} JSGCRootHashEntry;
/* Initial size of the gcRootsHash table (SWAG, small enough to amortize). */
#define GC_ROOTS_SIZE 256
#define GC_FINALIZE_LEN 1024
/*
 * Initialize the runtime's GC state: arena lists, the roots hash, and the
 * byte budgets.  Returns JS_FALSE on OOM initializing the roots table.
 */
JSBool
js_InitGC(JSRuntime *rt, uint32 maxbytes)
{
    InitGCArenaLists(rt);

    if (!JS_DHashTableInit(&rt->gcRootsHash, JS_DHashGetStubOps(), NULL,
                           sizeof(JSGCRootHashEntry), GC_ROOTS_SIZE)) {
        /* Null ops flags the table as uninitialized for js_FinishGC. */
        rt->gcRootsHash.ops = NULL;
        return JS_FALSE;
    }

    /* The GC-locks table is created lazily on first use. */
    rt->gcLocksHash = NULL;

    /*
     * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
     * for default backward API compatibility.
     */
    rt->gcMaxBytes = rt->gcMaxMallocBytes = maxbytes;
    return JS_TRUE;
}
#ifdef JS_GCMETER
/*
 * Dump per-arena-list and total GC allocation statistics for rt to fp.
 * Compiled only in JS_GCMETER builds (see the enclosing #ifdef).
 */
JS_FRIEND_API(void)
js_DumpGCStats(JSRuntime *rt, FILE *fp)
{
    uintN i;
    size_t totalThings, totalMaxThings, totalBytes;

    fprintf(fp, "\nGC allocation statistics:\n");

#define UL(x)       ((unsigned long)(x))
#define ULSTAT(x)   UL(rt->gcStats.x)
    totalThings = 0;
    totalMaxThings = 0;
    totalBytes = 0;
    for (i = 0; i < GC_NUM_FREELISTS; i++) {
        JSGCArenaList *list = &rt->gcArenaList[i];
        JSGCArenaStats *stats = &list->stats;
        if (stats->maxarenas == 0) {
            fprintf(fp, "ARENA LIST %u (thing size %lu): NEVER USED\n",
                    i, UL(GC_FREELIST_NBYTES(i)));
            continue;
        }
        fprintf(fp, "ARENA LIST %u (thing size %lu):\n",
                i, UL(GC_FREELIST_NBYTES(i)));
        fprintf(fp, " arenas: %lu\n", UL(stats->narenas));
        fprintf(fp, " max arenas: %lu\n", UL(stats->maxarenas));
        fprintf(fp, " things: %lu\n", UL(stats->nthings));
        fprintf(fp, " max things: %lu\n", UL(stats->maxthings));
        fprintf(fp, " free list: %lu\n", UL(stats->freelen));
        fprintf(fp, " free list density: %.1f%%\n",
                stats->narenas == 0
                ? 0.0
                : (100.0 * list->thingSize * (jsdouble)stats->freelen /
                   (GC_THINGS_SIZE * (jsdouble)stats->narenas)));
        fprintf(fp, " average free list density: %.1f%%\n",
                stats->totalarenas == 0
                ? 0.0
                : (100.0 * list->thingSize * (jsdouble)stats->totalfreelen /
                   (GC_THINGS_SIZE * (jsdouble)stats->totalarenas)));
        fprintf(fp, " recycles: %lu\n", UL(stats->recycle));
        fprintf(fp, " recycle/alloc ratio: %.2f\n",
                (jsdouble)stats->recycle /
                (jsdouble)(stats->totalnew - stats->recycle));
        totalThings += stats->nthings;
        totalMaxThings += stats->maxthings;
        totalBytes += GC_FREELIST_NBYTES(i) * stats->nthings;
    }
    fprintf(fp, "TOTAL STATS:\n");
    fprintf(fp, " public bytes allocated: %lu\n", UL(rt->gcBytes));
    fprintf(fp, " private bytes allocated: %lu\n", UL(rt->gcPrivateBytes));
    fprintf(fp, " alloc attempts: %lu\n", ULSTAT(alloc));
#ifdef JS_THREADSAFE
    /*
     * BUGFIX: was "%1u" (field width 1, unsigned int) which mismatches the
     * unsigned long argument ULSTAT expands to — undefined behavior per the
     * C fprintf specification.  Use "%lu" like every other line here.
     */
    fprintf(fp, " alloc without locks: %lu\n", ULSTAT(localalloc));
#endif
    fprintf(fp, " total GC things: %lu\n", UL(totalThings));
    fprintf(fp, " max total GC things: %lu\n", UL(totalMaxThings));
    fprintf(fp, " GC things size: %lu\n", UL(totalBytes));
    fprintf(fp, "allocation retries after GC: %lu\n", ULSTAT(retry));
    fprintf(fp, " allocation failures: %lu\n", ULSTAT(fail));
    fprintf(fp, " things born locked: %lu\n", ULSTAT(lockborn));
    fprintf(fp, " valid lock calls: %lu\n", ULSTAT(lock));
    fprintf(fp, " valid unlock calls: %lu\n", ULSTAT(unlock));
    fprintf(fp, " mark recursion depth: %lu\n", ULSTAT(depth));
    fprintf(fp, " maximum mark recursion: %lu\n", ULSTAT(maxdepth));
    fprintf(fp, " mark C recursion depth: %lu\n", ULSTAT(cdepth));
    fprintf(fp, " maximum mark C recursion: %lu\n", ULSTAT(maxcdepth));
    fprintf(fp, " delayed scan bag adds: %lu\n", ULSTAT(unscanned));
#ifdef DEBUG
    fprintf(fp, " max delayed scan bag size: %lu\n", ULSTAT(maxunscanned));
#endif
    fprintf(fp, " maximum GC nesting level: %lu\n", ULSTAT(maxlevel));
    fprintf(fp, "potentially useful GC calls: %lu\n", ULSTAT(poke));
    fprintf(fp, " useless GC calls: %lu\n", ULSTAT(nopoke));
    fprintf(fp, " thing arenas freed so far: %lu\n", ULSTAT(afree));
    fprintf(fp, " stack segments scanned: %lu\n", ULSTAT(stackseg));
    fprintf(fp, "stack segment slots scanned: %lu\n", ULSTAT(segslots));
    fprintf(fp, "reachable closeable objects: %lu\n", ULSTAT(nclose));
    fprintf(fp, " max reachable closeable: %lu\n", ULSTAT(maxnclose));
    fprintf(fp, " scheduled close hooks: %lu\n", ULSTAT(closelater));
    fprintf(fp, " max scheduled close hooks: %lu\n", ULSTAT(maxcloselater));
#undef UL
/* BUGFIX: was "#undef US" (never defined), leaking ULSTAT past this scope. */
#undef ULSTAT
#ifdef JS_ARENAMETER
    JS_DumpArenaStats(fp);
#endif
}
#endif
#ifdef DEBUG
static void
CheckLeakedRoots(JSRuntime *rt);
#endif
/*
 * Tear down the runtime's GC state at shutdown: dump metering stats, free
 * the iterator table and arenas, then destroy the root and lock hashes.
 * Order matters: stats are dumped before anything is freed.
 */
void
js_FinishGC(JSRuntime *rt)
{
#ifdef JS_ARENAMETER
    JS_DumpArenaStats(stdout);
#endif
#ifdef JS_GCMETER
    js_DumpGCStats(rt, stdout);
#endif

    FreePtrTable(&rt->gcIteratorTable, &iteratorTableInfo);
#if JS_HAS_GENERATORS
    /* Drop close-hook bookkeeping before the arenas holding generators go. */
    rt->gcCloseState.reachableList = NULL;
    METER(rt->gcStats.nclose = 0);
    rt->gcCloseState.todoQueue = NULL;
#endif
    FinishGCArenaLists(rt);

    /* ops == NULL means js_InitGC failed and the table was never live. */
    if (rt->gcRootsHash.ops) {
#ifdef DEBUG
        CheckLeakedRoots(rt);
#endif
        JS_DHashTableFinish(&rt->gcRootsHash);
        rt->gcRootsHash.ops = NULL;
    }
    if (rt->gcLocksHash) {
        JS_DHashTableDestroy(rt->gcLocksHash);
        rt->gcLocksHash = NULL;
    }
}
/*
 * Context-level wrapper for js_AddRootRT that reports OOM on cx when the
 * root cannot be added.
 */
JSBool
js_AddRoot(JSContext *cx, void *rp, const char *name)
{
    if (js_AddRootRT(cx->runtime, rp, name))
        return JS_TRUE;
    JS_ReportOutOfMemory(cx);
    return JS_FALSE;
}
/*
 * Register rp as a GC root named name on rt.  Returns JS_FALSE on OOM
 * (no report here; js_AddRoot reports on its context).
 */
JSBool
js_AddRootRT(JSRuntime *rt, void *rp, const char *name)
{
    JSGCRootHashEntry *entry;
    JSBool ok;

    /*
     * Due to the long-standing, but now removed, use of rt->gcLock across the
     * bulk of js_GC, API users have come to depend on JS_AddRoot etc. locking
     * properly with a racing GC, without calling JS_AddRoot from a request.
     * We have to preserve API compatibility here, now that we avoid holding
     * rt->gcLock across the mark phase (including the root hashtable mark).
     *
     * If the GC is running and we're called on another thread, wait for this
     * GC activation to finish. We can safely wait here (in the case where we
     * are called within a request on another thread's context) without fear
     * of deadlock because the GC doesn't set rt->gcRunning until after it has
     * waited for all active requests to end.
     */
    JS_LOCK_GC(rt);
#ifdef JS_THREADSAFE
    JS_ASSERT(!rt->gcRunning || rt->gcLevel > 0);
    if (rt->gcRunning && rt->gcThread->id != js_CurrentThreadId()) {
        do {
            JS_AWAIT_GC_DONE(rt);
        } while (rt->gcLevel > 0);
    }
#endif
    entry = (JSGCRootHashEntry *)
            JS_DHashTableOperate(&rt->gcRootsHash, rp, JS_DHASH_ADD);
    ok = (entry != NULL);
    if (ok) {
        entry->root = rp;
        entry->name = name;
    }
    JS_UNLOCK_GC(rt);
    return ok;
}
/*
 * Unregister a GC root previously added via js_AddRoot[RT].  Always
 * succeeds; removing a nonexistent root is a no-op.
 */
JSBool
js_RemoveRoot(JSRuntime *rt, void *rp)
{
    /*
     * Due to the JS_RemoveRootRT API, we may be called outside of a request.
     * Same synchronization drill as above in js_AddRoot.
     */
    JS_LOCK_GC(rt);
#ifdef JS_THREADSAFE
    JS_ASSERT(!rt->gcRunning || rt->gcLevel > 0);
    /* If another thread's GC activation is running, wait for it to end. */
    if (rt->gcRunning && rt->gcThread->id != js_CurrentThreadId()) {
        do {
            JS_AWAIT_GC_DONE(rt);
        } while (rt->gcLevel > 0);
    }
#endif
    (void) JS_DHashTableOperate(&rt->gcRootsHash, rp, JS_DHASH_REMOVE);
    /* The removed root may have been the last reference keeping garbage. */
    rt->gcPoke = JS_TRUE;
    JS_UNLOCK_GC(rt);
    return JS_TRUE;
}
#ifdef DEBUG
/*
 * Hash enumerator: count one leaked root into *arg and print its name (if
 * any) and address to stderr.
 */
JS_STATIC_DLL_CALLBACK(JSDHashOperator)
js_root_printer(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 i, void *arg)
{
    uint32 *leakCounter = (uint32 *)arg;
    JSGCRootHashEntry *entry = (JSGCRootHashEntry *)hdr;

    ++*leakCounter;
    fprintf(stderr,
            "JS engine warning: leaking GC root \'%s\' at %p\n",
            entry->name ? (char *)entry->name : "", entry->root);
    return JS_DHASH_NEXT;
}
/*
 * Warn (but don't assert) debug builds about any roots still registered
 * when the runtime is destroyed.
 */
static void
CheckLeakedRoots(JSRuntime *rt)
{
    uint32 leakCount = 0;

    JS_DHashTableEnumerate(&rt->gcRootsHash, js_root_printer, &leakCount);
    if (leakCount == 0)
        return;

    if (leakCount == 1) {
        fprintf(stderr,
                "JS engine warning: 1 GC root remains after destroying the JSRuntime.\n"
                " This root may point to freed memory. Objects reachable\n"
                " through it have not been finalized.\n");
    } else {
        fprintf(stderr,
                "JS engine warning: %lu GC roots remain after destroying the JSRuntime.\n"
                " These roots may point to freed memory. Objects reachable\n"
                " through them have not been finalized.\n",
                (unsigned long) leakCount);
    }
}
/* Closure passed through JS_DHashTableEnumerate by js_DumpNamedRoots. */
typedef struct NamedRootDumpArgs {
    void (*dump)(const char *name, void *rp, void *data);   /* per-root callback */
    void *data;                                             /* caller's opaque data */
} NamedRootDumpArgs;
/* Hash enumerator: invoke the dump callback for each root that has a name. */
JS_STATIC_DLL_CALLBACK(JSDHashOperator)
js_named_root_dumper(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
                     void *arg)
{
    NamedRootDumpArgs *dumpArgs = (NamedRootDumpArgs *) arg;
    JSGCRootHashEntry *entry = (JSGCRootHashEntry *)hdr;

    if (entry->name)
        dumpArgs->dump(entry->name, entry->root, dumpArgs->data);
    return JS_DHASH_NEXT;
}
/* Invoke dump(name, rootAddress, data) for every named GC root in rt. */
void
js_DumpNamedRoots(JSRuntime *rt,
                  void (*dump)(const char *name, void *rp, void *data),
                  void *data)
{
    NamedRootDumpArgs closure;

    closure.dump = dump;
    closure.data = data;
    JS_DHashTableEnumerate(&rt->gcRootsHash, js_named_root_dumper, &closure);
}
#endif /* DEBUG */
/* Closure passed through JS_DHashTableEnumerate by js_MapGCRoots. */
typedef struct GCRootMapArgs {
    JSGCRootMapFun map;     /* user mapping function */
    void *data;             /* caller's opaque data */
} GCRootMapArgs;
/*
 * Hash enumerator adapting a JSGCRootMapFun to JS_DHashTableEnumerate:
 * applies the user map function and translates its JS_MAP_GCROOT_* result
 * flags into a JSDHashOperator.
 */
JS_STATIC_DLL_CALLBACK(JSDHashOperator)
js_gcroot_mapper(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
                 void *arg)
{
    GCRootMapArgs *args = (GCRootMapArgs *) arg;
    JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
    intN mapflags;
    JSDHashOperator op;

    mapflags = args->map(rhe->root, rhe->name, args->data);

#if JS_MAP_GCROOT_NEXT == JS_DHASH_NEXT &&                                   \
    JS_MAP_GCROOT_STOP == JS_DHASH_STOP &&                                   \
    JS_MAP_GCROOT_REMOVE == JS_DHASH_REMOVE
    /* The flag values coincide, so the result can be cast directly. */
    op = (JSDHashOperator)mapflags;
#else
    /* Otherwise translate flag by flag. */
    op = JS_DHASH_NEXT;
    if (mapflags & JS_MAP_GCROOT_STOP)
        op |= JS_DHASH_STOP;
    if (mapflags & JS_MAP_GCROOT_REMOVE)
        op |= JS_DHASH_REMOVE;
#endif

    return op;
}
/*
 * Apply map(root, name, data) to every registered GC root under the GC
 * lock; return the number of entries enumerated.
 */
uint32
js_MapGCRoots(JSRuntime *rt, JSGCRootMapFun map, void *data)
{
    GCRootMapArgs closure;
    uint32 visited;

    closure.map = map;
    closure.data = data;
    JS_LOCK_GC(rt);
    visited = JS_DHashTableEnumerate(&rt->gcRootsHash, js_gcroot_mapper,
                                     &closure);
    JS_UNLOCK_GC(rt);
    return visited;
}
/*
 * Record obj in the runtime's table of iterators whose state must be
 * closed when they become garbage.  Returns JS_FALSE on OOM.
 */
JSBool
js_RegisterCloseableIterator(JSContext *cx, JSObject *obj)
{
    JSRuntime *rt = cx->runtime;
    JSBool added;

    JS_ASSERT(!rt->gcRunning);

    /* The table is runtime-wide, so append under the GC lock. */
    JS_LOCK_GC(rt);
    added = AddToPtrTable(cx, &rt->gcIteratorTable, &iteratorTableInfo, obj);
    JS_UNLOCK_GC(rt);
    return added;
}
/*
 * Sweep the registered-iterator table: close the state of iterators about
 * to be finalized and compact the survivors in place.
 */
static void
CloseIteratorStates(JSContext *cx)
{
    JSRuntime *rt;
    size_t total, kept, idx;
    void **slots;
    JSObject *iterobj;

    rt = cx->runtime;
    total = rt->gcIteratorTable.count;
    slots = rt->gcIteratorTable.array;

    kept = 0;
    for (idx = 0; idx != total; ++idx) {
        iterobj = (JSObject *)slots[idx];
        if (js_IsAboutToBeFinalized(cx, iterobj))
            js_CloseIteratorState(cx, iterobj);
        else
            slots[kept++] = iterobj;
    }
    ShrinkPtrTable(&rt->gcIteratorTable, &iteratorTableInfo, kept);
}
#if JS_HAS_GENERATORS
/*
 * Link a newborn generator into the runtime's reachable-generator list so
 * its close hook can be scheduled when it becomes unreachable.
 */
void
js_RegisterGenerator(JSContext *cx, JSGenerator *gen)
{
    JSRuntime *rt;

    rt = cx->runtime;
    JS_ASSERT(!rt->gcRunning);
    JS_ASSERT(rt->state != JSRTS_LANDING);
    JS_ASSERT(gen->state == JSGEN_NEWBORN);

    /* The list is runtime-wide, so link under the GC lock. */
    JS_LOCK_GC(rt);
    gen->next = rt->gcCloseState.reachableList;
    rt->gcCloseState.reachableList = gen;
    METER(rt->gcStats.nclose++);
    METER(rt->gcStats.maxnclose = JS_MAX(rt->gcStats.maxnclose,
                                         rt->gcStats.nclose));
    JS_UNLOCK_GC(rt);
}
/*
* We do not run close hooks when the parent scope of the generator instance
* becomes unreachable to prevent denial-of-service and resource leakage from
* misbehaved generators.