/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1993 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to copy this garbage collector for any purpose,
 * provided the above notices are retained on all copies.
 */

# ifndef GC_PRIVATE_H
# define GC_PRIVATE_H

# ifndef GC_H
#   include "gc.h"
# endif

typedef GC_word word;
typedef GC_signed_word signed_word;

# ifndef CONFIG_H
#   include "config.h"
# endif

# ifndef HEADERS_H
#   include "gc_headers.h"
# endif

# ifndef bool
    typedef int bool;
# endif
# define TRUE 1
# define FALSE 0

typedef char * ptr_t;   /* A generic pointer to which we can add */
                        /* byte displacements.                   */

#ifdef __STDC__
#   if !(defined( sony_news ) )
#       include <stddef.h>
#   endif
    typedef void * extern_ptr_t;
#   define VOLATILE volatile
#else
    typedef char * extern_ptr_t;
#   define VOLATILE
#endif

# ifndef OS2
#   include <sys/types.h>
# endif

/*********************************/
/*                               */
/* Definitions for conservative  */
/* collector                     */
/*                               */
/*********************************/

/*********************************/
/*                               */
/* Easily changeable parameters  */
/*                               */
/*********************************/

#define STUBBORN_ALLOC  /* Define stubborn allocation primitives. */

#define ALL_INTERIOR_POINTERS
                    /* Forces all pointers into the interior of an     */
                    /* object to be considered valid.  Also causes the */
                    /* sizes of all objects to be inflated by at least */
                    /* one byte.  This should suffice to guarantee     */
                    /* that in the presence of a compiler that does    */
                    /* not perform garbage-collector-unsafe            */
                    /* optimizations, all portable, strictly ANSI      */
                    /* conforming C programs should be safely usable   */
                    /* with malloc replaced by GC_malloc and free      */
                    /* calls removed.  There are several disadvantages:*/
                    /* 1. There are probably no interesting, portable, */
                    /*    strictly ANSI conforming C programs.         */
                    /* 2. This option makes it hard for the collector  */
                    /*    to allocate space that is not ``pointed to'' */
                    /*    by integers, etc.  Under SunOS 4.X with a    */
                    /*    statically linked libc, we empirically       */
                    /*    observed that it would be difficult to       */
                    /*    allocate individual objects larger than 100K.*/
                    /*    Even if only smaller objects are allocated,  */
                    /*    more swap space is likely to be needed.      */
                    /*    Fortunately, much of this will never be      */
                    /*    touched.                                     */
                    /* If you can easily avoid using this option, do.  */
                    /* If not, try to keep individual objects small.   */
#undef ALL_INTERIOR_POINTERS

#define PRINTSTATS      /* Print garbage collection statistics.        */
                        /* For less verbose output, undefine in        */
                        /* reclaim.c.                                  */

#define PRINTTIMES      /* Print the amount of time consumed by each   */
                        /* garbage collection.                         */

#define PRINTBLOCKS     /* Print object sizes associated with heap     */
                        /* blocks, whether the objects are atomic or   */
                        /* composite, and whether or not the block was */
                        /* found to be empty during the reclaim phase. */
                        /* Typically generates about one screenful per */
                        /* garbage collection.                         */
#undef PRINTBLOCKS

#define PRINTBLACKLIST  /* Print black listed blocks, i.e. values that */
                        /* cause the allocator to avoid allocating     */
                        /* certain blocks in order to avoid            */
                        /* introducing "false hits".                   */
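/* Illustrative sketch (not part of the original header): what the     */
/* ALL_INTERIOR_POINTERS option above buys the client.  With the       */
/* option defined, a pointer into the middle of an object suffices to  */
/* keep it from being reclaimed; without it, only pointers to the      */
/* object's beginning (or to registered displacements) are recognized. */
#if 0
    char * obj = (char *)GC_malloc(1000);   /* GC_malloc is from gc.h  */
    char * mid = obj + 500;                  /* interior pointer        */
    obj = 0;
    /* With ALL_INTERIOR_POINTERS, 'mid' alone retains the whole       */
    /* 1000 byte object across collections.                            */
#endif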
#undef PRINTBLACKLIST

#ifdef SILENT
#  ifdef PRINTSTATS
#    undef PRINTSTATS
#  endif
#  ifdef PRINTTIMES
#    undef PRINTTIMES
#  endif
#  ifdef PRINTBLOCKS
#    undef PRINTBLOCKS
#  endif
#endif

#if defined(PRINTSTATS) && !defined(GATHERSTATS)
#   define GATHERSTATS
#endif

#ifdef SPARC
#   define ALIGN_DOUBLE /* Align objects of size > 1 word on 2 word    */
                        /* boundaries.  Wasteful of memory, but        */
                        /* apparently required by SPARC architecture.  */
#endif

#if defined(SPARC) || defined(M68K) && defined(SUNOS)
#  if !defined(PCR)
#    define DYNAMIC_LOADING     /* Search dynamic libraries for roots. */
#  else
     /* PCR handles any dynamic loading, whether with dlopen or        */
     /* otherwise.                                                     */
#  endif
#endif

#define MERGE_SIZES /* Round up some object sizes, so that fewer       */
                    /* distinct free lists are actually maintained.    */
                    /* This applies only to the top level routines in  */
                    /* misc.c, not to user generated code that calls   */
                    /* GC_allocobj and GC_allocaobj directly.          */
                    /* Slows down average programs slightly.  May      */
                    /* however substantially reduce fragmentation if   */
                    /* allocation request sizes are widely scattered.  */
                    /* May save significant amounts of space for       */
                    /* obj_map entries.                                */

/* ALIGN_DOUBLE requires MERGE_SIZES at present. */
# if defined(ALIGN_DOUBLE) && !defined(MERGE_SIZES)
#   define MERGE_SIZES
# endif

# define HINCR 16           /* Initial heap increment, in blocks of 4K */
# define MAXHINCR 512       /* Maximum heap increment, in blocks       */
# define HINCR_MULT 5       /* After each new allocation, GC_hincr is  */
# define HINCR_DIV 4        /* multiplied by HINCR_MULT/HINCR_DIV.     */
# define GC_MULT 3          /* Don't collect if the fraction of        */
                            /* non-collectable memory in the heap      */
                            /* exceeds GC_MULT/GC_DIV.                 */
# define GC_DIV 4
# define NON_GC_HINCR ((word)8)
                            /* Heap increment to use if a collection   */
                            /* was suppressed because most of the heap */
                            /* is not collectable.                     */

/*********************************/
/*                               */
/* OS interface routines         */
/*                               */
/*********************************/

#include <time.h>
#if !defined(__STDC__) && defined(SPARC) && defined(SUNOS4)
    clock_t clock();    /* Not in time.h, where it belongs. */
#endif
#if !defined(CLOCKS_PER_SEC)
#   define CLOCKS_PER_SEC 1000000
/*
 * This is technically a bug in the implementation.  ANSI requires that
 * CLOCKS_PER_SEC be defined.  But at least under SunOS 4.1.1, it isn't.
 * Also note that the combination of ANSI C and POSIX is incredibly gross
 * here.  The type clock_t is used by both clock() and times().  But on
 * some machines these use different notions of a clock tick; CLOCKS_PER_SEC
 * seems to apply only to clock.  Hence we use it here.  On many machines,
 * including SunOS, clock actually uses units of microseconds (which are
 * not really clock ticks).
 */
#endif
#define CLOCK_TYPE clock_t
#define GET_TIME(x) x = clock()
#define MS_TIME_DIFF(a,b) ((unsigned long) \
                (1000.0*(double)((a)-(b))/(double)CLOCKS_PER_SEC))

/* We use bzero and bcopy internally.  They may not be available. */
# if defined(SPARC) && defined(SUNOS4)
#   define BCOPY_EXISTS
# endif
# if defined(M68K) && defined(SUNOS)
#   define BCOPY_EXISTS
# endif
# if defined(VAX)
#   define BCOPY_EXISTS
# endif

# ifndef BCOPY_EXISTS
#   include <string.h>
#   define bcopy(x,y,n) memcpy(y,x,n)
#   define bzero(x,n) memset(x, 0, n)
# endif
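/* Illustrative sketch (not part of the original header): the intended */
/* use of the timing macros above, in the style of the PRINTTIMES      */
/* output.  GC_printf1 is declared near the end of this file.          */
#if 0
    CLOCK_TYPE start_time;
    CLOCK_TYPE done_time;

    GET_TIME(start_time);
    /* ... phase being timed ... */
    GET_TIME(done_time);
    GC_printf1("Phase took %lu msecs\n",
               MS_TIME_DIFF(done_time, start_time));
#endif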
/* HBLKSIZE aligned allocation.  0 is taken to mean failure. */
/* The space is assumed to be cleared.                       */
# ifdef PCR
    char * real_malloc();
#   define GET_MEM(bytes) HBLKPTR(real_malloc((size_t)bytes + HBLKSIZE) \
                                  + HBLKSIZE-1)
#   define THREADS
# else
#   ifdef OS2
      void * os2_alloc(size_t bytes);
#     define GET_MEM(bytes) HBLKPTR((ptr_t)os2_alloc((size_t)bytes + HBLKSIZE) \
                                    + HBLKSIZE-1)
#   else
      extern ptr_t GC_unix_get_mem();
#     define GET_MEM(bytes) (struct hblk *)GC_unix_get_mem(bytes)
#   endif
# endif

/*
 * Mutual exclusion between allocator/collector routines.
 * Needed if there is more than one allocator thread.
 * FASTLOCK() is assumed to try to acquire the lock in a cheap and
 * dirty way that is acceptable for a few instructions, e.g. by
 * inhibiting preemption.  This is assumed to have succeeded only
 * if a subsequent call to FASTLOCK_SUCCEEDED() returns TRUE.
 * FASTUNLOCK() is called whether or not FASTLOCK_SUCCEEDED().
 * If signals cannot be tolerated with the FASTLOCK held, then
 * FASTLOCK should disable signals.  The code executed under
 * FASTLOCK is otherwise immune to interruption, provided it is
 * not restarted.
 * DCL_LOCK_STATE declares any local variables needed by LOCK and UNLOCK
 * and/or DISABLE_SIGNALS and ENABLE_SIGNALS and/or FASTLOCK.
 * (There is currently no equivalent for FASTLOCK.)
 */
# ifdef PCR
#   include "th/PCR_Th.h"
#   include "th/PCR_ThCrSec.h"
    extern struct PCR_Th_MLRep GC_allocate_ml;
#   define DCL_LOCK_STATE PCR_sigset_t GC_old_sig_mask
#   define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
#   define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
#   define FASTLOCK() PCR_ThCrSec_EnterSys()
    /* Here we cheat (a lot): */
#   define FASTLOCK_SUCCEEDED() (*(int *)(&GC_allocate_ml) == 0)
                /* TRUE if nobody currently holds the lock */
#   define FASTUNLOCK() PCR_ThCrSec_ExitSys()
# else
#   define DCL_LOCK_STATE
#   define LOCK()
#   define UNLOCK()
#   define FASTLOCK() LOCK()
#   define FASTLOCK_SUCCEEDED() TRUE
#   define FASTUNLOCK() UNLOCK()
# endif

/* Delay any interrupts or signals that may abort this thread.  Data   */
/* structures are in a consistent state outside this pair of calls.    */
/* ANSI C allows both to be empty (though the standard isn't very      */
/* clear on that point).  Standard malloc implementations are usually  */
/* neither interruptable nor thread-safe, and thus correspond to       */
/* empty definitions.                                                  */
# ifdef PCR
#   define DISABLE_SIGNALS() \
                PCR_Th_SetSigMask(PCR_allSigsBlocked,&GC_old_sig_mask)
#   define ENABLE_SIGNALS() \
                PCR_Th_SetSigMask(&GC_old_sig_mask, NIL)
# else
#   if 0        /* Useful for debugging, and unusually  */
                /* correct client code.                 */
#     define DISABLE_SIGNALS()
#     define ENABLE_SIGNALS()
#   else
#     define DISABLE_SIGNALS() GC_disable_signals()
        void GC_disable_signals();
#     define ENABLE_SIGNALS() GC_enable_signals()
        void GC_enable_signals();
#   endif
# endif

/*
 * Stop and restart mutator threads.
 */
# ifdef PCR
#   include "th/PCR_ThCtl.h"
#   define STOP_WORLD() \
        PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_stopNormal, \
                                   PCR_allSigsBlocked, \
                                   PCR_waitForever)
#   define START_WORLD() \
        PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_null, \
                                   PCR_allSigsBlocked, \
                                   PCR_waitForever);
# else
#   define STOP_WORLD()
#   define START_WORLD()
# endif

/* Abandon ship */
# ifdef PCR
    void PCR_Base_Panic(const char *fmt, ...);
#   define ABORT(s) PCR_Base_Panic(s)
# else
#   define ABORT(s) abort(s)
# endif

/* Exit abnormally, but without making a mess (e.g. out of memory) */
# ifdef PCR
    void PCR_Base_Exit(int status);
#   define EXIT() PCR_Base_Exit(1)
# else
#   define EXIT() (void)exit(1)
# endif
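/* Illustrative sketch (not part of the original header): the locking  */
/* discipline described above, as it would appear inside an allocator  */
/* entry point that may be called from more than one thread.  Note     */
/* that FASTUNLOCK() is invoked whether or not FASTLOCK() succeeded.   */
#if 0
    DCL_LOCK_STATE;

    FASTLOCK();
    if (!FASTLOCK_SUCCEEDED()) {
        FASTUNLOCK();
        /* Fall back to the full lock, with signals disabled. */
        DISABLE_SIGNALS();
        LOCK();
        /* ... slow path ... */
        UNLOCK();
        ENABLE_SIGNALS();
    } else {
        /* ... fast path, a few instructions only ... */
        FASTUNLOCK();
    }
#endif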
/* Print warning message, e.g. almost out of memory. */
# define WARN(s) GC_printf0(s)

/*********************************/
/*                               */
/* Word-size-dependent defines   */
/*                               */
/*********************************/

#if CPP_WORDSZ == 32
#  define WORDS_TO_BYTES(x) ((x)<<2)
#  define BYTES_TO_WORDS(x) ((x)>>2)
#  define LOGWL ((word)5)           /* log[2] of CPP_WORDSZ */
#  define modWORDSZ(n) ((n) & 0x1f) /* n mod size of word   */
#endif

#if CPP_WORDSZ == 64
#  define WORDS_TO_BYTES(x) ((x)<<3)
#  define BYTES_TO_WORDS(x) ((x)>>3)
#  define LOGWL ((word)6)           /* log[2] of CPP_WORDSZ */
#  define modWORDSZ(n) ((n) & 0x3f) /* n mod size of word   */
#endif

#define WORDSZ ((word)CPP_WORDSZ)
#define SIGNB ((word)1 << (WORDSZ-1))
#define BYTES_PER_WORD ((word)(sizeof (word)))
#define ONES ((word)(-1))
#define divWORDSZ(n) ((n) >> LOGWL)     /* divide n by size of word */

/*********************/
/*                   */
/*  Size Parameters  */
/*                   */
/*********************/

/* heap block size, bytes.  Should be power of 2. */

#if CPP_WORDSZ == 32
#   define CPP_LOG_HBLKSIZE 12
#else
#   define CPP_LOG_HBLKSIZE 13
#endif
#define LOG_HBLKSIZE ((word)CPP_LOG_HBLKSIZE)
#define CPP_HBLKSIZE (1 << CPP_LOG_HBLKSIZE)
#define HBLKSIZE ((word)CPP_HBLKSIZE)

/* max size objects supported by freelist (larger objects may be */
/* allocated, but less efficiently)                              */

#define CPP_MAXOBJSZ BYTES_TO_WORDS(CPP_HBLKSIZE/2)
#define MAXOBJSZ ((word)CPP_MAXOBJSZ)

# define divHBLKSZ(n) ((n) >> LOG_HBLKSIZE)

# define HBLK_PTR_DIFF(p,q) divHBLKSZ((ptr_t)p - (ptr_t)q)
        /* Equivalent to subtracting 2 hblk pointers.          */
        /* We do it this way because a compiler should         */
        /* find it hard to use an integer division             */
        /* instead of a shift.  The bundled SunOS 4.1          */
        /* compiler otherwise sometimes pessimizes the         */
        /* subtraction to involve a call to .div.              */

# define modHBLKSZ(n) ((n) & (HBLKSIZE-1))

# define HBLKPTR(objptr) ((struct hblk *)(((word) (objptr)) & ~(HBLKSIZE-1)))

# define HBLKDISPL(objptr) (((word) (objptr)) & (HBLKSIZE-1))

/* Round up byte allocation requests to integral number of words: */
# ifdef ALL_INTERIOR_POINTERS
#   define ROUNDED_UP_WORDS(n) BYTES_TO_WORDS((n) + WORDS_TO_BYTES(1))
# else
#   define ROUNDED_UP_WORDS(n) BYTES_TO_WORDS((n) + WORDS_TO_BYTES(1) - 1)
# endif

/*
 * Hash table representation of sets of pages.  This assumes it is
 * OK to add spurious entries to sets.
 * Used by black-listing code, and perhaps by dirty bit maintenance code.
 */

# define LOG_PHT_ENTRIES 14     /* Collisions are likely if heap grows */
                                /* to more than 16K hblks = 64MB.      */
                                /* Each hash table occupies 2K bytes.  */
# define PHT_ENTRIES ((word)1 << LOG_PHT_ENTRIES)
# define PHT_SIZE (PHT_ENTRIES >> LOGWL)
typedef word page_hash_table[PHT_SIZE];

# define PHT_HASH(addr) ((((word)(addr)) >> LOG_HBLKSIZE) & (PHT_ENTRIES - 1))

# define get_pht_entry_from_index(bl, index) \
                (((bl)[divWORDSZ(index)] >> modWORDSZ(index)) & 1)
# define set_pht_entry_from_index(bl, index) \
                (bl)[divWORDSZ(index)] |= (word)1 << modWORDSZ(index)
# define clear_pht_entry_from_index(bl, index) \
                (bl)[divWORDSZ(index)] &= ~((word)1 << modWORDSZ(index))
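/* Illustrative sketch (not part of the original header): mapping an   */
/* arbitrary address p to its heap block with HBLKPTR, then testing    */
/* and setting that block's bit in a page hash table bl (e.g. one of   */
/* the black lists).  Spurious entries are harmless, per the comment   */
/* above.                                                              */
#if 0
    struct hblk * h = HBLKPTR(p);       /* block containing p          */
    word i = PHT_HASH(h);               /* its page hash table index   */

    if (!get_pht_entry_from_index(bl, i)) {
        set_pht_entry_from_index(bl, i);
    }
#endif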
/********************************************/
/*                                          */
/*    H e a p   B l o c k s                 */
/*                                          */
/********************************************/

/* heap block header */
#define HBLKMASK (HBLKSIZE-1)

#define BITS_PER_HBLK (HBLKSIZE * 8)

#define MARK_BITS_PER_HBLK (BITS_PER_HBLK/CPP_WORDSZ)
           /* upper bound                                   */
           /* We allocate 1 bit/word.  Only the first word  */
           /* in each object is actually marked.            */

# ifdef ALIGN_DOUBLE
#   define MARK_BITS_SZ (((MARK_BITS_PER_HBLK + 2*CPP_WORDSZ - 1) \
                          / (2*CPP_WORDSZ))*2)
# else
#   define MARK_BITS_SZ ((MARK_BITS_PER_HBLK + CPP_WORDSZ - 1)/CPP_WORDSZ)
# endif
           /* Upper bound on number of mark words per heap block */

/* Mark stack entries. */
typedef struct ms_entry {
    word * mse_start;   /* inclusive */
    word * mse_end;     /* exclusive */
} mse;

typedef mse * (*mark_proc)(/* word * addr, hdr * hhdr, mse * msp, mse * msl */);
          /* Procedure to arrange for the descendants of the object at */
          /* addr to be marked.  Msp points at the top entry on the    */
          /* mark stack.  Msl delimits the hot end of the mark stack.  */
          /* hhdr is the hdr structure corresponding to addr.          */
          /* Returns the new mark stack pointer.                       */

struct hblkhdr {
    word hb_sz;         /* If in use, size in words of objects in the block. */
                        /* If free, the size in bytes of the whole block.    */
    struct hblk * hb_next;      /* Link field for hblk free list         */
                                /* and for lists of chunks waiting to be */
                                /* reclaimed.                            */
    mark_proc hb_mark_proc;     /* Procedure to mark objects.  Can      */
                                /* also be retrieved through obj_kind.  */
                                /* But one level of indirection matters */
                                /* here.                                */
    char* hb_map;       /* A pointer to a pointer validity map of      */
                        /* the block.  See GC_obj_map.                 */
                        /* Valid for all blocks with headers.          */
                        /* Free blocks point to GC_invalid_map.        */
    unsigned short hb_obj_kind;
                        /* Kind of objects in the block.  Each kind    */
                        /* identifies a mark procedure and a set of    */
                        /* list headers.  Sometimes called regions.    */
    unsigned short hb_last_reclaimed;
                        /* Value of GC_gc_no when block was            */
                        /* last allocated or swept.  May wrap.         */
    word hb_marks[MARK_BITS_SZ];
                        /* Bit i in the array refers to the            */
                        /* object starting at the ith word (header     */
                        /* INCLUDED) in the heap block.                */
                        /* The lsb of word 0 is numbered 0.            */
};

/* heap block body */

# define DISCARD_WORDS 0
        /* Number of words to be dropped at the beginning of each block. */
        /* Must be a multiple of WORDSZ.  May reasonably be nonzero      */
        /* on machines that don't guarantee longword alignment of        */
        /* pointers, so that the number of false hits is minimized.      */
        /* 0 and WORDSZ are probably the only reasonable values.         */

# define BODY_SZ ((HBLKSIZE-WORDS_TO_BYTES(DISCARD_WORDS))/sizeof(word))

struct hblk {
#   if (DISCARD_WORDS != 0)
        word garbage[DISCARD_WORDS];
#   endif
    word hb_body[BODY_SZ];
};

# define HDR_WORDS ((word)DISCARD_WORDS)
# define HDR_BYTES ((word)WORDS_TO_BYTES(DISCARD_WORDS))

# define OBJ_SZ_TO_BLOCKS(sz) \
    divHBLKSZ(HDR_BYTES + WORDS_TO_BYTES(sz) + HBLKSIZE-1)
    /* Size of block (in units of HBLKSIZE) needed to hold objects of */
    /* given sz (in words).                                           */

/* Object free list link */
# define obj_link(p) (*(ptr_t *)(p))
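/* Illustrative sketch (not part of the original header): small object */
/* free lists (declared in GC_arrays below) are threaded through the   */
/* first word of each free object via obj_link().  A simplified        */
/* allocation fast path pops the head of the appropriate list;         */
/* GC_objfreelist is the macro defined a little further below.         */
#if 0
    ptr_t * flh = &(GC_objfreelist[sz]);   /* sz = request size, in words */
    ptr_t result = *flh;

    if (result != 0) {
        *flh = obj_link(result);           /* advance the list head    */
        obj_link(result) = 0;              /* clear the link word      */
    }
#endif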
/* lists of all heap blocks and free lists      */
/* These are grouped together in a struct       */
/* so that they can be easily skipped by the    */
/* GC_mark routine.                             */
/* The ordering is weird to make GC_malloc      */
/* faster by keeping the important fields       */
/* sufficiently close together that a           */
/* single load of a base register will do.      */
/* Scalars that could easily appear to          */
/* be pointers are also put here.               */

struct _GC_arrays {
  word _heapsize;
  ptr_t _last_heap_addr;
  ptr_t _prev_heap_addr;
  word _words_allocd_before_gc;
                /* Number of words allocated before this       */
                /* collection cycle.                           */
# ifdef GATHERSTATS
    word _composite_in_use;
                /* Number of words in accessible composite     */
                /* objects.                                    */
    word _atomic_in_use;
                /* Number of words in accessible atomic        */
                /* objects.                                    */
# endif
  word _words_allocd;
        /* Number of words allocated during this collection cycle.     */
  word _words_wasted;
        /* Number of words wasted due to internal fragmentation        */
        /* in large objects allocated since last gc.  Approximate.     */
  word _non_gc_bytes_at_gc;
        /* Number of explicitly managed bytes of storage               */
        /* at last collection.                                         */
  word _mem_freed;
        /* Number of explicitly deallocated words of memory            */
        /* since last collection.                                      */
  ptr_t _objfreelist[MAXOBJSZ+1];
                        /* free list for objects */
# ifdef MERGE_SIZES
    unsigned _size_map[WORDS_TO_BYTES(MAXOBJSZ+1)];
        /* Number of words to allocate for a given allocation request  */
        /* in bytes.                                                   */
# endif
  ptr_t _aobjfreelist[MAXOBJSZ+1];
                        /* free list for atomic objs */
  ptr_t _uobjfreelist[MAXOBJSZ+1];
                        /* uncollectable but traced objs */
# ifdef STUBBORN_ALLOC
    ptr_t _sobjfreelist[MAXOBJSZ+1];
# endif
                        /* free list for immutable objects */
  ptr_t _obj_map[MAXOBJSZ+1];
                /* If not NIL, then a pointer to a map of valid         */
                /* object addresses.  hbh_map[sz][i] is j if the        */
                /* address block_start+i is a valid pointer             */
                /* to an object at                                      */
                /* block_start+i&~3 - WORDS_TO_BYTES(j).                */
                /* (If ALL_INTERIOR_POINTERS is defined, then           */
                /* instead ((short *)(hbh_map[sz])[i] is j if           */
                /* block_start+WORDS_TO_BYTES(i) is in the              */
                /* interior of an object starting at                    */
                /* block_start+WORDS_TO_BYTES(i-j)).                    */
                /* It is OBJ_INVALID if                                 */
                /* block_start+WORDS_TO_BYTES(i) is not                 */
                /* valid as a pointer to an object.                     */
                /* We assume that all values of j <= OBJ_INVALID.       */
                /* The zeroth entry corresponds to large objects.       */
# ifdef ALL_INTERIOR_POINTERS
#   define map_entry_type short
#   define OBJ_INVALID 0x7fff
#   define MAP_ENTRY(map, bytes) \
        (((map_entry_type *)(map))[BYTES_TO_WORDS(bytes)])
#   define MAP_ENTRIES BYTES_TO_WORDS(HBLKSIZE)
#   define MAP_SIZE (MAP_ENTRIES * sizeof(map_entry_type))
#   define OFFSET_VALID(displ) TRUE
#   define CPP_MAX_OFFSET (HBLKSIZE - HDR_BYTES - 1)
#   define MAX_OFFSET ((word)CPP_MAX_OFFSET)
# else
#   define map_entry_type char
#   define OBJ_INVALID 0x7f
#   define MAP_ENTRY(map, bytes) \
        (map)[bytes]
#   define MAP_ENTRIES HBLKSIZE
#   define MAP_SIZE MAP_ENTRIES
#   define CPP_MAX_OFFSET (WORDS_TO_BYTES(OBJ_INVALID) - 1)
#   define MAX_OFFSET ((word)CPP_MAX_OFFSET)
#   define VALID_OFFSET_SZ \
        (CPP_MAX_OFFSET > WORDS_TO_BYTES(CPP_MAXOBJSZ)? \
         CPP_MAX_OFFSET+1 \
         : WORDS_TO_BYTES(CPP_MAXOBJSZ)+1)
  char _valid_offsets[VALID_OFFSET_SZ];
                        /* GC_valid_offsets[i] == TRUE ==> i   */
                        /* is registered as a displacement.    */
#   define OFFSET_VALID(displ) GC_valid_offsets[displ]
  char _modws_valid_offsets[sizeof(word)];
                        /* GC_valid_offsets[i] ==>                      */
                        /* GC_modws_valid_offsets[i%sizeof(word)]       */
# endif
  struct hblk * _reclaim_list[MAXOBJSZ+1];
  struct hblk * _areclaim_list[MAXOBJSZ+1];
  struct hblk * _ureclaim_list[MAXOBJSZ+1];
# ifdef STUBBORN_ALLOC
    struct hblk * _sreclaim_list[MAXOBJSZ+1];
    page_hash_table _changed_pages;
        /* Stubborn object pages that were changed since last call to  */
        /* GC_read_changed.                                            */
    page_hash_table _prev_changed_pages;
        /* Stubborn object pages that were changed before last call to */
        /* GC_read_changed.                                            */
# endif
# if defined(PROC_VDB) || defined(MPROTECT_VDB)
    page_hash_table _grungy_pages;  /* Pages that were dirty at last   */
                                    /* GC_read_dirty.                  */
# endif
# define MAX_HEAP_SECTS 256     /* Separately added heap sections. */
  struct HeapSect {
      ptr_t hs_start;
      word hs_bytes;
  } _heap_sects[MAX_HEAP_SECTS];
  /* Block header index; see gc_headers.h */
  bottom_index _all_nils;
  bottom_index * _top_index [TOP_SZ];
};

extern struct _GC_arrays GC_arrays;

# define GC_objfreelist GC_arrays._objfreelist
# define GC_aobjfreelist GC_arrays._aobjfreelist
# define GC_uobjfreelist GC_arrays._uobjfreelist
# define GC_sobjfreelist GC_arrays._sobjfreelist
# define GC_valid_offsets GC_arrays._valid_offsets
# define GC_modws_valid_offsets GC_arrays._modws_valid_offsets
# define GC_reclaim_list GC_arrays._reclaim_list
# define GC_areclaim_list GC_arrays._areclaim_list
# define GC_ureclaim_list GC_arrays._ureclaim_list
# ifdef STUBBORN_ALLOC
#   define GC_sreclaim_list GC_arrays._sreclaim_list
#   define GC_changed_pages GC_arrays._changed_pages
#   define GC_prev_changed_pages GC_arrays._prev_changed_pages
# endif
# define GC_obj_map GC_arrays._obj_map
# define GC_last_heap_addr GC_arrays._last_heap_addr
# define GC_prev_heap_addr GC_arrays._prev_heap_addr
# define GC_words_allocd GC_arrays._words_allocd
# define GC_words_wasted GC_arrays._words_wasted
# define GC_non_gc_bytes_at_gc GC_arrays._non_gc_bytes_at_gc
# define GC_mem_freed GC_arrays._mem_freed
# define GC_heapsize GC_arrays._heapsize
# define GC_words_allocd_before_gc GC_arrays._words_allocd_before_gc
# define GC_heap_sects GC_arrays._heap_sects
# define GC_all_nils GC_arrays._all_nils
# define GC_top_index GC_arrays._top_index
# if defined(PROC_VDB) || defined(MPROTECT_VDB)
#   define GC_grungy_pages GC_arrays._grungy_pages
# endif
# ifdef GATHERSTATS
#   define GC_composite_in_use GC_arrays._composite_in_use
#   define GC_atomic_in_use GC_arrays._atomic_in_use
# endif
# ifdef MERGE_SIZES
#   define GC_size_map GC_arrays._size_map
# endif

# define beginGC_arrays ((ptr_t)(&GC_arrays))
# define endGC_arrays (((ptr_t)(&GC_arrays)) + (sizeof GC_arrays))

# define MAXOBJKINDS 16

/* Object kinds: */
extern struct obj_kind {
   ptr_t *ok_freelist;  /* Array of free list headers for this kind of */
                        /* object.  Point either to GC_arrays or to    */
                        /* storage allocated with GC_scratch_alloc.    */
   struct hblk **ok_reclaim_list;
                        /* List headers for lists of blocks waiting to */
                        /* be swept.                                   */
   mark_proc ok_mark_proc;
                        /* Procedure to either mark referenced objects,*/
                        /* or push them on the mark stack.             */
   bool ok_init;        /* Clear objects before putting them on the    */
                        /* free list.                                  */
} GC_obj_kinds[MAXOBJKINDS];

/* Predefined kinds: */
# define PTRFREE 0
# define NORMAL 1
# define UNCOLLECTABLE 2
# define STUBBORN 3

extern word GC_n_heap_sects;    /* Number of separately added heap     */
                                /* sections.                           */

extern int GC_n_kinds;

extern char * GC_invalid_map;
                        /* Pointer to the nowhere valid hblk map.      */
                        /* Blocks pointing to this map are free.       */

extern struct hblk * GC_hblkfreelist;
                        /* List of completely empty heap blocks.       */
                        /* Linked through hb_next field of the         */
                        /* header structure associated with the        */
                        /* block.                                      */

extern bool GC_is_initialized;          /* GC_init() has been run.     */

extern bool GC_objects_are_marked;      /* There are marked objects in */
                                        /* the heap.                   */

# ifndef PCR
    extern ptr_t GC_stackbottom;        /* Cool end of user stack      */
# endif

extern word GC_hincr;           /* current heap increment, in blocks   */

extern word GC_root_size;       /* Total size of registered root sections */

extern bool GC_debugging_started;  /* GC_debug_malloc has been called. */

extern ptr_t GC_least_plausible_heap_addr;
extern ptr_t GC_greatest_plausible_heap_addr;
                        /* Bounds on the heap.  Guaranteed valid.      */
                        /* Likely to include future heap expansion.    */
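/* Illustrative sketch (not part of the original header): how the      */
/* predefined kinds are expected to tie GC_obj_kinds to the free       */
/* lists in GC_arrays.  For the NORMAL kind, ok_freelist presumably    */
/* points at GC_objfreelist, so for a request of sz words the two      */
/* expressions below name the same free list header.                   */
#if 0
    ptr_t * flh1 = &(GC_obj_kinds[NORMAL].ok_freelist[sz]);
    ptr_t * flh2 = &(GC_objfreelist[sz]);
#endif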
/* Operations */
# define update_GC_hincr  GC_hincr = (GC_hincr * HINCR_MULT)/HINCR_DIV; \
                          if (GC_hincr > MAXHINCR) {GC_hincr = MAXHINCR;}
# ifndef abs
#   define abs(x) ((x) < 0? (-(x)) : (x))
# endif

/* Marks are in a reserved area in                           */
/* each heap block.  Each word has one mark bit associated   */
/* with it.  Only those corresponding to the beginning of an */
/* object are used.                                          */

/* Mark bit operations */

/*
 * Retrieve, set, clear the mark bit corresponding
 * to the nth word in a given heap block.
 *
 * (Recall that bit n corresponds to object beginning at word n
 * relative to the beginning of the block, including unused words)
 */

# define mark_bit_from_hdr(hhdr,n) (((hhdr)->hb_marks[divWORDSZ(n)] \
                            >> (modWORDSZ(n))) & (word)1)
# define set_mark_bit_from_hdr(hhdr,n) (hhdr)->hb_marks[divWORDSZ(n)] \
                            |= (word)1 << modWORDSZ(n)
# define clear_mark_bit_from_hdr(hhdr,n) (hhdr)->hb_marks[divWORDSZ(n)] \
                            &= ~((word)1 << modWORDSZ(n))
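/* Illustrative sketch (not part of the original header): computing    */
/* the mark bit index for an object and manipulating it with the       */
/* macros above.  HDR() is assumed to be the header lookup macro from  */
/* gc_headers.h; p is assumed to point at the first word of an object. */
#if 0
    struct hblk * h = HBLKPTR(p);
    hdr * hhdr = HDR(h);
    word n = BYTES_TO_WORDS((ptr_t)p - (ptr_t)h); /* word index in block */

    if (!mark_bit_from_hdr(hhdr, n)) {
        set_mark_bit_from_hdr(hhdr, n);
    }
#endif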
/* Important internal collector routines */

void GC_apply_to_all_blocks(/*fn, client_data*/);
                        /* Invoke fn(hbp, client_data) for each        */
                        /* allocated heap block.                       */
struct hblk * GC_next_block(/* struct hblk * h */);
mse * GC_no_mark_proc(/*addr,hhdr,msp,msl*/);
                        /* Mark procedure for PTRFREE objects.         */
mse * GC_normal_mark_proc(/*addr,hhdr,msp,msl*/);
                        /* Mark procedure for NORMAL objects.          */
void GC_mark_init();
void GC_clear_marks();  /* Clear mark bits for all heap objects.       */
void GC_mark_from_mark_stack();
                        /* Mark from everything on the mark stack.     */
                        /* Return after about one page's worth of      */
                        /* work.                                       */
bool GC_mark_stack_empty();
bool GC_mark_some();    /* Perform about one page's worth of marking   */
                        /* work of whatever kind is needed.  Returns   */
                        /* quickly if no collection is in progress.    */
                        /* Return TRUE if mark phase finished.         */
void GC_initiate_full();        /* initiate full collection.           */
void GC_initiate_partial();     /* initiate partial collection.        */
void GC_push_all(/*b,t*/);      /* Push everything in a range          */
                                /* onto mark stack.                    */
void GC_push_dirty(/*b,t*/);    /* Push all possibly changed           */
                                /* subintervals of [b,t) onto          */
                                /* mark stack.                         */
void GC_push_conditional(/* ptr_t b, ptr_t t, bool all */);
                        /* Do either of the above, depending           */
                        /* on the third arg.                           */
void GC_push_all_stack(/*b,t*/);
                        /* As above, but consider                      */
                        /* interior pointers as valid.                 */
void GC_remark();       /* Mark from all marked objects.  Used         */
                        /* only if we had to drop something.           */
void GC_push_one(/*p*/);        /* If p points to an object, mark it   */
                                /* and push contents on the mark stack */
void GC_push_one_checked(/*p*/); /* Ditto, omits plausibility test     */
void GC_push_marked(/* struct hblk * h, hdr * hhdr */);
                /* Push contents of all marked objects in h onto       */
                /* mark stack.                                         */
struct hblk * GC_push_next_marked_dirty(/* h */);
                /* Invoke GC_push_marked on next dirty block above h.  */
                /* Return a pointer just past the end of this block.   */
struct hblk * GC_push_next_marked(/* h */);
                /* Ditto, but also mark from clean pages.              */
struct hblk * GC_push_next_marked_uncollectable(/* h */);
                /* Ditto, but mark only from uncollectable pages.      */
void GC_stopped_mark(); /* Mark from all roots and rescuers            */
                        /* with the world stopped.                     */
void GC_clear_hdr_marks(/* hhdr */);
                        /* Clear the mark bits in a header.            */
void GC_add_roots_inner();
void GC_register_dynamic_libraries();
                /* Add dynamic library data sections to the root set.  */

/* Machine dependent startup routines */
ptr_t GC_get_stack_base();
void GC_register_data_segments();

# ifndef ALL_INTERIOR_POINTERS
    void GC_add_to_black_list_normal(/* bits */);
                        /* Register bits as a possible future false    */
                        /* reference from the heap or static data.     */
#   define GC_ADD_TO_BLACK_LIST_NORMAL(bits) GC_add_to_black_list_normal(bits)
# else
#   define GC_ADD_TO_BLACK_LIST_NORMAL(bits) GC_add_to_black_list_stack(bits)
# endif

void GC_add_to_black_list_stack(/* bits */);
struct hblk * GC_is_black_listed(/* h, len */);
                        /* If there are likely to be false references  */
                        /* to a block starting at h of the indicated   */
                        /* length, then return the next plausible      */
                        /* starting location for h that might avoid    */
                        /* these false references.                     */
void GC_promote_black_lists();
                        /* Declare an end to a black listing phase.    */

ptr_t GC_scratch_alloc(/*bytes*/);
                        /* GC internal memory allocation for           */
                        /* small objects.  Deallocation is not         */
                        /* possible.                                   */
void GC_invalidate_map(/* hdr */);
                        /* Remove the object map associated            */
                        /* with the block.  This identifies            */
                        /* the block as invalid to the mark            */
                        /* routines.                                   */
bool GC_add_map_entry(/*sz*/);
                        /* Add a heap block map for objects of         */
                        /* size sz to obj_map.                         */
                        /* Return FALSE on failure.                    */
void GC_register_displacement_inner(/*offset*/);
                        /* Version of GC_register_displacement         */
                        /* that assumes lock is already held           */
                        /* and signals are already disabled.           */
void GC_init_inner();
void GC_new_hblk(/*size_in_words, kind*/);
                        /* Allocate a new heap block, and build        */
                        /* a free list in it.                          */
struct hblk * GC_allochblk(/*size_in_words, kind*/);
                        /* Allocate a heap block, clear it if          */
                        /* for composite objects, inform               */
                        /* the marker that block is valid              */
                        /* for objects of indicated size.              */
                        /* sz < 0 ==> atomic.                          */
void GC_freehblk();     /* Deallocate a heap block and mark it         */
                        /* as invalid.                                 */
void GC_start_reclaim(/*abort_if_found*/);
                        /* Restore unmarked objects to free            */
                        /* lists, or (if abort_if_found is             */
                        /* TRUE) report them.                          */
                        /* Sweeping of small object pages is           */
                        /* largely deferred.                           */
void GC_continue_reclaim(/*size, kind*/);
                        /* Sweep pages of the given size and           */
                        /* kind, as long as possible, and              */
                        /* as long as the corresponding free           */
                        /* list is empty.                              */
void GC_reclaim_or_delete_all();
                        /* Arrange for all reclaim lists to be         */
                        /* empty.  Judiciously choose between          */
                        /* sweeping and discarding each page.          */
bool GC_gcollect_inner();
                        /* Collect; caller must have acquired          */
                        /* lock and disabled signals.                  */
                        /* FALSE return indicates nothing was          */
                        /* done due to insufficient allocation.        */
void GC_finish_collection();
                        /* Finish collection.  Mark bits are           */
                        /* consistent and lock is still held.          */
bool GC_collect_or_expand(/* needed_blocks */);
                        /* Collect or expand heap in an attempt        */
                        /* to make the indicated number of free        */
                        /* blocks available.  Should be called         */
                        /* until it fails by returning FALSE.          */
void GC_init();         /* Initialize collector.                       */
void GC_collect_a_little(/* n */);
                        /* Do n units worth of garbage                 */
                        /* collection work, if appropriate.            */
                        /* A unit is an amount appropriate for         */
                        /* HBLKSIZE bytes of allocation.               */
ptr_t GC_generic_malloc(/* bytes, kind */);
                        /* Allocate an object of the given             */
                        /* kind.  By default, there are only           */
                        /* two kinds: composite, and atomic.           */
                        /* We claim it's possible for clever           */
                        /* client code that understands GC             */
                        /* internals to add more, e.g. to              */
                        /* communicate object layout info              */
                        /* to the collector.                           */
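/* Illustrative sketch (not part of the original header): routines     */
/* whose names end in _inner, such as GC_register_displacement_inner   */
/* above, assume the allocation lock is held and signals are disabled. */
/* A public wrapper would therefore look roughly like this (the real   */
/* wrapper lives in the corresponding .c file):                        */
#if 0
void GC_register_displacement(offset)
word offset;
{
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    GC_register_displacement_inner(offset);
    UNLOCK();
    ENABLE_SIGNALS();
}
#endif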
ptr_t GC_generic_malloc_inner(/* bytes, kind */);
                        /* Ditto, but I already hold lock, etc.        */
ptr_t GC_generic_malloc_words_small(/*words, kind*/);
                        /* As above, but size in units of words.       */
                        /* Bypasses MERGE_SIZES.  Assumes              */
                        /* words <= MAXOBJSZ.                          */
ptr_t GC_allocobj(/* sz_in_words, kind */);
                        /* Make the indicated                          */
                        /* free list nonempty, and return its          */
                        /* head.                                       */
bool GC_install_header(/*h*/);
                        /* Install a header for block h.               */
                        /* Return FALSE on failure.                    */
bool GC_install_counts(/*h, sz*/);
                        /* Set up forwarding counts for block          */
                        /* h of size sz.                               */
                        /* Return FALSE on failure.                    */
void GC_remove_header(/*h*/);
                        /* Remove the header for block h.              */
void GC_remove_counts(/*h, sz*/);
                        /* Remove forwarding counts for h.             */
hdr * GC_find_header(/*p*/);    /* Debugging only.                     */
void GC_finalize();     /* Perform all indicated finalization actions  */
                        /* on unmarked objects.                        */
void GC_add_to_heap(/*p, bytes*/);
                        /* Add a HBLKSIZE aligned chunk to the heap.   */
void GC_print_obj(/* ptr_t p */);
                        /* P points to somewhere inside an object with */
                        /* debugging info.  Print a human readable     */
                        /* description of the object to stderr.        */
extern void (*GC_check_heap)();
                        /* Check that all objects in the heap with     */
                        /* debugging info are intact.  Print           */
                        /* descriptions of any that are not.           */

/* Virtual dirty bit implementation:            */
/* Each implementation exports the following:   */
void GC_read_dirty();   /* Retrieve dirty bits.                        */
bool GC_page_was_dirty(/* struct hblk * h */);
                        /* Read retrieved dirty bits.                  */
void GC_write_hint(/* struct hblk * h */);
                        /* h is about to be written.                   */
void GC_dirty_init();
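/* Illustrative sketch (not part of the original header): the intended */
/* use of the virtual dirty bit interface above.  GC_read_dirty()      */
/* retrieves the dirty bits; GC_page_was_dirty() then reports whether  */
/* a given block's page may have been written since they were          */
/* retrieved.  The helper below is hypothetical; HDR() is assumed to   */
/* be the header lookup macro from gc_headers.h.                       */
#if 0
static void GC_rescan_block_if_dirty(h)
struct hblk * h;
{
    if (GC_page_was_dirty(h)) {
        hdr * hhdr = HDR(h);

        GC_push_marked(h, hhdr);   /* re-push marked objects in h */
    }
}
#endif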
/* Slow/general mark bit manipulation: */
bool GC_is_marked();
void GC_clear_mark_bit();
void GC_set_mark_bit();

/* Stubborn objects: */
void GC_read_changed();         /* Analogous to GC_read_dirty          */
bool GC_page_was_changed(/* h */);
                                /* Analogous to GC_page_was_dirty      */
void GC_clean_changing_list();  /* Collect obsolete changing list      */
                                /* entries.                            */
void GC_stubborn_init();

/* Debugging print routines: */
void GC_print_block_list();
void GC_print_hblkfreelist();

/* Logging and diagnostic output: */
void GC_printf(/* format, a, b, c, d, e, f */);
                        /* A version of printf that doesn't allocate,  */
                        /* is restricted to long arguments, and        */
                        /* (unfortunately) doesn't use varargs for     */
                        /* portability.  Restricted to 6 args and      */
                        /* 1K total output length.                     */
                        /* (We use sprintf.  Hopefully that doesn't    */
                        /* allocate for long arguments.)               */
# define GC_printf0(f) GC_printf(f, 0l, 0l, 0l, 0l, 0l, 0l)
# define GC_printf1(f,a) GC_printf(f, (long)a, 0l, 0l, 0l, 0l, 0l)
# define GC_printf2(f,a,b) GC_printf(f, (long)a, (long)b, 0l, 0l, 0l, 0l)
# define GC_printf3(f,a,b,c) GC_printf(f, (long)a, (long)b, (long)c, 0l, 0l, 0l)
# define GC_printf4(f,a,b,c,d) GC_printf(f, (long)a, (long)b, (long)c, \
                                         (long)d, 0l, 0l)
# define GC_printf5(f,a,b,c,d,e) GC_printf(f, (long)a, (long)b, (long)c, \
                                           (long)d, (long)e, 0l)
# define GC_printf6(f,a,b,c,d,e,g) GC_printf(f, (long)a, (long)b, (long)c, \
                                             (long)d, (long)e, (long)g)

void GC_err_printf(/* format, a, b, c, d, e, f */);
                        /* Ditto, writes to stderr.                    */
# define GC_err_printf0(f) GC_err_puts(f)
# define GC_err_printf1(f,a) GC_err_printf(f, (long)a, 0l, 0l, 0l, 0l, 0l)
# define GC_err_printf2(f,a,b) GC_err_printf(f, (long)a, (long)b, 0l, 0l, \
                                             0l, 0l)
# define GC_err_printf3(f,a,b,c) GC_err_printf(f, (long)a, (long)b, (long)c, \
                                               0l, 0l, 0l)
# define GC_err_printf4(f,a,b,c,d) GC_err_printf(f, (long)a, (long)b, \
                                                 (long)c, (long)d, 0l, 0l)
# define GC_err_printf5(f,a,b,c,d,e) GC_err_printf(f, (long)a, (long)b, \
                                                   (long)c, (long)d, \
                                                   (long)e, 0l)
# define GC_err_printf6(f,a,b,c,d,e,g) GC_err_printf(f, (long)a, (long)b, \
                                                     (long)c, (long)d, \
                                                     (long)e, (long)g)

void GC_err_puts(/* char *s */);
                        /* Write s to stderr, don't buffer, don't add  */
                        /* newlines, don't ...                         */

# endif /* GC_PRIVATE_H */