/* begincopyright
  Copyright (c) 1988,1990 Xerox Corporation. All rights reserved.
  Use and copying of this software and preparation of derivative works based
  upon this software are permitted. Any distribution of this software or
  derivative works must comply with all applicable United States export
  control laws. This software is made available AS IS, and Xerox Corporation
  makes no warranty about the software, its performance or its conformity to
  any specification. Any person obtaining a copy of this software is requested
  to send their name and post office or electronic mail address to:
    PCR Coordinator
    Xerox PARC
    3333 Coyote Hill Rd.
    Palo Alto, CA 94304
  Parts of this software were derived from code bearing the copyright notice:
    Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
    This material may be freely distributed, provided this notice is retained.
    This material is provided as is, with no warranty expressed or implied.
    Use at your own risk.
  endcopyright */

/*
 * GC.h
 *
 * Basic public type definitions for Xerox Runtime
 * storage management package.
 *
 * Mark Weiser, October 18, 1989
 * Alan Ishigo, November 14, 1988 4:38:01 pm PST
 * Demers, October 24, 1989 1:58:02 pm PDT
 * Boehm, July 19, 1990 1:18:27 pm PDT
 *
 */

#ifndef _XR_GC_
#define _XR_GC_ 1

#ifndef _XR_BASIC_TYPES_
#   include "xr/BasicTypes.h"
#endif
/* The garbage collector used to assume sizeof (bool) = sizeof (long). */
/* It may still.                                                        */

#ifndef _XR_THREADS_
#   include "xr/Threads.h"
#endif

/* Public variables.  May be altered by client. */

extern bool GC_dont_gc;
    /* Inhibit collection. */

extern long GC_non_gc_bytes;
    /* Number of bytes declared to be uncollectable. */

extern bool GC_markCarefully;
    /* Try to keep working set down during marking, at the expense of
       about a factor of 2 in cpu time.  Ignored during partial
       collections.  Setting or clearing this has only a transient
       effect, up to at least the next collection. */

extern bool GC_ok_to_panic;
    /* OK to call XR_panic if things go completely haywire. */

extern unsigned GC_partial_gc_allocs;
    /* Number of words to be allocated between partial collections.
       0 ==> use default value based on number of composite objects
       in use. */

extern unsigned GC_full_gc_allocs;
    /* Number of words to be allocated between full collections.
       0 ==> collect only when heap is full. */

extern unsigned GC_free_mem_ratio;
    /* Collector tries to expand heap until
       2*GC_composite_in_use/GC_free_mem_ratio words are reclaimed at
       full collections.  Greater values imply smaller heaps, but risk
       excessive GC frequency.  This is all very approximate. */

extern bool GC_fix_heap_size;
    /* Avoid expanding the heap if at all possible. */

extern unsigned long GC_max_heap_size;
    /* An attempt to expand the heap past this amount will fail. */

typedef void (*GC_expand_call_back_type)(/* XR_Pointer client_data */);

void GC_register_expand_callback(/* GC_expand_call_back_type fn,
                                    XR_Pointer data,
                                    GC_expand_call_back_type *Ofn,
                                    XR_Pointer *Odata */);
    /* Register a function to be called when the collector would have
       liked to expand the heap, but couldn't.  The callback function
       can't safely do much other than set a global flag or increase
       GC_max_heap_size. */

/* Public variables.  Read only for the client.  Set by collector. */

extern char * GC_heapstart;
    /* A lower bound on all heap addresses.  Known to be HBLKSIZE
       aligned.  Zero before GC initialization. */
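/*
 * Example (illustrative sketch, not part of the original interface):
 * one way a client might use GC_register_expand_callback, granting the
 * collector an extra megabyte of headroom whenever expansion fails.
 * The include path "xr/GC.h", the function names, and the 1 MB policy
 * are assumptions made for this sketch only.
 */
#if 0
#include "xr/GC.h"

static void
ExampleGrowOnDemand(clientData)
    XR_Pointer clientData;
{
    /* Per the comment above, the callback should do little more than */
    /* set a flag or raise GC_max_heap_size.                          */
    GC_max_heap_size += 1024 * 1024;
}

static void
ExampleInstallExpandCallback()
{
    GC_expand_call_back_type oldFn;
    XR_Pointer oldData;

    GC_register_expand_callback(ExampleGrowOnDemand, (XR_Pointer)0,
                                &oldFn, &oldData);
}
#endif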
extern char * GC_heaplim;
    /* 1 + last address in heap. */

extern long GC_heapsize;
    /* Heap size in bytes. */

extern char * GC_sys_mem_end;
    /* 1 + end of memory allocated from system.  May differ from
       GC_heaplim if other allocation, e.g. for block headers, is
       going on. */

extern long GC_mem_found;
    /* Number of words reclaimed since start of last collection.
       Only a lower bound. */

extern long GC_mem_freed;
    /* Number of longwords explicitly freed since last garbage
       collection. */

extern long GC_composite_in_use;
    /* Number of longwords in accessible composite objects. */

extern long GC_atomic_in_use;
    /* Number of longwords in accessible atomic objects.
       Both of the above numbers are good upper bounds, as of the
       last collection. */

extern long GC_objects_in_use;
    /* Number of live objects found during last collection. */

extern bool GC_running_exclusive;
    /* True if a single process has taken over all the processors. */

extern bool GC_collection_in_progress;
    /* A parallel collection is currently running.  Changed to true
       only while GC_allocate_ml is held.  Currently not set during
       stop-the-world collections. */

extern char XR_gcVersion[];
    /* Read-only. */

/* Publicly readable variables that may not remain meaningful if the  */
/* collector changes.  Should be used only in disposable code.        */

extern long GC_markfaults;
    /* The number of page faults that occurred during the last mark
       phase. */

extern long GC_max_markfaults;
    /* The recent max of the above. */

extern long GC_gc_no;
    /* How many times have we collected? */

extern bool GC_full_gc;
    /* A full collection is in progress. */

extern bool GC_after_full;
    /* Cleaning up a full GC. */

extern long GC_tenure_count;
    /* Number of tenured blocks. */

extern int GC_n_maps_cached;
    /* The number of heap block maps currently in the cache. */

extern int GC_words_at_full_gc;
    /* Words accessible after last full collection. */

extern int GC_composite_at_full_gc;
    /* Composite words accessible after last full collection. */

/* The following are meaningful for any collector, but can only be       */
/* maintained at significant expense, and may thus eventually disappear. */
/* The object counts are more likely to disappear, since they are        */
/* useless to the collector itself.                                       */

extern unsigned GC_words_allocd;
    /* Number of words allocated since last collection. */

extern unsigned GC_words_allocd_before_gc;
    /* Words allocated up to last garbage collection.  The sum of this
       and the preceding variable is the total number of words
       allocated since the beginning of the world. */

extern unsigned GC_objects_allocd;
    /* Number of objects allocated since last collection. */

extern unsigned GC_objects_allocd_before_gc;
    /* Objects allocated up to last garbage collection. */

/* All routines in the GC interface start with either XR_ or GC_.  Internal */
/* routines start with GC_.                                                  */

/***** Informational Routines *****/

extern unsigned XR_GCCurrentByteCount();
    /* Return the current number of bytes allocated.  This number
       increases continuously (at each allocation) but shrinks only
       after collections. */

extern unsigned XR_GCCurrentObjectCount();
    /* Return the current number of objects allocated.  This number
       increases continuously (at each allocation) but shrinks only
       after collections. */

extern unsigned XR_GCTotalByteCount();
    /* Return the total number of bytes allocated.  This number
       increases continuously. */
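/*
 * Example (illustrative sketch): reading a few of the statistics above.
 * The longword counts are converted to bytes with sizeof(long), which
 * matches the "longwords" wording above; that conversion, the include
 * path, and the function name are assumptions made for this sketch.
 */
#if 0
#include <stdio.h>
#include "xr/GC.h"

static void
ExampleReportHeap()
{
    printf("collections so far:  %ld\n", GC_gc_no);
    printf("heap size:           %ld bytes\n", GC_heapsize);
    printf("composite in use:    %ld bytes\n",
           GC_composite_in_use * (long)sizeof(long));
    printf("atomic in use:       %ld bytes\n",
           GC_atomic_in_use * (long)sizeof(long));
    printf("currently allocated: %u bytes in %u objects\n",
           XR_GCCurrentByteCount(), XR_GCCurrentObjectCount());
}
#endif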
extern unsigned XR_GCTotalObjectCount();
    /* Return the total number of objects allocated.  This number
       increases continuously. */

extern bool XR_NfreePagesP(/* unsigned N */);
    /* Returns 1 if at least N GC pages (i.e. hblocks) are free (no
       objects at all on them) and 0 otherwise.
       (Written this way, rather than just to return the number of
       free pages, to keep working set down by truncating the search.) */

/***** Routines to Control Behavior *****/

/* The ...Set... routines always return the old value. */

extern bool XR_GCGetNeverCollectAtAll(), XR_GCSetNeverCollectAtAll(/* bool */);
    /* A value of TRUE causes no collection activity whatsoever.  A
       value of FALSE causes normal collection behavior (subject to
       the NeverFree boolean). */

extern unsigned XR_GCHeapSize();
    /* Returns the size of the heap in bytes. */

extern bool XR_Increase_Heap(/* unsigned */);
    /* The heap is grown by the indicated number of bytes, rounded up
       to a pagesize.  Returns true if successful, false otherwise. */

extern bool XR_GCSetMarkCarefully(/* bool */), XR_GCGetMarkCarefully(/* bool */);
    /* If this switch is set, then the objects to be marked are kept
       sorted in page order so as to reduce paging behavior, at the
       obvious cost of cpu time.  The switch is set by the collector
       after excessive paging activity, and reset after a full
       collection with low paging activity. */

extern unsigned XR_GCSetMode(/* unsigned */);
#   define GC_INCREMENTAL 1
#   define GC_PARALLEL 2
    /* Turn on or off parallel and/or incremental collection.  Default
       with STICKY_MARK_BITS defined is both.  For batch processes it
       pays to turn off GC_PARALLEL.  For short-lived batch processes,
       and/or batch processes in small address spaces, it pays to turn
       both off.  Turning both off will usually remove virtual dirty
       bit overhead. */

unsigned XR_SetBytesAfterWhichToCollect(/* unsigned */),
         XR_GetBytesAfterWhichToCollect();
    /* A non-zero value causes a collection whenever this many bytes
       have been allocated since the last collection. */

void XR_CollectOnlyWhenFull(),
     XR_CollectAfterTwoMegabytes(),
     XR_CollectAfterOneMegabyte();
    /* Convenient access to XR_SetBytesAfterWhichToCollect for folks
       without access to arguments (e.g., via the 'pcr:' prompt). */

/***** Routines to cause behavior. *****/

/* These routines are all monitored and safe to call at any time.  They */
/* are the basic interfaces into storage management.                    */

void XR_GCollect();
    /* Initiate a garbage collection. */

void XR_GCollect2(/* bool wait, bool full */);
    /* As above; wait ==> return only when finished; full ==> force
       full collection; wait && full also implies stop the world. */

/* General purpose allocation routines: */

XR_Pointer GC_malloc(/* unsigned ObjectSizeInBytes */);
    /* Allocates a new object of at least the specified length.  The
       object is cleared.  It will be aligned on at least a 4 byte
       boundary.  If the architecture requires n byte alignment for
       certain objects, then it will be n byte aligned unless the
       requested size is less than n.  If the requested size is a
       multiple of n, where n is a power of 2 no larger than 16, then
       the object will also be n byte aligned.  No space is implicitly
       reserved for type tags or the like.  Returns (XR_Pointer)0 if
       no memory is available. */

XR_Pointer GC_malloc_atomic(/* unsigned ObjectSizeInBytes */);
    /* Identical to GC_malloc, except that the object is assumed to
       contain no pointers, and the object is not cleared.  Is faster
       than, and results in faster collections than, GC_malloc. */
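/*
 * Example (illustrative sketch): tuning for a batch run and allocating
 * a pointer-containing record plus an atomic buffer.  The record layout
 * and the reading of XR_GCSetMode's argument as a bit mask of the GC_*
 * flags are assumptions made for this sketch.
 */
#if 0
#include "xr/GC.h"

struct ExampleNode {
    struct ExampleNode *next;
    long value;
};

static struct ExampleNode *
ExampleAllocate()
{
    struct ExampleNode *node;
    char *buffer;

    /* Batch process: keep incremental collection, drop GC_PARALLEL. */
    XR_GCSetMode(GC_INCREMENTAL);

    /* Collect after roughly every two megabytes of allocation. */
    XR_CollectAfterTwoMegabytes();

    /* GC_malloc clears the object; pointers inside it are traced. */
    node = (struct ExampleNode *) GC_malloc(sizeof(struct ExampleNode));

    /* GC_malloc_atomic: no pointers inside, contents NOT cleared. */
    buffer = (char *) GC_malloc_atomic(1024);

    if (node == (struct ExampleNode *)0 || buffer == (char *)0) {
        return (struct ExampleNode *)0;     /* out of memory */
    }
    buffer[0] = '\0';       /* atomic objects must be initialized by hand */
    node->next = (struct ExampleNode *)0;
    node->value = 42;
    return node;
}
#endif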
XR_Pointer GC_realloc(/* XR_Pointer old_object, unsigned ObjectSizeInBytes */);
    /* Return a new object with the indicated size, and the contents of
       the old object.  The new object is assumed not to contain any
       pointers if the old object was known not to contain pointers.
       The new object is identical to the old object whenever this can
       be easily arranged.  May be much faster than a new allocation
       followed by a copy, but this probably happens only if the client
       program uses a stupid algorithm.  Unfortunately, such clients
       are common, at least in the C world. */

void GC_free(/* XR_Pointer object */);
    /* Explicitly deallocate an object.  The object should have been
       allocated by one of the above routines, NOT by one of those
       below.  Results in disaster if the object is subsequently
       accessed. */

void XR_free(/* XR_Pointer p */);
    /* Similar to GC_free, but p may point to the interior of an
       object.  May be used with any allocated object. */

XR_Pointer XR_valloc(/* unsigned ObjectSizeInBytes */);
    /* Allocates an object of size at least ObjectSizeInBytes, whose
       first address is at a system page boundary.  The object is also
       made noncollectable, and can be made collectable by calling
       XR_valloc_free.  The object is assumed to contain NO pointers,
       and is not cleared. */

void XR_valloc_free(/* XR_Pointer ObjectAddress */);
    /* Permits an object allocated by XR_valloc to be collected if
       there are no pointers to it. */

void XR_make_uncollectable(/* XR_Pointer RealObjectAddress,
                              XR_Pointer AlternateAddressForRelease */);
    /* Prevents the object pointed to by RealObjectAddress from ever
       being collected. */

void XR_unmake_uncollectable(/* XR_Pointer ObjectAddress */);
    /* Permits an object made uncollectable by a call on
       XR_make_uncollectable to again enter the possible collection
       pool.  ObjectAddress can be either the RealObjectAddress or the
       AlternateAddressForRelease given to XR_make_uncollectable. */

/* The following routines are OBSOLETE.  They return pointers into the */
/* middle of objects, leaving room for 8 byte headers.                 */

XR_Pointer XR_calloc(/* unsigned ObjectCount, ObjectSizeInBytes */);
    /* Allocates a new object of length at least
       ObjectCount * ObjectSizeInBytes.  The object is assumed to
       contain pointers, and is cleared to all zeros.  OBSOLETE */

XR_Pointer XR_clear_new(/* unsigned ObjectCount, ObjectSizeInBytes */);
    /* Allocates a new object of length at least
       ObjectCount * ObjectSizeInBytes.  The object is assumed to
       contain pointers, and is cleared to all zeros.  OBSOLETE */

XR_Pointer XR_new(/* unsigned ObjectSizeInBytes */);
    /* Allocates a new object of at least ObjectSizeInBytes.  The
       object is assumed to contain pointers, and is cleared to all
       zeros. */

XR_Pointer XR_pointerfree_new(/* unsigned ObjectSizeInBytes */);
    /* Allocates a new object of at least ObjectSizeInBytes.  The
       object is assumed to contain NO pointers, and is not cleared.
       OBSOLETE */

XR_Pointer XR_ralloc(/* unsigned ObjectSizeIn32BitWords */);
    /* Allocates a new object of at least ObjectSizeIn32BitWords.  The
       object is assumed to contain NO pointers, and is not cleared.
       OBSOLETE */

XR_Pointer XR_ralloc_comp(/* unsigned ObjectSizeIn32BitWords */);
    /* Allocates a new object of at least ObjectSizeIn32BitWords.  The
       object is assumed to contain pointers, and is cleared to zeros.
       OBSOLETE */

XR_Pointer XR_realloc(/* unsigned ObjectAddress; unsigned ObjectSizeInBytes */);
    /* Allocates a new object of size at least ObjectSizeInBytes, and
       copies the bytes at ObjectAddress into the new object.
       ObjectSizeInBytes bytes are always copied, so XR_realloc only
       makes sense for growing, not shrinking, objects.  The object is
       assumed to contain pointers, and any additional space is
       cleared to zeros.  OBSOLETE */
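/*
 * Example (illustrative sketch): growing a collectable array in place
 * with GC_realloc.  The doubling policy and the function name are
 * assumptions made for this sketch; GC_realloc itself behaves as
 * described above.
 */
#if 0
#include "xr/GC.h"

static XR_Pointer
ExampleGrow(array, oldBytes)
    XR_Pointer array;
    unsigned oldBytes;
{
    /* Contents of the old object are preserved; the result may even  */
    /* be the old object itself when that can be cheaply arranged.    */
    return GC_realloc(array, 2 * oldBytes);
}
#endif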
XR_Pointer XR_malloc(/* unsigned ObjectSizeInBytes */);
    /* Allocates an object of size at least ObjectSizeInBytes, and
       returns its address.  The object is assumed to contain
       pointers, and is cleared to zeros.  OBSOLETE */

void XR_unsafe_free(/* XR_Pointer */);
    /* Alias for XR_free. */

/* The following control routines are also OBSOLETE: */

extern bool XR_GCGetMiserlyHeap(), XR_GCSetMiserlyHeap(/* bool */);
    /* Do nothing at the moment.  Return FALSE. */

/*
 * Allocation and deallocation routines that traffic in uncollectable
 * objects.  These were introduced as a patch to existing problems.
 * They should be considered instantly OBSOLETE.
 */
XR_Pointer XR_UNCollect_malloc(/* long size */);
XR_Pointer XR_UNCollect_calloc(/* long size_elem, long num_elem */);
XR_Pointer XR_UNCollect_realloc(/* XR_Pointer old, long size */);
void XR_UNCollect_free(/* XR_Pointer ptr */);

/***** Unix interface replacements (see Unix manuals). *****/

/* All objects are assumed to possibly contain pointers, except those
   from valloc.  These are here for compatibility with pre-existing
   code, but for the sake of clarity, and because of slightly
   different semantics, it is preferred to use the XR_foo name instead
   of just foo.
       realloc
       cfree
       free
       malloc
       calloc
       valloc - special note: valloced objects in PCR are assumed to
           contain no pointers, and are made not collectable.
           valloc_free is available to make them collectable. */

/* Routines below are not for casual users. */

void XR_add_data_list(/* XR_Pointer startAddress, endAddress */);
    /* To be called by the initializing world to add a root.  Can be
       called at any time to add additional roots.  The words between
       startAddress and endAddress will be used as an additional root
       set for garbage collections.  This is not for casual use: only
       world initialization and dynamic loading ordinarily use it.
       The last word checked starts at endAddress-4. */

typedef struct GC_InfoRep {
    bool gci_full_collection;   /* This is a full collection */
    /* More to come? */
} * GC_Info;

typedef void (*RegisterGCCallbackType)(/* XR_Pointer clientdata, GC_Info info */);

void XR_RegisterGCCallBackBefore(/* RegisterGCCallbackType proc,
                                    XR_Pointer clientdata,
                                    RegisterGCCallbackType *oldproc,
                                    XR_Pointer *oldclientdata */);

void XR_RegisterGCCallBackAfter(/* RegisterGCCallbackType proc,
                                   XR_Pointer clientdata,
                                   RegisterGCCallbackType *oldproc,
                                   XR_Pointer *oldclientdata */);

void XR_RegisterGCCallBackDuringInner(/* RegisterGCCallbackType proc,
                                         XR_Pointer clientdata,
                                         RegisterGCCallbackType *oldproc,
                                         XR_Pointer *oldclientdata */);

    /* Registers subroutines to be called back just before, and just
       after, garbage collection.  These routines are not for casual
       use: they may be called inside the GC monitor lock, and so must
       not allocate any storage.  Also, only one is kept; any new
       registration replaces the old.  (The old proc and client data
       values are returned in the locations pointed to by oldproc and
       oldclientdata, unless these are null.)  Registering NIL turns
       off the callback.  The call is made by the GC daemon thread.
       A failure to return promptly can be disastrous.  The callback
       is made as:
           (*proc)(clientdata, full_gc)
       For present purposes, a collection is defined to be occurring
       only while the world is stopped.  The first two routines are
       called just before and just after this occurs.  The fact that
       the collector does much of its work concurrently is ignored.
       A collection is defined to be full if no objects are preserved
       simply because they survived some combination of previous
       collections.
       The routine XR_RegisterGCCallBackDuringInner expects to be
       called with GC_allocate_ml already held.  The routine
       registered by XR_RegisterGCCallBackDuringInner is called with
       the world stopped, and with the allocate, virtual dirty bit,
       and IOP order locks held.  It is called after all mark bits
       have attained their final value. */
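/*
 * Example (illustrative sketch): counting full collections from the
 * "before" callback.  Following the warning above, the callback only
 * touches a global and returns promptly; it allocates nothing.  The
 * second callback argument is taken to be the GC_Info described by the
 * typedef above; the counter and function names are assumptions made
 * for this sketch.
 */
#if 0
#include "xr/GC.h"

static long exampleFullGCs = 0;

static void
ExampleBeforeGC(clientData, info)
    XR_Pointer clientData;
    GC_Info info;
{
    if (info->gci_full_collection) {
        exampleFullGCs++;
    }
}

static void
ExampleInstallGCCallback()
{
    RegisterGCCallbackType oldProc;
    XR_Pointer oldData;

    XR_RegisterGCCallBackBefore(ExampleBeforeGC, (XR_Pointer)0,
                                &oldProc, &oldData);
}
#endif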
typedef XR_Pointer (*GC_alloc_call_back_type)
    (/* long sz, bool is_atomic, XR_Pointer client_data */);

void GC_register_alloc_callback (/* GC_alloc_call_back_type fn,
                                    XR_Pointer client_data,
                                    GC_alloc_call_back_type *Ofn,
                                    XR_Pointer *Oclient_data */);
    /* Register a routine to be called before every allocation.  If it
       returns a non-NIL value, then that value is returned as the
       result of the allocator call.  Fn is run before the allocation
       monitor lock is acquired, but after acquiring a separate
       monitor lock.  It should be safe to have fn unregister the
       callback (by passing 0 as the fn argument), and then
       recursively invoke the allocator.  A recursive invocation of
       the callback would result in deadlock.
       Client_data is passed to fn.  The old values of fn and
       client_data are returned in the third and fourth arguments.
       Most applications will want to have fn always return NIL. */

void XR_SetupGC();
    /* Set up for garbage collection.  Called only by initializing
       world. */

void GC_register_displacement(/* unsigned DisplacementInBytes */);
    /* Register the given displacement as a valid displacement of a
       pointer into an object.  All values equal to an object address
       plus the given displacement will henceforth be treated as
       pointers.  Ignored if the collector is not compiled to keep
       track of such things.  Calling this after allocation has taken
       place is more expensive, but still safe.  We claim it is
       unreasonable to ever unregister a displacement.  Thus there is
       no way to do it. */

#if defined(FINALIZE)

/***** Finalization *****/

/* The finalization described here is the innermost level, and is not      */
/* intended for direct use.  Rather, language implementors are expected to */
/* wrap their own layers around this.  For instance, there is a different  */
/* Cedar layer for use by Cedar/Mesa programmers.                          */

/*
 * Finalizable Object structures and Finalization Queues:
 *
 * Invariants:
 *
 *   firstword, secondword - encode a pointer to an object
 *      (a) disguised as pair < ptr&0xffff, (ptr>>16)&0xffff >
 *          if it's finalizable
 *      (b) undisguised as < ptr, nil > if it's not finalizable
 *      N.B. since the first 64K of the address space isn't in the heap,
 *      this means (secondword == 0) iff the pointer is undisguised.
 *
 *   associatedFQ - pointer to a FinalizationQueueStructure;
 *      non-NIL iff the object is finalizable or on the finalization queue
 *
 * Thus, finalization states can be determined by:
 *      enabled:  (secondword != 0)
 *      disabled: (associatedFQ == NIL)
 *      onFQ:     otherwise
 */
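/*
 * Example (illustrative sketch): an allocation callback that tallies
 * requests by kind and always returns NIL so the normal allocator
 * proceeds, as the comment above recommends for most applications.
 * The counter and function names are assumptions made for this sketch.
 */
#if 0
#include "xr/GC.h"

static long exampleCompositeRequests = 0;
static long exampleAtomicRequests = 0;

static XR_Pointer
ExampleTallyAlloc(sz, isAtomic, clientData)
    long sz;
    bool isAtomic;
    XR_Pointer clientData;
{
    if (isAtomic) {
        exampleAtomicRequests++;
    } else {
        exampleCompositeRequests++;
    }
    return (XR_Pointer)0;       /* NIL: let the real allocator run */
}

static void
ExampleInstallAllocCallback()
{
    GC_alloc_call_back_type oldFn;
    XR_Pointer oldData;

    GC_register_alloc_callback(ExampleTallyAlloc, (XR_Pointer)0,
                               &oldFn, &oldData);
}
#endif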
typedef struct XR_FinalizationQueueStructure {
    struct XR_FinalizableObjectStructure * head;
    struct XR_FinalizableObjectStructure * tail;
    struct XR_CVRep fqNonempty;
} * XR_FinalizationQueue;

typedef struct XR_FinalizableObjectStructure {
    unsigned long firstword;
    unsigned long secondword;
    XR_FinalizationQueue associatedFQ;
    struct XR_FinalizableObjectStructure *next;
} * XR_FinalizationHandle;

#define XR_IsDisguised(h)   ((h)->secondword != 0)

#define XR_FetchFromDisguised(h) \
    ( ((h)->firstword) | ((h)->secondword << 16) )

#define XR_FetchFromUndisguised(h)  ( (h)->firstword )

#define XR_StoreDisguised(w,h) { \
    (h)->firstword = ((unsigned long)(w)) & 0xffff; \
    (h)->secondword = (((unsigned long)(w)) >> 16) & 0xffff; \
}

#define XR_StoreUndisguised(w,h) { \
    (h)->firstword = ((unsigned long)(w)); \
    (h)->secondword = 0; \
}

typedef enum {
    fzsEnabled = 0,
    fzsOnFQ = 1,
    fzsDisabled = 2,
    fzsError = 0x7fffffff       /* force to 32 bits */
} XR_FinalizationState;

XR_FinalizationQueue XR_NewFQ();
    /* Return a new, empty, initialized finalization queue. */

XR_FinalizationHandle XR_FQNextNoAbort(/* XR_FinalizationQueue fq */);
    /* Return the next handle on queue 'fq', waiting on a condition
       variable until there is an item if necessary.  If there is no
       item, or the wait is interrupted, return NIL. */

bool XR_FQEmpty(/* XR_FinalizationQueue fq */);

XR_FinalizationHandle XR_NewFinalizationHandle();
    /* Return a new, empty, initialized handle for an object to be
       finalized. */

void XR_EnableFinalization(/* XR_Pointer object;
                              XR_FinalizationQueue fq;
                              XR_FinalizationHandle h */);
    /* Cause the object to be enabled for finalization.  'h' is
       updated to describe the object.  When the time comes, fq will
       be the finalization queue on which the object is placed. */

XR_FinalizationState XR_DisableFinalization(/* XR_FinalizationHandle h */);
    /* Disable the object described by 'h' for finalization.  Return
       its prior XR_FinalizationState.  If it is already on a
       finalization queue, remove it. */

XR_FinalizationState XR_ReenableFinalization(/* XR_FinalizationHandle h;
                                                XR_FinalizationQueue fq */);
    /* Causes an object which was once finalizable to be so again, now
       on queue 'fq'.  If it is on some other queue, it is removed
       first.  The prior state is returned. */

XR_FinalizationState XR_GetFinalizationState(/* XR_FinalizationHandle h */);
    /* Get the finalization state of the object. */

XR_Pointer XR_HandleToObject(/* XR_FinalizationHandle h */);
    /* Get the pointer to the real object, given its handle. */

#endif /* FINALIZE */

#endif /* _XR_GC_ */
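/*
 * Example (illustrative sketch, FINALIZE builds only): enabling
 * finalization for an object and draining its queue from a worker
 * routine.  A real language runtime would wrap this innermost layer,
 * as noted above.  The queue setup, the endless drain loop, and all
 * Example* names are assumptions made for this sketch.
 */
#if 0
#include "xr/GC.h"

static XR_FinalizationQueue exampleFQ;

static void
ExampleSetupFinalization()
{
    exampleFQ = XR_NewFQ();
}

static void
ExampleEnable(object)
    XR_Pointer object;
{
    XR_FinalizationHandle h = XR_NewFinalizationHandle();

    XR_EnableFinalization(object, exampleFQ, h);
}

static void
ExampleDrainFQ()
{
    XR_FinalizationHandle h;
    XR_Pointer object;

    for (;;) {
        /* Blocks on the queue's condition variable; NIL on interrupt. */
        h = XR_FQNextNoAbort(exampleFQ);
        if (h != (XR_FinalizationHandle)0) {
            object = XR_HandleToObject(h);
            /* ... release external state associated with object ... */
        }
    }
}
#endif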