-- File: BufferMgr.mesa - last edit:
-- HGM 22-May-86 15:26:54, Big buffers
-- AOF 25-Jul-85 9:11:59
-- Copyright (C) 1984, 1985 by Xerox Corporation. All rights reserved.
DIRECTORY
Buffer USING [
AccessHandle, AccessObject, Buffer, fixedOverhead, Function,
Queue, SizeHint, Type, PoolType],
BufferOps USING [Intervals, Defaults, Statistics],
CommFlags USING [doDebug, doStats],
CommHeap USING [zone],
CommunicationInternal USING [],
CommUtil USING [AllocateBuffers, FreeBuffers],
Driver USING [Glitch, Network, nilNetwork],
DriverTypes USING [Encapsulation, EthernetCRC],
Frame USING [GetReturnFrame, ReadGlobalLink],
Inline USING [BITAND],
NSTypes USING [wordsPerIDPHeader, wordsPerSppHeader],
Process USING [
Abort, DisableAborts, DisableTimeout, EnableAborts, InitializeCondition,
SetPriority, SetTimeout, TicksToMsec, GetPriority, Priority],
ProcessPriorities USING [priorityRealTime],
PupDefs USING [],
PupTypes USING [BufferBody],
Socket USING [],
Stats USING [StatIncr],
System USING [GetClockPulses, MicrosecondsToPulses];
BufferMgr: MONITOR
IMPORTS CommHeap, CommUtil, Driver, Frame, Inline, Process, Stats, System
EXPORTS Buffer, BufferOps, CommunicationInternal, Driver, PupDefs, Socket =
BEGIN
Network: PUBLIC TYPE = Driver.Network;
<<
A cluster is one contiguous allocation carved into four buffers: slot 0
is a small buffer, slots 1..3 are full-size buffers (see
AllocateNewCluster). Clusters live on a singly linked list kept in
ascending 'order'.
>>
Cluster: PUBLIC TYPE = LONG POINTER TO ClusterObject;
ClusterObject: TYPE = RECORD[
next: Cluster, --objects are linked together
order: CARDINAL, --and they are ordered
time: LONG CARDINAL, --System.GetClockPulses last returned
useMap: UseMap, --map of buffers free/used
slot: ARRAY CARDINAL[0..4) OF Buffer.Buffer, --the actual buffers
pad: ARRAY[0..4) OF WORD ← TRASH]; --alignment for Daybreak
--One bit per slot, packed so whole-map compares and masks take one operation.
UseMap: TYPE = MACHINE DEPENDENT RECORD[
map(0: 0..3): PACKED ARRAY CARDINAL[0..4) OF Use,
filler(0: 4..15): CARDINAL[0..7777B]];
Use: TYPE = MACHINE DEPENDENT{used(0), free(1)};
--distinguished map values, also used as masks in GetBufferInternal
allFree: UseMap = [[free, free, free, free], 0];
mostFree: UseMap = [[used, free, free, free], 0]; --mask: any full buffer free
allUsed: UseMap = [[used, used, used, used], 0];
mostUsed: UseMap = [[free, used, used, used], 0]; --only the small one free
--THE SYSTEM BUFFER POOL: fields protected by this module's monitor lock
syspool: PUBLIC RECORD[
cache: Cluster, --best chance to find free 'fullBuffer'
cluster: RECORD[head, tail: Cluster, count: CARDINAL], --list of clusters
send, receive, sendInUse, receiveInUse, freeBias: CARDINAL, --current events
buffersRequested, buffersAllocated, buffersAvailable: CARDINAL, --current
access: Buffer.AccessHandle, --head of linked list
smallSize, fullSize: CARDINAL, --in words of the respective buffers
scanInterval, lastTime: LONG CARDINAL, --times in Pulses
clusterAgeLimit, lostBufferLimit: LONG CARDINAL, --times in Pulses & msecs
defaultSendBuffers, defaultReceiveBuffers, leaked: CARDINAL,
freeBuffer, fault: CONDITION, flag: BOOLEAN, allocator: PROCESS];
--COMPILE-TIME CONSTANTS
crc32: CARDINAL = SIZE[DriverTypes.EthernetCRC]; --length of crc32 field
fudgeFactor: CARDINAL = 8; --extra words in buffers
alignment: CARDINAL = 4; --quad word (almost) required
preallocate: BOOLEAN = FALSE; --does she, or doesn't she
biasing: BOOLEAN = TRUE; --keep 'n' buffers free at all times
--FATAL CONDITIONS, ALL REPORTED THROUGH Driver.Glitch
QueueScrambled: ERROR = CODE;
PoolTypeBroken: ERROR = CODE;
NotInitialized: ERROR = CODE;
QueueSealBroken: ERROR = CODE;
AccessSealBroken: ERROR = CODE;
BufferSealBroken: ERROR = CODE;
BufferNeverFreed: ERROR = CODE;
ClusterNotOrdered: ERROR = CODE;
OtherClientsActive: ERROR = CODE;
ClusterNotDeletable: ERROR = CODE;
SystemBufferPoolConfused: ERROR = CODE;
DontKnowHowToAllocateBuffer: ERROR = CODE;
--Report the current buffer sizes in words; the last two hints are unused.
GetSizes: PUBLIC PROC RETURNS [ARRAY Buffer.SizeHint OF CARDINAL] =
{RETURN[[syspool.smallSize, syspool.fullSize, 0, 0]]}; --GetSizes
--Set the buffer sizes (in words) used when new clusters are carved up.
SetSizes: PUBLIC PROC[size: ARRAY Buffer.SizeHint OF CARDINAL] =
{syspool.smallSize ← size[smallBuffer];
syspool.fullSize ← size[fullBuffer]}; --SetSizes
--Report the pool's timing parameters (scan and aging in Pulses, lost in msecs).
GetIntervals: PUBLIC PROC RETURNS[ARRAY BufferOps.Intervals OF LONG CARDINAL] =
{RETURN[[
scan: syspool.scanInterval, aging: syspool.clusterAgeLimit,
lost: syspool.lostBufferLimit]]}; --GetIntervals
--Set the pool's timing parameters (scan and aging in Pulses, lost in msecs).
SetIntervals: PUBLIC PROC[
interval: ARRAY BufferOps.Intervals OF LONG CARDINAL] =
{syspool.scanInterval ← interval[scan];
syspool.clusterAgeLimit ← interval[aging];
syspool.lostBufferLimit ← interval[lost]}; --SetIntervals
--Report default buffer counts for new system pools plus the free bias.
GetDefaults: PUBLIC PROC RETURNS[ARRAY BufferOps.Defaults OF CARDINAL] =
{RETURN[[
syspool.defaultSendBuffers, syspool.defaultReceiveBuffers,
syspool.freeBias]]}; --GetDefaults
--Set default buffer counts for new system pools plus the free bias.
SetDefaults: PUBLIC PROC[default: ARRAY BufferOps.Defaults OF CARDINAL] =
{syspool.defaultSendBuffers ← default[send];
syspool.defaultReceiveBuffers ← default[receive];
syspool.freeBias ← default[bias]}; --SetDefaults
--Snapshot of the pool's accounting counters.
GetStatistics: PUBLIC PROC RETURNS[ARRAY BufferOps.Statistics OF CARDINAL] =
{RETURN[[
syspool.buffersRequested, syspool.buffersAllocated,
syspool.buffersAvailable, syspool.send, syspool.receive,
syspool.sendInUse, syspool.receiveInUse]]}; --GetStatistics
--Total buffers (send plus receive) this client may still allocate.
BuffersLeft: PUBLIC PROC[aH: Buffer.AccessHandle] RETURNS[CARDINAL] =
BEGIN
RETURN[(aH.receive - aH.receiveInUse) + (aH.send - aH.sendInUse)];
END; --BuffersLeft
--Send buffers this client may still allocate.
SendBuffersLeft: PUBLIC PROC[aH: Buffer.AccessHandle] RETURNS[CARDINAL] =
BEGIN
RETURN[aH.send - aH.sendInUse];
END; --SendBuffersLeft
--Receive buffers this client may still allocate.
ReceiveBuffersLeft: PUBLIC PROC[aH: Buffer.AccessHandle] RETURNS[CARDINAL] =
BEGIN
RETURN[aH.receive - aH.receiveInUse];
END; --ReceiveBuffersLeft
DataWordsPerRawBuffer: PUBLIC PROC RETURNS [CARDINAL] =
BEGIN
--Payload words in a full buffer once overhead, encapsulation,
--CRC and alignment slop are removed.
RETURN[
syspool.fullSize - Buffer.fixedOverhead -
SIZE[DriverTypes.Encapsulation] - crc32 - alignment];
END; --DataWordsPerRawBuffer
DataWordsPerPupBuffer: PUBLIC PROC RETURNS [CARDINAL] =
BEGIN
--Raw payload less the Pup header words.
RETURN[DataWordsPerRawBuffer[] - SIZE[pupWords PupTypes.BufferBody]];
END; --DataWordsPerPupBuffer
DataWordsPerNSBuffer: PUBLIC PROC RETURNS [CARDINAL] =
BEGIN
--Raw payload less the IDP header words.
RETURN[DataWordsPerRawBuffer[] - NSTypes.wordsPerIDPHeader];
END; --DataWordsPerNSBuffer
DataWordsPerSppBuffer: PUBLIC PROC RETURNS [CARDINAL] =
BEGIN
--NS payload less the SPP header words.
RETURN[DataWordsPerNSBuffer[] - NSTypes.wordsPerSppHeader];
END; --DataWordsPerSppBuffer
--THE QUEUE MANAGEMENT PACKAGE
CreditReceiveBuffer: PUBLIC ENTRY PROC[
aH: Buffer.AccessHandle, b: Buffer.Buffer]
RETURNS [gotCredit: BOOLEAN] =
BEGIN
<<
Charge buffer 'b' (already in hand) against the receive allocation of
pool 'aH'. Returns FALSE, charging nothing, when the pool's receive
allocation is already exhausted.
>>
IF CommFlags.doDebug AND (aH.seal ~IN Buffer.PoolType) THEN
Driver.Glitch[PoolTypeBroken];
IF (gotCredit ← aH.receiveInUse < aH.receive) THEN
BEGIN
aH.receiveInUse ← aH.receiveInUse + 1;
syspool.receiveInUse ← syspool.receiveInUse + 1;
b.currentOwner ← aH; --this pool now accounts for the buffer
b.function ← receive;
IF CommFlags.doDebug THEN
b.debug ← Frame.ReadGlobalLink[Frame.GetReturnFrame[]]; --note caller
END
ELSE IF CommFlags.doStats THEN Stats.StatIncr[statNoCredit];
END; --CreditReceiveBuffer
Dequeue, DequeuePup, DequeueNS, DequeueSpp: PUBLIC PROC[
q: Buffer.Queue] RETURNS [b: Buffer.Buffer] =
BEGIN
<<
Remove and return the first buffer of 'q'; returns NIL when the queue
is empty. Not an ENTRY procedure.
>>
IF CommFlags.doDebug THEN
SELECT TRUE FROM
(q = NIL) => Driver.Glitch[QueueScrambled];
(q.seal # queueSeal) => Driver.Glitch[QueueSealBroken];
(q.length > 256) => Driver.Glitch[QueueScrambled]; --sanity bound
ENDCASE;
IF (b ← q.first) = NIL THEN
BEGIN
IF CommFlags.doDebug AND q.length # 0 THEN Driver.Glitch[QueueScrambled];
IF CommFlags.doStats THEN Stats.StatIncr[statDequeueNIL];
RETURN;
END;
IF (q.first ← q.first.next) = NIL THEN q.last ← NIL; --queue now empty
q.length ← q.length - 1;
IF CommFlags.doStats THEN Stats.StatIncr[statDequeue];
IF CommFlags.doDebug THEN
BEGIN
b.debug ← Frame.ReadGlobalLink[Frame.GetReturnFrame[]]; --note caller
SELECT TRUE FROM
(b.queue # q) => Driver.Glitch[QueueScrambled];
(b.seal # bufferSeal) => Driver.Glitch[BufferSealBroken];
(q.length # 0 AND (q.first = NIL OR q.last = NIL)) =>
Driver.Glitch[QueueScrambled];
ENDCASE;
END;
b.queue ← NIL; --buffer is no longer on any queue
b.next ← NIL;
END; --Dequeue, DequeuePup, DequeueNS, DequeueSpp
Enqueue, EnqueuePup, EnqueueNS, EnqueueSpp: PUBLIC PROC[
q: Buffer.Queue, b: Buffer.Buffer] =
BEGIN
--Append buffer 'b' to the tail of 'q'. Not an ENTRY procedure.
IF CommFlags.doDebug THEN
SELECT TRUE FROM
(q.seal # queueSeal) => Driver.Glitch[QueueSealBroken];
(b.seal # bufferSeal) => Driver.Glitch[BufferSealBroken];
(b.queue # NIL) => Driver.Glitch[QueueScrambled]; --already on a queue
(q.length # 0 AND (q.first = NIL OR q.last = NIL)) =>
Driver.Glitch[QueueScrambled];
(q.length > 256) => Driver.Glitch[QueueScrambled]; --sanity bound
ENDCASE => b.debug ← Frame.ReadGlobalLink[Frame.GetReturnFrame[]];
b.next ← NIL;
IF CommFlags.doStats THEN Stats.StatIncr[statEnqueue];
IF q.first = NIL THEN q.first ← b ELSE q.last↑.next ← b;
q.last ← b;
b.queue ← q;
q.length ← q.length + 1;
END; --Enqueue
ExtractFromQueue, ExtractPupFromQueue, ExtractNSFromQueue,
ExtractSppFromQueue: PUBLIC PROC[q: Buffer.Queue, b: Buffer.Buffer]
RETURNS [Buffer.Buffer] =
BEGIN
<<
Remove buffer 'b' from wherever it sits in 'q'. Returns 'b' when it was
found and removed, NIL when it was not on the queue. Not an ENTRY
procedure.
>>
previousB, currentB: Buffer.Buffer;
IF CommFlags.doDebug THEN
SELECT TRUE FROM
(q = NIL) => Driver.Glitch[QueueScrambled];
(b = NIL) => Driver.Glitch[QueueScrambled];
(q.seal # queueSeal) => Driver.Glitch[QueueSealBroken];
(b.seal # bufferSeal) => Driver.Glitch[BufferSealBroken];
ENDCASE;
--linear search for 'b', remembering its predecessor
previousB ← NIL;
currentB ← q.first;
UNTIL currentB = b DO
IF currentB = NIL THEN EXIT; --ran off the end: not on this queue
previousB ← currentB;
currentB ← currentB.next;
ENDLOOP;
IF currentB # NIL THEN
BEGIN
--remove this buffer from the queue
IF CommFlags.doDebug AND currentB.seal # bufferSeal THEN
Driver.Glitch[BufferSealBroken];
IF currentB = q.first THEN q.first ← currentB.next;
IF currentB = q.last THEN q.last ← previousB;
IF previousB # NIL THEN previousB.next ← currentB.next;
q.length ← q.length - 1;
currentB.queue ← NIL;
currentB.next ← NIL;
IF CommFlags.doDebug THEN
b.debug ← Frame.ReadGlobalLink[Frame.GetReturnFrame[]]; --note caller
IF CommFlags.doStats THEN Stats.StatIncr[statXqueue];
END
ELSE IF CommFlags.doStats THEN Stats.StatIncr[statXqueueNIL];
RETURN[currentB];
END; --ExtractFromQueue
--Drain 'q', giving every buffer back to the pool machinery.
QueueCleanup: PUBLIC PROC[q: Buffer.Queue] =
BEGIN
b: Buffer.Buffer;
UNTIL (b ← Dequeue[q]) = NIL DO ReturnBuffer[b]; ENDLOOP;
END; --QueueCleanup
--Reset 'q' to an empty, properly sealed queue.
QueueInitialize: PUBLIC PROC[q: Buffer.Queue] =
BEGIN
q↑ ← [length: 0, first: NIL, last: NIL, seal: queueSeal];
END; --QueueInitialize
--BUFFER MANAGEMENT ROUTINES
AllocateNewCluster: PROC =
BEGIN
<<
Body of the forked 'syspool.allocator' process. Loops until aborted:
whenever 'syspool.flag' is set it obtains a fresh cluster (one small
plus three full buffers) from CommUtil, then calls the inner ENTRY
procedure to link the cluster into the pool and wait for the next
request.
This broken in two so that the monitor will not be locked while we run
off to Pilot to allocate nodes and spaces.
>>
AllocateNewClusterLocked: ENTRY PROC =
BEGIN
ENABLE UNWIND => NULL;
IF syspool.flag THEN
BEGIN
--link new cluster to end of list and set its order
IF syspool.cluster.tail = NIL THEN
{syspool.cluster.head ← tail; order ← 1}
ELSE {syspool.cluster.tail.next ← tail;
order ← syspool.cluster.tail.order + 1};
syspool.cluster.tail ← tail;
tail.order ← order;
--keep up bookkeeping; only the 3 full buffers are counted
syspool.cluster.count ← syspool.cluster.count + 1;
syspool.buffersAllocated ← syspool.buffersAllocated + 3;
syspool.buffersAvailable ← syspool.buffersAvailable + 3;
syspool.flag ← FALSE; --reset to avoid looping
BROADCAST syspool.freeBuffer; --tell the world, "New buffers!"
END;
WAIT syspool.fault; --wait for new request
END; --AllocateNewClusterLocked
tail: Cluster;
order: CARDINAL;
b: Buffer.Buffer;
clusterWordSize: CARDINAL;
--UNTIL ABORTED-- DO
ENABLE ABORTED => EXIT;
IF syspool.flag THEN
BEGIN
IF CommFlags.doStats THEN Stats.StatIncr[statAddCluster];
--room for the header, one small and three full buffers, plus slop
--so each buffer can be aligned
clusterWordSize ← SIZE[ClusterObject] +
syspool.smallSize + alignment +
(syspool.fullSize + alignment) * 3;
tail ← CommUtil.AllocateBuffers[clusterWordSize];
tail.next ← NIL;
tail.useMap ← allFree;
tail.time ← System.GetClockPulses[];
b ← LOOPHOLE[tail + SIZE[ClusterObject]]; --where first buffer will be
--fill defaults of buffer, align properly, etc
b ← InitializeBuffer[tail, 0, b] + syspool.smallSize; --slot 0 is small
b ← InitializeBuffer[tail, 1, b] + syspool.fullSize;
b ← InitializeBuffer[tail, 2, b] + syspool.fullSize;
b ← InitializeBuffer[tail, 3, b];
END;
AllocateNewClusterLocked[];
ENDLOOP;
END; --AllocateNewCluster
DestroyPool: PUBLIC ENTRY PROC[aH: Buffer.AccessHandle] =
BEGIN
<<
Delete the accounting object pointed to by aH, after making sure that
all the buffers being used by that object have been returned. If after
a period of time all the buffers cannot be accounted for - Glitch!
(When not debugging, give up after 'lostBufferLimit' msecs and count
the stragglers as leaked instead.)
>>
match: Buffer.AccessHandle;
previous: Buffer.AccessHandle ← NIL;
seal: Buffer.PoolType ← aH.seal; --save the current seal
start: LONG CARDINAL ← System.GetClockPulses[]; --start now
IF CommFlags.doDebug AND (aH.seal ~IN Buffer.PoolType) THEN
Driver.Glitch[AccessSealBroken];
aH.seal ← unsealed; --smash it so it can't be used
Process.DisableAborts[@aH.clientFreeBuffer]; --don't let him hurt us
Process.SetTimeout[@aH.clientFreeBuffer, 10]; --solicit wakeups
--wait for all of this pool's buffers to come home
FOR looptime: CARDINAL ← 0, looptime + Process.TicksToMsec[10]
UNTIL (aH.sendInUse + aH.receiveInUse) = 0 DO
WAIT aH.clientFreeBuffer;
SELECT TRUE FROM
(looptime < CARDINAL[syspool.lostBufferLimit]) => NULL; --keep waiting
(CommFlags.doDebug) => Driver.Glitch[BufferNeverFreed];
ENDCASE => {syspool.leaked ← syspool.leaked + 1; EXIT};
ENDLOOP;
--find aH on the pool's list of access objects
FOR match ← syspool.access, match.next UNTIL match = NIL DO
IF match = aH THEN EXIT ELSE previous ← match;
REPEAT FINISHED => Driver.Glitch[SystemBufferPoolConfused];
ENDLOOP;
IF previous = NIL THEN syspool.access ← aH.next
ELSE previous.next ← aH.next; --delink from list
IF seal # listenPool THEN --listen pools never charged the global quotas
BEGIN
syspool.send ← syspool.send - aH.send;
syspool.receive ← syspool.receive - aH.receive;
syspool.buffersRequested ← syspool.buffersRequested - aH.send - aH.receive;
END;
CommHeap.zone.FREE[@aH]; --gone
END; --DestroyPool
DestroySystemBufferPool: PUBLIC --ENTRY-- PROC =
BEGIN
<<
Tear down the whole pool. Requires that the system access object be
the only one left; aborts the allocator process, destroys the system
access object, then frees each cluster as its buffers all come home.
>>
LockAndDestroyClusters: ENTRY PROC =
BEGIN
UNTIL syspool.cluster.count = 0 DO
c: Cluster ← syspool.cluster.head;
IF c.useMap # allFree THEN {WAIT syspool.freeBuffer; LOOP}; --buffers still out
syspool.cluster.count ← syspool.cluster.count - 1;
syspool.cluster.head ← c.next; CommUtil.FreeBuffers[c];
ENDLOOP;
syspool.cluster ← [NIL, NIL, 0];
END; --LockAndDestroyClusters
IF syspool.access = NIL THEN Driver.Glitch[SystemBufferPoolConfused];
IF syspool.access.next # NIL THEN Driver.Glitch[OtherClientsActive];
Process.Abort[syspool.allocator]; JOIN syspool.allocator;
Process.DisableAborts[@syspool.freeBuffer];
DestroyPool[syspool.access];
LockAndDestroyClusters[];
syspool.access ← NIL; syspool.cache ← NIL;
END; --DestroySystemBufferPool
GetInputBuffer: PUBLIC ENTRY PROC[wait: BOOLEAN] RETURNS[b: Buffer.Buffer] =
BEGIN
<<
Used by drivers to get buffers without accounting. It also will not
try to allocate more buffers from the Pilot Space machinery.
Always asks for a 'fullBuffer'. If 'wait' is TRUE the caller is willing
to block (once) on syspool.freeBuffer for some other client to return a
buffer. May return NIL.
>>
ENABLE UNWIND => NULL;
SELECT TRUE FROM
(b ← GetBufferInternal[Buffer.SizeHint[fullBuffer]]) # NIL => GOTO buffer;
(wait) =>
BEGIN
IF CommFlags.doStats THEN Stats.StatIncr[statBufferWaits];
WAIT syspool.freeBuffer; --wait for someone to return buffer
b ← GetBufferInternal[Buffer.SizeHint[fullBuffer]];
IF b # NIL THEN GOTO buffer
--was unconditional; every other StatIncr in this module is guarded
ELSE IF CommFlags.doStats THEN Stats.StatIncr[statBufferNIL];
END;
ENDCASE => IF CommFlags.doStats THEN Stats.StatIncr[statBufferNIL];
EXITS
buffer =>
BEGIN
IF CommFlags.doDebug THEN
b.debug ← Frame.ReadGlobalLink[Frame.GetReturnFrame[]]; --note caller
b.type ← vagrant; b.function ← driver; --no access-handle accounting
IF CommFlags.doStats THEN Stats.StatIncr[statGetFullBuffer];
END;
END; --GetInputBuffer
GetBuffer: PUBLIC ENTRY PROC[
type: Buffer.Type ← vagrant, aH: Buffer.AccessHandle,
function: Buffer.Function, size: Buffer.SizeHint ← fullBuffer,
wait: BOOLEAN ← TRUE]
RETURNS[b: Buffer.Buffer ← NIL] =
BEGIN
<<
Basic mechanism to get buffers with accounting involved. This routine will
WAIT if the accounting shows no buffer should be made available. It will
append a new cluster to the list if need it must. This routine may return
NIL if wait is FALSE and the client is attempting to exceed his allocation.
>>
ENABLE UNWIND => NULL;
system: POINTER TO CARDINAL; --systemwide in-use counter for 'function'
reserved, inUse: LONG POINTER TO CARDINAL; --this pool's quota and usage
IF CommFlags.doDebug THEN
BEGIN
IF syspool.cache = NIL THEN Driver.Glitch[NotInitialized];
IF aH.seal ~IN Buffer.PoolType THEN
Driver.Glitch[AccessSealBroken];
END;
--aim the three pointers at the counters for the requested function
SELECT function FROM
send =>
BEGIN
reserved ← @aH.send;
inUse ← @aH.sendInUse;
system ← @syspool.sendInUse;
END;
receive =>
BEGIN
reserved ← @aH.receive;
inUse ← @aH.receiveInUse;
system ← @syspool.receiveInUse;
END;
ENDCASE => Driver.Glitch[DontKnowHowToAllocateBuffer];
--UNTIL CODE EXITS-- DO
SELECT TRUE FROM
(reserved↑ > inUse↑) => --client still has credit
BEGIN
IF (b ← GetBufferInternal[size]) # NIL THEN GOTO go;
IF wait THEN WAIT syspool.freeBuffer ELSE GOTO jail;
IF CommFlags.doStats THEN Stats.StatIncr[statClusterWait];
END;
(wait) => WAIT aH.clientFreeBuffer; --over quota; wait for one of ours
ENDCASE => GOTO jail; --couldn't take it, huh?
REPEAT
go =>
BEGIN
inUse↑ ← inUse↑ + 1; system↑ ← system↑ + 1;
b.function ← function; b.type ← type; b.currentOwner ← aH;
IF CommFlags.doDebug THEN
b.debug ← Frame.ReadGlobalLink[Frame.GetReturnFrame[]];
IF CommFlags.doStats THEN Stats.StatIncr[IF b.slot = 0
THEN statGetSmallBuffer ELSE statGetFullBuffer];
END;
jail => IF CommFlags.doStats THEN Stats.StatIncr[statBufferNIL];
ENDLOOP;
END; --GetBuffer
GetBufferInternal: INTERNAL PROC[size: UNSPECIFIED] RETURNS[b: Buffer.Buffer] =
BEGIN
<<
'size' is the minimum index to use looking at the clusters. If its zero,
then small buffers may be returned, else only full buffers will be returned.
The search for a free buffer will start at 'syspool.cache' if large buffers
are required, else we will search starting at the beginning of the pool for
small buffers.
>>
<<
As we exit with a buffer, check to see if it was the last one in
the syspool. If so, notify the allocation machinery so it will
allocate another cluster before (hopefully) the next client needs
it. The result should be that no client ever waits for the
allocation machinery. The test will not detect taking the last
buffer that was also a small buffer from the middle of the order
list of clusters.
>>
Preallocate: PROC RETURNS[BOOLEAN] = INLINE
BEGIN
RETURN[SELECT TRUE FROM
(index = 0) => FALSE, --don't preallocate on smalls
(c.next # NIL) => FALSE, --not the end of the linked list
(c.useMap = mostUsed), (c.useMap = allUsed) => TRUE,
ENDCASE => FALSE];
END; --Preallocate
c: Cluster;
map: UseMap; --mask selecting the slots acceptable for this request
index, order: CARDINAL;
IF size # 0 THEN {c ← syspool.cache; map ← mostFree} --full buffers only
ELSE {c ← syspool.cluster.head; map ← allFree}; --small will do; scan all
IF CommFlags.doDebug THEN order ← c.order;
UNTIL c = NIL DO
--quick test: any acceptable free slot in this cluster at all?
IF Inline.BITAND[map, c.useMap] # 0 THEN
FOR index IN[size..LENGTH[c.slot]) DO
IF c.useMap.map[index] = free THEN GOTO success;
ENDLOOP;
c ← c.next; --go to next element of link
IF CommFlags.doDebug THEN
SELECT TRUE FROM
(c = NIL) => NULL;
(c.order <= order) => Driver.Glitch[ClusterNotOrdered];
ENDCASE => order ← c.order;
REPEAT
success =>
BEGIN
b ← c.slot[index]; c.useMap.map[index] ← used; --check it out
b.requeueProcedure ← ReturnBuffer; --reset requeue proc
b.network ← Driver.nilNetwork; --remove previous network reference
b.context ← NIL; --doesn't have context at this time
b.next ← NIL; --cleanup forward pointers
IF index # 0 THEN
BEGIN
--only full buffers are counted in 'buffersAvailable'
syspool.buffersAvailable ← syspool.buffersAvailable - 1;
b.driver.length ← syspool.fullSize - Buffer.fixedOverhead;
END
ELSE b.driver.length ← syspool.smallSize - Buffer.fixedOverhead;
END;
FINISHED => b ← NIL; --allocate failed, return b = NIL
ENDLOOP;
--maybe wake the allocator process to add a cluster
SELECT TRUE FROM
(syspool.buffersRequested <= syspool.buffersAllocated) => NULL; --enough
(b = NIL) => {syspool.flag ← TRUE; NOTIFY syspool.fault};
(preallocate) AND Preallocate[] =>
BEGIN
IF CommFlags.doStats THEN Stats.StatIncr[statPreallocateCluster];
syspool.flag ← TRUE; NOTIFY syspool.fault;
END;
ENDCASE;
END; --GetBufferInternal
InitializeBuffer: PROC[c: Cluster, i: CARDINAL, b: Buffer.Buffer]
RETURNS[nb: Buffer.Buffer] =
BEGIN
<<
Carve a buffer out of the raw cluster space starting at 'b', record it
as slot 'i' of cluster 'c', and fill in default field values. Returns
the aligned buffer origin; the caller advances by the buffer size to
find the raw space for the next slot (see AllocateNewCluster).
>>
--Properly align the buffer on an "almost quad-word" boundary.
nb ← b + (3 - LOOPHOLE[@b.encapsulation, LONG CARDINAL] MOD 4);
c.slot[i] ← nb;
nb↑ ← [
next: NIL, slot: i, currentOwner: NIL, queue: NIL, status: pending,
cluster: c, requeueProcedure: ReturnBuffer, requeueData: 0,
seal: bufferSeal, network: Driver.nilNetwork, context: NIL,
time: [0], allNets: FALSE, bypassZeroNet: FALSE, type: vagrant,
function: free, tries: 0, debug: 0,
driver: [length: 0, iocb: NIL, faceStatus: unknown[0]],
encapsulation: , bufferBody: ]; --encapsulation and body left void
END; --InitializeBuffer
MakePool: PUBLIC ENTRY PROC[
send: CARDINAL, --number of send buffers permitted for client use
receive: CARDINAL, --number of receive buffers permitted for client use
type: Buffer.PoolType] --type of pool => type of pool seal
RETURNS [aH: Buffer.AccessHandle] =
BEGIN
<<
Creates the accounting object for client access to the buffer machinery.
It does not actually allocate any buffers, but does bump the counts so they
may be allocated if the need should arise.
>>
ENABLE UNWIND => NULL;
IF syspool.access = NIL THEN Driver.Glitch[NotInitialized];
aH ← CommHeap.zone.NEW[Buffer.AccessObject];
aH.next ← syspool.access; syspool.access ← aH; --link to beginning of list
aH.seal ← type;
aH.send ← send; aH.receive ← receive;
aH.sendInUse ← 0; aH.receiveInUse ← 0;
aH.frame ← Frame.ReadGlobalLink[Frame.GetReturnFrame[]]; --note creator
Process.InitializeCondition[@aH.clientFreeBuffer, 0];
Process.DisableTimeout[@aH.clientFreeBuffer];
Process.EnableAborts[@aH.clientFreeBuffer];
IF type # listenPool THEN --listen pools don't charge the global quotas
BEGIN
syspool.send ← syspool.send + send;
syspool.receive ← syspool.receive + receive;
syspool.buffersRequested ← syspool.buffersRequested + send + receive;
END;
IF CommFlags.doStats THEN Stats.StatIncr[statPoolCreated];
END; --MakePool
MakeSystemBufferPool: PUBLIC PROC[extraBuffers: CARDINAL] =
BEGIN
<<
Create the system access object and initialize the pool counters, then
fork the allocator process (at real-time priority) with 'syspool.flag'
set so it immediately commits the first cluster. Does not return until
that cluster is in place and 'syspool.cache' points at it.
>>
LockWaitAndSetCache: ENTRY PROC =
BEGIN
--ENABLE UNWIND => NULL;
UNTIL (syspool.cache ← syspool.cluster.head) # NIL DO
WAIT syspool.freeBuffer; ENDLOOP;
END; --LockWaitAndSetCache
IF syspool.access # NIL THEN Driver.Glitch[SystemBufferPoolConfused];
--CREATE ACCESS OBJECT FOR SYSTEM USE
syspool.access ← CommHeap.zone.NEW[Buffer.AccessObject];
syspool.access.next ← NIL; --this is the only one has .next = NIL
syspool.access.seal ← systemPool;
syspool.access.send ← syspool.defaultSendBuffers;
syspool.access.receive ← syspool.defaultReceiveBuffers + extraBuffers;
syspool.access.sendInUse ← syspool.access.receiveInUse ← 0;
Process.InitializeCondition[@syspool.access.clientFreeBuffer, 0];
Process.DisableTimeout[@syspool.access.clientFreeBuffer];
--INITIALIZE SYSTEM COUNTERS, ETC
syspool.send ← syspool.access.send;
syspool.receive ← syspool.access.receive;
syspool.lastTime ← System.GetClockPulses[];
syspool.sendInUse ← syspool.receiveInUse ← syspool.leaked ← 0;
syspool.access.frame ← Frame.ReadGlobalLink[Frame.GetReturnFrame[]];
syspool.buffersAllocated ← syspool.buffersAvailable ← 0;
syspool.buffersRequested ← syspool.access.send + syspool.access.receive;
Process.EnableAborts[@syspool.freeBuffer];
IF CommFlags.doStats THEN Stats.StatIncr[statPoolCreated];
<<SPECIAL CASE -- ACTUALLY COMMITTING THE SPACES BEHIND THE POOL>>
BEGIN
prio: Process.Priority ← Process.GetPriority[];
syspool.flag ← TRUE; --so it allocates below
Process.SetPriority[ProcessPriorities.priorityRealTime];
syspool.allocator ← FORK AllocateNewCluster[]; --will allocate real buffers
Process.SetPriority[prio]; --back to normal
END;
LockWaitAndSetCache[];
END; --MakeSystemBufferPool
ReturnBuffer, ReturnFreeBuffer: PUBLIC ENTRY PROC[b: Buffer.Buffer] =
BEGIN
<<ENABLE UNWIND => NULL; --generates and passes no signals>>
<<
Return 'b' to its cluster, credit the owning access object, and wake
any waiters. At most once per 'scanInterval' also sweep the cluster
list, freeing clusters whose buffers have all been idle for longer than
'clusterAgeLimit'.
Maybe we want to keep 'n' free buffers laying around at any time. This
would conceivably be to handle burst traffic on gateways, and might be
reasonable on machines that don't believe in virtual memory.
>>
Bias: PROC RETURNS[BOOLEAN] = INLINE
{RETURN[INTEGER[syspool.buffersAvailable] < INTEGER[syspool.freeBias]]};
now: LONG CARDINAL;
slot: CARDINAL = b.slot;
current, previous, search: Cluster;
aH: Buffer.AccessHandle = b.currentOwner;
IF CommFlags.doDebug AND (b.queue # NIL) THEN Driver.Glitch[QueueScrambled];
SELECT b.function FROM
send =>
BEGIN
aH.sendInUse ← aH.sendInUse - 1;
syspool.sendInUse ← syspool.sendInUse - 1;
NOTIFY aH.clientFreeBuffer;
END;
receive =>
BEGIN
aH.receiveInUse ← aH.receiveInUse - 1;
syspool.receiveInUse ← syspool.receiveInUse - 1;
NOTIFY aH.clientFreeBuffer;
END;
<<driver => NULL; --in use by a driver - no access control>>
ENDCASE;
current ← b.cluster; --copy cluster value from buffer
current.useMap.map[slot] ← free;
b.function ← free; --buffer in cluster and free
BROADCAST syspool.freeBuffer; --tell everyone something has happened
--GENERAL POOL MAINTENANCE
IF (slot # 0) THEN
BEGIN
syspool.buffersAvailable ← syspool.buffersAvailable + 1; --buffer's back
IF (current.order < syspool.cache.order) THEN
syspool.cache ← current; --set cache value back to oldest 'fullBuffer'
END;
now ← current.time ← System.GetClockPulses[] ;
--Elapsed time is 'now - lastTime'. The original '(syspool.lastTime - now)'
--underflows (LONG CARDINAL) whenever any time has elapsed, so the RETURN
--never fired and the sweep below ran on every single buffer return.
IF (now - syspool.lastTime) < syspool.scanInterval THEN RETURN;
syspool.lastTime ← now; --record for next time;
previous ← current; --in order to remove from middle of list
search ← current.next; --so as to start just beyond current
UNTIL search = NIL DO
current ← search; --copy cluster to be tested
search ← current.next; --and get next record in list
<<previous.order < current.order < search.order>>
IF CommFlags.doDebug THEN
SELECT TRUE FROM
(previous.order >= current.order) => Driver.Glitch[ClusterNotOrdered];
(search = NIL) => NULL;
(current.order >= search.order) => Driver.Glitch[ClusterNotOrdered];
ENDCASE;
SELECT TRUE FROM
((now - current.time) < syspool.clusterAgeLimit) => previous ← current;
(current.useMap # allFree) => previous ← current; --buffers still out
(biasing) AND (Bias[]) => NULL; --just don't give it back
ENDCASE =>
BEGIN
previous.next ← search; --pull 'current' out of list
IF search = NIL THEN syspool.cluster.tail ← previous;
syspool.buffersAllocated ← syspool.buffersAllocated - 3;
syspool.buffersAvailable ← syspool.buffersAvailable - 3;
syspool.cluster.count ← syspool.cluster.count - 1;
CommUtil.FreeBuffers[current];
END;
ENDLOOP;
END; --ReturnBuffer, ReturnFreeBuffer
--INITIALIZATION (module mainline: establish defaults before any client call)
syspool.freeBias ← 0;
syspool.access ← NIL; syspool.cache ← NIL; syspool.cluster ← [NIL, NIL, 0];
syspool.defaultSendBuffers ← 0; syspool.defaultReceiveBuffers ← 1;
syspool.clusterAgeLimit ← System.MicrosecondsToPulses[30 * 1000000]; --30 sec
syspool.lostBufferLimit ← 60 * 1000; --msecs
syspool.scanInterval ← System.MicrosecondsToPulses[5 * 1000000]; --5 sec
syspool.fullSize ← Buffer.fixedOverhead + SIZE[DriverTypes.Encapsulation] +
1500/2 + crc32 + fudgeFactor; --1500/2: 1500 data bytes expressed in words
syspool.smallSize ← Buffer.fixedOverhead + SIZE[DriverTypes.Encapsulation] +
NSTypes.wordsPerIDPHeader + NSTypes.wordsPerSppHeader + crc32;
Process.DisableTimeout[@syspool.fault];
Process.EnableAborts[@syspool.fault];
Process.DisableTimeout[@syspool.freeBuffer];
END.....
LOG
time - by - action
14-May-84 13:36:38 - AOF - Post Klamath
25-Jul-85 9:11:35 - AOF - NIL context pointer when allocating new buffers