Very freely adapted from translation code in: Cedar6.0>OISCP>EthernetOneDriver.mesa
Birrell on: 9-Oct-81 16:43:32
BLyon on: March 13, 1981 10:47 PM
Levin, August 9, 1983 9:28 am
Russ Atkinson (RRA) February 19, 1985 7:44:43 pm PST
DIRECTORY
Basics USING [bytesPerWord],
BasicTime USING [GetClockPulses, MicrosecondsToPulses, Pulses],
CommBuffer USING [Overhead],
CommDriver USING [AllocBuffer, Buffer, FreeBuffer, GetNetworkChain, InsertReceiveProc, Network, RecvProc],
CommDriverType USING [Encapsulation, ethernetOneBroadcastHost],
Process USING [MsecToTicks, priorityForeground, SecondsToTicks, SetPriority, SetTimeout, Ticks],
Pup USING [allHosts, Host, nullHost],
XNS USING [broadcastHost, GetThisHost, Host];
XNSEthernetOneTranslation:
CEDAR
MONITOR
LOCKS cH USING cH: Cache
IMPORTS BasicTime, CommDriver, XNS, Process
EXPORTS CommBuffer
~ {
-- Module-scope abbreviations and constants.
BYTE: TYPE ~ [0..100H); -- one octet, [0..256)
bytesPerWord: NAT ~ Basics.bytesPerWord;
Buffer: TYPE ~ CommDriver.Buffer;
Network: TYPE ~ CommDriver.Network;
Encapsulation: PUBLIC TYPE ~ CommDriverType.Encapsulation; -- exported to CommBuffer
thisHost: XNS.Host ~ XNS.GetThisHost[]; -- this machine's XNS address, sampled once at load time
Time
Pulses: TYPE ~ BasicTime.Pulses; -- clock-pulse units from BasicTime
-- Converts n milliseconds to clock pulses.
MSecsToPulses:
PROC [n:
CARD]
RETURNS[Pulses] ~
INLINE {
RETURN [BasicTime.MicrosecondsToPulses[1000*n]] };
-- Returns the number of clock pulses elapsed since `then`.
PulsesSince:
PROC [then: Pulses]
RETURNS[Pulses] ~
INLINE {
RETURN [BasicTime.GetClockPulses[] - then] };
Translation Request / Reply Packets
-- Two-byte type code distinguishing translation request packets from replies.
TranslationType: TYPE ~ MACHINE DEPENDENT RECORD [a, b: BYTE];
requestType: TranslationType ~ [010H, 041H]; -- "who has this XNS host?"
replyType: TranslationType ~ [00eH, 038H]; -- "I do; here is the Pup host"
-- An (XNS host, Pup host) address pair as carried in translation packets.
-- The filler byte pads the record to a word boundary.
HostPair:
TYPE ~
MACHINE
DEPENDENT
RECORD [
nsHost: XNS.Host,
pupHost: Pup.Host,
filler: BYTE];
-- Wire size of a HostPair in bytes.
hostPairBytes:
CARDINAL ~ bytesPerWord*
SIZE[HostPair];
-- Should be BYTES[HostPair]
-- Body of a translation request/reply packet: type code, the pair being
-- asked about / answered for (replier), and the asker's pair (requestor).
TranslationPacketObject: TYPE ~
MACHINE
DEPENDENT
RECORD [
translationType: TranslationType,
replier: HostPair,
requestor: HostPair];
translationPacketBytes: CARDINAL ~ bytesPerWord*SIZE[TranslationPacketObject]; -- Should be BYTES[TranslationPacketObject]; Must be even!
-- Minimum acceptable packet size: a reply missing the requestor field.
translationShortPacketBytes:
CARDINAL ~ translationPacketBytes - hostPairBytes;
-- CROCK: some old implementations send reply packets without the requestor field
-- View of a CommDriver buffer as a translation packet: driver overhead
-- followed by the TranslationPacketObject fields. Buffers are LOOPHOLEd
-- to this type in MakeRequest and RecvTranslation.
TranslationBuffer: TYPE ~ REF TranslationBufferObject;
TranslationBufferObject:
TYPE ~
MACHINE
DEPENDENT
RECORD [
ovh: CommBuffer.Overhead,
translationType: TranslationType,
replier: HostPair,
requestor: HostPair];
Translation Entry Cache
numHashHeaders: CARDINAL ~ 101; -- prime, so the MOD below spreads addresses evenly
HashIndex: TYPE ~ [0..numHashHeaders);
-- Hashes an XNS host address into the validEntries table using its last byte.
Hash:
PROC [nsHost:
XNS.Host]
RETURNS [HashIndex] ~
INLINE {
RETURN [ nsHost.f MOD numHashHeaders ] };
-- Per-network translation cache; the MONITORED record is the module's lock
-- (see the LOCKS clause in the module header). Holds the daemon process,
-- its wakeup condition, the queue of buffers awaiting transmission,
-- permanent entries for broadcast and this host, the list of entries
-- awaiting replies, and the hash table of valid translations.
Cache: TYPE ~ REF CacheObject;
CacheObject:
TYPE ~
MONITORED
RECORD [
daemon: PROCESS,
event: CONDITION,
newPendingEntry: BOOL ← FALSE, -- set when a pending entry is added; daemon clears it
sweepTime: Pulses, -- time of the last aging sweep over validEntries
sendHead, sendTail: Buffer, -- FIFO of buffers for the daemon to transmit
broadcastHostEntry: CacheEntry,
thisHostEntry: CacheEntry,
pendingEntries: CacheEntry, -- entries whose translation is still being requested
validEntries: ARRAY HashIndex OF CacheEntry];
-- One cached translation. While pending, timeToLive counts request (re)sends
-- remaining and whenToSend schedules the next send; once valid, timeToLive
-- counts aging sweeps remaining before the entry goes stale.
CacheEntry: TYPE ~ REF CacheEntryObject;
CacheEntryObject:
TYPE ~
RECORD [
next: CacheEntry,
hosts: HostPair,
whenToSend: Pulses,
timeToLive: CARDINAL
];
-- An entry is usable until its timeToLive has been decremented to zero.
UpToDate:
PROC [eH: CacheEntry]
RETURNS [
BOOL]
~ INLINE { RETURN [eH.timeToLive > 0] };
Timeouts
-- Tuning parameters (variables, so they can be patched from the debugger).
pulsesPerSweep: Pulses ← MSecsToPulses[19000]; -- interval between aging sweeps of validEntries
sweepTimeout: Process.Ticks ← Process.SecondsToTicks[20]; -- daemon wait when nothing is pending
sweepsToLive: CARDINAL ← 6; -- sweeps a valid entry survives without being refreshed
pulsesPerResend: Pulses ← MSecsToPulses[230]; -- interval between resends of an unanswered request
resendTimeout: Process.Ticks ← Process.MsecToTicks[250]; -- daemon wait while requests are pending
sendsToLive: CARDINAL ← 8; -- request sends before a pending entry is abandoned
Encapsulating NS Packets
Statistics
noTranslation: INT ← 0; -- GetEncapsulation calls that found no usable translation
notQuick: INT ← 0; -- GetEncapsulation calls that missed the lock-free fast path
-- Maps an XNS destination host to an EthernetOne encapsulation for `network`.
-- Fast path: probes the first two entries of the host's hash chain WITHOUT
-- entering the monitor; then checks the permanent broadcast/this-host
-- entries; otherwise calls the ENTRY proc GetCacheEntry, which may queue a
-- translation request. On failure, returns an encapsulation whose type is
-- translationFailed and whose destination is Pup.nullHost, so the caller
-- can tell no translation was available.
GetEncapsulation:
PROC [network: Network, nsHost:
XNS.Host]
RETURNS [Encapsulation] ~ {
cH: Cache ~ NARROW[network.xns.translation];
eH: CacheEntry ← NIL;
hashIndex: HashIndex ~ Hash[nsHost];
BEGIN
Quick check of first couple of entries without acquiring ML.
IF (eH ← cH.validEntries[hashIndex]) #
NIL
THEN {
IF (eH.hosts.nsHost = nsHost) AND UpToDate[eH] THEN GOTO Found;
IF (eH ← eH.next) #
NIL
THEN {
IF (eH.hosts.nsHost = nsHost) AND UpToDate[eH] THEN GOTO Found;
NULL; -- more checks would go here ...
};
};
-- Special cases: broadcast and our own address translate via permanent entries.
IF nsHost = XNS.broadcastHost THEN { eH ← cH.broadcastHostEntry; GOTO Found };
IF nsHost = thisHost THEN { eH ← cH.thisHostEntry; GOTO Found };
notQuick ← notQuick.SUCC;
-- Slow path: full search under the monitor lock; may start a translation request.
IF (eH ← GetCacheEntry[cH, hashIndex, nsHost]) # NIL THEN GOTO Found;
GOTO NotFound;
EXITS
Found => {
TRUSTED { RETURN[ [ethernetOne[etherSpare1~0, etherSpare2~0, etherSpare3~0, etherSpare4~0, etherSpare5~0, ethernetOneDest~eH.hosts.pupHost, ethernetOneSource~network.pup.host, ethernetOneType~xns]] ] }
};
NotFound => {
noTranslation ← noTranslation.SUCC;
TRUSTED { RETURN[ [ethernetOne[etherSpare1~0, etherSpare2~0, etherSpare3~0, etherSpare4~0, etherSpare5~0, ethernetOneDest~Pup.nullHost, ethernetOneSource~network.pup.host, ethernetOneType~translationFailed]] ] }
};
END;
};
-- Monitor-locked slow-path lookup, with three outcomes:
--  * fresh valid entry: move it to the head of its hash chain and return it;
--  * stale valid entry: move it to the pending list, schedule an immediate
--    request resend, wake the daemon, and return the (stale) entry so the
--    caller can still use the old translation while it is refreshed;
--  * no entry: create a pending entry, wake the daemon, return NIL.
GetCacheEntry:
ENTRY
PROC [cH: Cache, hashIndex: HashIndex, nsHost:
XNS.Host]
RETURNS [result: CacheEntry] ~ {
Search for a cache entry for the given nsHost. If an entry is found, return it. If no entry is found, or the entry is out of date, arrange for a new entry to be added.
eH, prevH: CacheEntry;
Search for an entry in the valid list.
eH ← cH.validEntries[hashIndex]; prevH ← NIL;
WHILE eH #
NIL
DO
IF eH.hosts.nsHost = nsHost
THEN {
IF UpToDate[eH]
THEN {
Move entry to head of list.
IF prevH #
NIL
THEN {
prevH.next ← eH.next;
eH.next ← cH.validEntries[hashIndex];
cH.validEntries[hashIndex] ← eH };
RETURN[eH] }
ELSE {
Entry needs to be refreshed — move it to pending list.
IF prevH #
NIL
THEN prevH.next ← eH.next
ELSE cH.validEntries[hashIndex] ← eH.next;
eH.timeToLive ← sendsToLive; -- now counts request sends, not sweeps
eH.whenToSend ← BasicTime.GetClockPulses[]; -- due immediately
eH.next ← cH.pendingEntries;
cH.pendingEntries ← eH;
cH.newPendingEntry ← TRUE; NOTIFY cH.event; -- wake the daemon to send the request
RETURN[eH] };
};
prevH ← eH; eH ← eH.next
ENDLOOP;
Search for a pending entry.
FOR eH ← cH.pendingEntries, eH.next
WHILE eH #
NIL
DO
IF eH.hosts.nsHost = nsHost
THEN
-- A pending entry is usable only if a reply has already filled in its pupHost.
RETURN [IF eH.hosts.pupHost # Pup.nullHost THEN eH ELSE NIL];
ENDLOOP;
No entry found, create a new pending one.
TRUSTED { cH.pendingEntries ← NEW[ CacheEntryObject ← [next~cH.pendingEntries, hosts~[nsHost~nsHost, pupHost~Pup.nullHost, filler~], whenToSend~BasicTime.GetClockPulses[], timeToLive~sendsToLive] ] };
cH.newPendingEntry ← TRUE; NOTIFY cH.event;
RETURN[NIL] };
Building Request / Reply Packets
MakeRequest:
PROC [cH: Cache, nsHost:
XNS.Host, sendTo: Pup.Host ← Pup.allHosts]
RETURNS [b: Buffer] ~ {
Allocate a buffer, build a request packet in it, and return it.
The sendTo parameter is the Pup Host to which the request packet will be sent. It should be allHosts (broadcast) for a normal request.
bH: TranslationBuffer;
b ← CommDriver.AllocBuffer[];
TRUSTED { bH ← LOOPHOLE[b] }; -- view the raw buffer as a translation packet
bH.translationType ← requestType;
-- replier holds the host being asked about; its pupHost is the blank to fill in.
bH.replier ← [nsHost~nsHost, pupHost~Pup.nullHost, filler~0];
bH.requestor ← cH.thisHostEntry.hosts; -- so the replier knows where to answer
TRUSTED { bH.ovh.encap ← Encapsulation[ethernetOne[etherSpare1~0, etherSpare2~0, etherSpare3~0, etherSpare4~0, etherSpare5~0, ethernetOneDest~sendTo, ethernetOneSource~cH.thisHostEntry.hosts.pupHost, ethernetOneType~translation]] };
};
ConvertToReply:
PROC [cH: Cache, bH: TranslationBuffer] ~ {
Given a request buffer, convert it to the corresponding reply.
Fill in the encapsulation part here, so the buffer can be sent using network.sendTranslate rather than network.return.
bH.translationType ← replyType;
bH.replier ← cH.thisHostEntry.hosts; -- answer with our own (XNS, Pup) pair
-- Reply goes directly (not broadcast) to the requestor's Pup host.
TRUSTED { bH.ovh.encap ← Encapsulation[ethernetOne[etherSpare1~0, etherSpare2~0, etherSpare3~0, etherSpare4~0, etherSpare5~0, ethernetOneDest~bH.requestor.pupHost, ethernetOneSource~cH.thisHostEntry.hosts.pupHost, ethernetOneType~translation]] };
};
Processing Received Translation Packets
-- Records a translation learned from a received request or reply packet.
-- A matching pending entry is removed from the pending list and completed;
-- a matching valid entry is refreshed (or discarded and replaced if its
-- pupHost disagrees with the new information); otherwise a new entry is
-- created. The resulting entry goes to the head of its hash chain with a
-- full sweep lifetime.
AddTranslation:
ENTRY
PROC [cH: Cache, hosts: HostPair] ~ {
eH, prevH: CacheEntry;
i: HashIndex ~ Hash[hosts.nsHost];
IF hosts.pupHost = Pup.allHosts THEN RETURN; -- Sanity check
Look for a pending entry (most likely case).
eH ← cH.pendingEntries; prevH ← NIL;
WHILE eH #
NIL
DO
IF eH.hosts.nsHost = hosts.nsHost
THEN {
-- Unlink from pending; fill in the answer. eH survives for reuse below.
IF prevH = NIL THEN cH.pendingEntries ← eH.next ELSE prevH.next ← eH.next;
eH.hosts.pupHost ← hosts.pupHost;
EXIT };
prevH ← eH; eH ← eH.next
ENDLOOP;
If no pending entry, look for a valid one.
IF eH =
NIL
THEN {
eH ← cH.validEntries[i]; prevH ← NIL;
WHILE eH #
NIL
DO
IF eH.hosts.nsHost = hosts.nsHost
THEN {
IF prevH = NIL THEN cH.validEntries[i] ← eH.next ELSE prevH.next ← eH.next;
If existing entry is incorrect, drop it on the floor ...
IF eH.hosts.pupHost # hosts.pupHost THEN eH ← NIL;
EXIT };
prevH ← eH; eH ← eH.next
ENDLOOP;
};
-- Still nothing reusable: make a fresh entry with the received pair.
IF eH =
NIL
THEN eH ← NEW[ CacheEntryObject ← [next~, hosts~hosts, whenToSend~, timeToLive~]];
-- Install (or reinstall) at the head of the hash chain with full lifetime.
eH.timeToLive ← sweepsToLive;
eH.next ← cH.validEntries[i];
cH.validEntries[i] ← eH;
};
Receive Statistics
requestsReceived: INT ← 0; -- translation requests addressed to this host
repliesReceived: INT ← 0; -- translation replies received
tooShort: INT ← 0; -- packets dropped for being shorter than the minimum
badProtocol: INT ← 0; -- packets with an unrecognized translationType
-- Receive procedure for translation packets (installed by Init for type
-- xnsTranslate). Learns translations from both requests and replies, and
-- answers requests directed at this host by converting the received buffer
-- in place to a reply and queueing it for the daemon to send.
-- NOTE(review): when the buffer is consumed this way, NIL is returned to
-- the driver instead of the buffer — presumably signalling that the driver
-- must not reuse it; confirm against the CommDriver.RecvProc contract.
RecvTranslation: CommDriver.RecvProc
[network: Network, buffer: Buffer, bytes: NAT] RETURNS [Buffer]
~ {
cH: Cache ~ NARROW[network.xns.translation];
bH: TranslationBuffer;
CROCK: the following test should be "< translationPacketBytes", but some old implementations send reply packets without the requestor field. Eventually, when the old implementations go away, fix it by moving the other CROCK (below) up to this position.
IF bytes < translationShortPacketBytes
THEN {
tooShort ← tooShort.SUCC;
RETURN [buffer] };
TRUSTED { bH ← LOOPHOLE[buffer] };
SELECT
TRUE
FROM
bH.translationType = requestType => {
CROCK: the following test should be moved up to replace the previous CROCK.
IF bytes < translationPacketBytes
THEN {
tooShort ← tooShort.SUCC;
RETURN [buffer] };
-- Answer only requests asking about our own XNS address.
IF bH.replier.nsHost = thisHost
THEN {
requestsReceived ← requestsReceived.SUCC;
AddTranslation[cH, bH.requestor]; -- learn the asker's pair for free
ConvertToReply[cH, bH];
EnqueueForSending[cH, buffer];
buffer ← NIL; -- buffer now owned by the send queue
};
};
bH.translationType = replyType => {
repliesReceived ← repliesReceived.SUCC;
AddTranslation[cH, bH.replier];
};
ENDCASE => {
badProtocol ← badProtocol.SUCC };
RETURN[buffer];
};
Daemon Process
-- Appends b to the cache's send FIFO and wakes the daemon. ENTRY variant,
-- for callers not already holding the monitor lock.
-- NOTE(review): body duplicates InternalEnqueueForSending — keep the two in step.
EnqueueForSending:
ENTRY PROC [cH: Cache, b: Buffer] ~ {
IF cH.sendHead = NIL THEN cH.sendHead ← b ELSE cH.sendTail.ovh.next ← b;
cH.sendTail ← b;
b.ovh.next ← NIL;
NOTIFY cH.event };
-- Same as EnqueueForSending, but INTERNAL: for callers already inside the
-- monitor (WaitAndScanCache).
InternalEnqueueForSending:
INTERNAL
PROC [cH: Cache, b: Buffer] ~ {
IF cH.sendHead = NIL THEN cH.sendHead ← b ELSE cH.sendTail.ovh.next ← b;
cH.sendTail ← b;
b.ovh.next ← NIL;
NOTIFY cH.event };
-- Removes and returns the head of the send FIFO, or NIL if it is empty.
DequeueForSending:
ENTRY
PROC [cH: Cache]
RETURNS [b: Buffer] ~ {
IF (b ← cH.sendHead) = NIL THEN RETURN;
IF (cH.sendHead ← NARROW[b.ovh.next]) = NIL THEN cH.sendTail ← NIL;
};
-- TRUE iff the send FIFO is empty; caller must hold the monitor lock.
InternalSendQueueIsEmpty:
INTERNAL
PROC [cH: Cache]
RETURNS [
BOOL] ~
INLINE {
RETURN [cH.sendHead = NIL] };
-- One iteration of the daemon's work, under the monitor lock:
--  1. If there is nothing to send and no new pending entry, wait on the
--     condition with a short timeout while requests are pending, or a long
--     one otherwise.
--  2. Walk the pending list: delete entries whose sends are exhausted;
--     for the rest, queue a (re)request and reschedule the next send.
--  3. Periodically sweep validEntries, aging each entry by one tick.
WaitAndScanCache:
ENTRY
PROC [cH: Cache] ~ {
eH, prevH: CacheEntry;
IF InternalSendQueueIsEmpty[cH]
AND
NOT cH.newPendingEntry
THEN {
TRUSTED {
IF cH.pendingEntries #
NIL
THEN Process.SetTimeout[@cH.event, resendTimeout]
ELSE Process.SetTimeout[@cH.event, sweepTimeout] };
WAIT cH.event };
prevH ← NIL; eH ← cH.pendingEntries;
WHILE eH #
NIL
DO
-- NOTE(review): if Pulses is an unsigned type, this comparison is always
-- TRUE (unsigned >= 0), so whenToSend would not actually delay resends
-- beyond the condition-variable timeout — confirm intended semantics.
IF PulsesSince[eH.whenToSend] >= 0
THEN {
IF eH.timeToLive = 0
THEN {
Delete the entry.
eH ← eH.next;
IF prevH = NIL THEN cH.pendingEntries ← eH ELSE prevH.next ← eH;
LOOP };
{
Send the entry
-- While sends remain plentiful and a previous (now stale) pupHost is
-- known, ask that host directly; otherwise fall back to broadcast.
destHost: Pup.Host ~ IF (eH.timeToLive > (sendsToLive/2)) AND (eH.hosts.pupHost # Pup.nullHost) THEN eH.hosts.pupHost ELSE Pup.allHosts;
buffer: Buffer ~ MakeRequest[cH, eH.hosts.nsHost, destHost];
InternalEnqueueForSending[cH, buffer];
eH.timeToLive ← eH.timeToLive - 1;
eH.whenToSend ← BasicTime.GetClockPulses[] + pulsesPerResend;
};
};
prevH ← eH; eH ← eH.next;
ENDLOOP;
cH.newPendingEntry ← FALSE;
IF PulsesSince[cH.sweepTime] >= pulsesPerSweep
THEN {
Do a sweep ...
FOR i: HashIndex
IN [0..numHashHeaders)
DO
FOR eH ← cH.validEntries[i], eH.next
WHILE eH #
NIL
DO
IF eH.timeToLive > 0 THEN eH.timeToLive ← eH.timeToLive - 1;
ENDLOOP;
ENDLOOP;
cH.sweepTime ← BasicTime.GetClockPulses[];
};
};
-- Per-network background process (FORKed by Init): repeatedly scans the
-- cache (waiting inside WaitAndScanCache when idle), then transmits and
-- frees every buffer queued for sending. Never returns.
Daemon:
PROC [network: Network] ~ {
cH: Cache ~ NARROW[ network.xns.translation ];
buffer: Buffer;
Process.SetPriority[Process.priorityForeground];
DO
WaitAndScanCache[cH];
WHILE (buffer ← DequeueForSending[cH]) #
NIL
DO
-- All queued packets are full translation packets, hence the fixed length.
network.xns.sendTranslate[network, buffer, translationPacketBytes];
CommDriver.FreeBuffer[buffer];
ENDLOOP;
ENDLOOP;
};
Initialization
Init:
PROC = {
Install a cache (and start a daemon) for each ethernetOne on the chain.
cH: Cache;
FOR network: Network ← CommDriver.GetNetworkChain[], network.next
UNTIL network =
NIL
DO
IF network.type # ethernetOne THEN LOOP; -- only EthernetOne needs translation
cH ← NEW[ CacheObject ← [sweepTime~BasicTime.GetClockPulses[]] ];
-- Permanent entries: XNS broadcast maps to the EthernetOne broadcast host,
-- and this host maps to the network's own Pup address.
cH.broadcastHostEntry ← NEW[ CacheEntryObject ← [hosts~[nsHost~XNS.broadcastHost, pupHost~CommDriverType.ethernetOneBroadcastHost, filler~], whenToSend~, timeToLive~] ];
cH.thisHostEntry ← NEW[ CacheEntryObject ← [hosts~[nsHost~thisHost, pupHost~network.pup.host, filler~], whenToSend~, timeToLive~] ];
network.xns.translation ← cH;
network.xns.getEncapsulation ← GetEncapsulation;
CommDriver.InsertReceiveProc[network~network, type~xnsTranslate, proc~RecvTranslation];
cH.daemon ← FORK Daemon[network];
ENDLOOP;
};
-- Debugging aid: runs GetEncapsulation for nsHost on the first ethernetOne
-- network in the chain, discarding the result (side effect: may start a
-- translation request for that host).
TryHost:
PROC [nsHost:
XNS.Host] ~ {
FOR network: Network ← CommDriver.GetNetworkChain[], network.next
UNTIL network =
NIL
DO
IF network.type = ethernetOne
THEN {
[] ← GetEncapsulation[network, nsHost]; EXIT };
ENDLOOP;
};
-- Debugging aids: look up two specific machines by hard-coded XNS address.
TryLogan: PROC ~ { TryHost[ [a~02H, b~60H, c~8cH, d~00H, e~34H, f~48H] ] };
TryHubbard: PROC ~ { TryHost[ [a~02H, b~60H, c~8cH, d~10H, e~89H, f~25H] ] };
-- Debugging aid: builds an XNS.Host from a LONG CARDINAL n by zeroing the
-- first word and swapping n's two words into the remaining two, then looks
-- it up via TryHost.
-- NOTE(review): the word swap presumably compensates for LONG CARDINAL's
-- in-memory word order vs. the host's wire order — confirm.
Try:
PROC [n:
LONG
CARDINAL] =
TRUSTED {
t1: MACHINE DEPENDENT RECORD [b, c: CARDINAL];
t2: MACHINE DEPENDENT RECORD [a, b, c: CARDINAL];
t1 ← LOOPHOLE[n];
t2.a ← 0; t2.b ← t1.c; t2.c ← t1.b;
TryHost[ LOOPHOLE[t2] ];
};
Init[];
}.