-- XNSEthernetOneTranslation.mesa
-- Copyright (C) 1986 by Xerox Corporation. All rights reserved.
-- Demers, June 5, 1986 10:38:20 pm PDT
-- Very freely adapted from translation code in: Cedar6.0>OISCP>EthernetOneDriver.mesa
--   Birrell on: 9-Oct-81 16:43:32
--   BLyon on: March 13, 1981 10:47 PM
--   Levin, August 9, 1983 9:28 am
--   Russ Atkinson (RRA) February 19, 1985 7:44:43 pm PST

DIRECTORY
  Basics USING [bytesPerWord],
  BasicTime USING [GetClockPulses, MicrosecondsToPulses, Pulses],
  CommBuffer USING [Overhead],
  CommDriver USING [AllocBuffer, Buffer, FreeBuffer, GetNetworkChain, InsertReceiveProc, Network, RecvProc],
  CommDriverType USING [Encapsulation, ethernetOneBroadcastHost],
  Process USING [SecondsToTicks, SetPriority, SetTimeout, Ticks],
  Pup USING [allHosts, Host, nullHost],
  XNS USING [broadcastHost, GetThisHost, Host];

XNSEthernetOneTranslation: CEDAR MONITOR LOCKS cH USING cH: Cache
  IMPORTS BasicTime, CommDriver, XNS, Process
  EXPORTS CommBuffer
  ~ {

  BYTE: TYPE ~ [0..100H);
  bytesPerWord: NAT ~ Basics.bytesPerWord;
  Buffer: TYPE ~ CommDriver.Buffer;
  Network: TYPE ~ CommDriver.Network;
  Encapsulation: PUBLIC TYPE ~ CommDriverType.Encapsulation; -- exported to CommBuffer
  thisHost: XNS.Host ~ XNS.GetThisHost[];

  -- Time

  Pulses: TYPE ~ BasicTime.Pulses;
  MSecsToPulses: PROC [n: LONG CARDINAL] RETURNS [Pulses] ~ INLINE {
    RETURN [BasicTime.MicrosecondsToPulses[1000*n]] };
  PulsesSince: PROC [then: Pulses] RETURNS [Pulses] ~ INLINE {
    RETURN [BasicTime.GetClockPulses[] - then] };

  -- Translation Request / Reply Packets

  TranslationType: TYPE ~ MACHINE DEPENDENT RECORD [a, b: BYTE];
  requestType: TranslationType ~ [010H, 041H];
  replyType: TranslationType ~ [00eH, 038H];

  HostPair: TYPE ~ MACHINE DEPENDENT RECORD [
    nsHost: XNS.Host,
    pupHost: Pup.Host,
    filler: BYTE];
  hostPairBytes: CARDINAL ~ bytesPerWord*SIZE[HostPair]; -- Should be BYTES[HostPair]

  TranslationPacketObject: TYPE ~ MACHINE DEPENDENT RECORD [
    translationType: TranslationType,
    replier: HostPair,
    requestor: HostPair];
  translationPacketBytes: CARDINAL ~ bytesPerWord*SIZE[TranslationPacketObject]; -- Should be BYTES[TranslationPacketObject]; Must be even!
  translationShortPacketBytes: CARDINAL ~ translationPacketBytes - hostPairBytes; -- CROCK: some old implementations send reply packets without the requestor field

  TranslationBuffer: TYPE ~ REF TranslationBufferObject;
  TranslationBufferObject: TYPE ~ MACHINE DEPENDENT RECORD [
    ovh: CommBuffer.Overhead,
    translationType: TranslationType,
    replier: HostPair,
    requestor: HostPair];
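  -- Editor's sketch (not in the original source): the wire format implied by the declarations
  -- above, assuming XNS.Host occupies 6 bytes (fields a..f) and Pup.Host 1 byte:
  --   bytes 0..1    translationType   (010H 041H for a request, 00eH 038H for a reply)
  --   bytes 2..9    replier HostPair  (6-byte XNS host, 1-byte Ethernet-1 host, 1 filler byte)
  --   bytes 10..17  requestor HostPair
  -- which makes translationPacketBytes 18 and translationShortPacketBytes 10; the short form is
  -- what the old implementations mentioned in the CROCK comment actually send.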
  -- Translation Entry Cache

  numHashHeaders: CARDINAL ~ 101;
  HashIndex: TYPE ~ [0..numHashHeaders);
  Hash: PROC [nsHost: XNS.Host] RETURNS [HashIndex] ~ INLINE {
    RETURN [ nsHost.f MOD numHashHeaders ] };

  Cache: TYPE ~ REF CacheObject;
  CacheObject: TYPE ~ MONITORED RECORD [
    daemon: PROCESS,
    event: CONDITION,
    newPendingEntry: BOOL _ FALSE,
    sweepCheckTime: Pulses _ 0,
    sweepChecksUntilSweep: CARDINAL _ sweepChecksPerSweep,
    sendHead, sendTail: Buffer,
    broadcastHostEntry: CacheEntry,
    thisHostEntry: CacheEntry,
    pendingEntries: CacheEntry,
    validEntries: ARRAY HashIndex OF CacheEntry];

  CacheEntry: TYPE ~ REF CacheEntryObject;
  CacheEntryObject: TYPE ~ RECORD [
    next: CacheEntry,
    hosts: HostPair,
    referenced: BOOL,
    timeStamp: Pulses,
    tries: CARDINAL ];

  -- Timeouts

  pulsesPerSweepCheck: Pulses ~ MSecsToPulses[59500];
  sweepCheckTimeout: Process.Ticks ~ Process.SecondsToTicks[60];
  sweepChecksPerSweep: CARDINAL ~ 5;
  pulsesPerResend: Pulses ~ MSecsToPulses[1500];
  resendTimeout: Process.Ticks _ Process.SecondsToTicks[1];
  maxTries: CARDINAL ~ 8;

  -- Encapsulating NS Packets

  -- Statistics
  noTranslation: INT _ 0;
  notQuick: INT _ 0;

  GetEncapsulation: PROC [network: Network, nsHost: XNS.Host] RETURNS [Encapsulation] ~ {
    cH: Cache ~ NARROW[network.xns.translation];
    eH: CacheEntry _ NIL;
    hashIndex: HashIndex ~ Hash[nsHost];
    BEGIN
    -- Quick check of first couple of entries without acquiring ML.
    IF (eH _ cH.validEntries[hashIndex]) # NIL THEN {
      IF eH.hosts.nsHost = nsHost THEN GOTO Found;
      IF (eH _ eH.next) # NIL THEN {
        IF eH.hosts.nsHost = nsHost THEN GOTO Found;
        NULL; -- more checks would go here ...
        };
      };
    IF nsHost = XNS.broadcastHost THEN { eH _ cH.broadcastHostEntry; GOTO Found };
    IF nsHost = thisHost THEN { eH _ cH.thisHostEntry; GOTO Found };
    notQuick _ notQuick.SUCC;
    IF (eH _ GetCacheEntry[cH, hashIndex, nsHost]) # NIL THEN GOTO Found;
    GOTO NotFound;
    EXITS
      Found => {
        eH.referenced _ TRUE;
        TRUSTED {
          RETURN[ [ethernetOne[
            etherSpare1~0, etherSpare2~0, etherSpare3~0, etherSpare4~0, etherSpare5~0,
            ethernetOneDest~eH.hosts.pupHost,
            ethernetOneSource~network.pup.host,
            ethernetOneType~xns]] ] } };
      NotFound => {
        noTranslation _ noTranslation.SUCC;
        TRUSTED {
          RETURN[ [ethernetOne[
            etherSpare1~0, etherSpare2~0, etherSpare3~0, etherSpare4~0, etherSpare5~0,
            ethernetOneDest~Pup.nullHost,
            ethernetOneSource~network.pup.host,
            ethernetOneType~translationFailed]] ] } };
    END;
    };

  -- Search for a valid cache entry for the given nsHost.  If a valid entry is found, return it;
  -- otherwise return NIL and arrange for an entry to be added.
  GetCacheEntry: ENTRY PROC [cH: Cache, hashIndex: HashIndex, nsHost: XNS.Host] RETURNS [CacheEntry] ~ {
    eH, prevH: CacheEntry;
    eH _ cH.validEntries[hashIndex]; prevH _ NIL;
    WHILE eH # NIL DO
      IF eH.hosts.nsHost = nsHost THEN {
        IF prevH # NIL THEN {
          -- Move entry to head of list.
          prevH.next _ eH.next;
          eH.next _ cH.validEntries[hashIndex];
          cH.validEntries[hashIndex] _ eH };
        RETURN[eH] };
      prevH _ eH; eH _ eH.next
      ENDLOOP;
    FOR eH _ cH.pendingEntries, eH.next WHILE eH # NIL DO
      IF eH.hosts.nsHost = nsHost THEN RETURN[NIL];
      ENDLOOP;
    TRUSTED {
      cH.pendingEntries _ NEW[ CacheEntryObject _ [
        next~cH.pendingEntries,
        hosts~[nsHost~nsHost, pupHost~Pup.allHosts, filler~],
        referenced~TRUE,
        timeStamp~BasicTime.GetClockPulses[],
        tries~0] ] };
    cH.newPendingEntry _ TRUE;
    NOTIFY cH.event;
    RETURN[NIL] };

  -- Building Request / Reply Packets

  -- Allocate a buffer, build a request packet in it, and return it.  The sendTo parameter is the
  -- Pup Host to which the request packet will be sent.  It should be allHosts (broadcast) for a
  -- normal request.
  MakeRequest: PROC [cH: Cache, nsHost: XNS.Host, sendTo: Pup.Host _ Pup.allHosts] RETURNS [b: Buffer] ~ {
    bH: TranslationBuffer;
    b _ CommDriver.AllocBuffer[];
    TRUSTED { bH _ LOOPHOLE[b] };
    bH.translationType _ requestType;
    bH.replier _ [nsHost~nsHost, pupHost~Pup.nullHost, filler~0];
    bH.requestor _ cH.thisHostEntry.hosts;
    TRUSTED {
      bH.ovh.encap _ Encapsulation[ethernetOne[
        etherSpare1~0, etherSpare2~0, etherSpare3~0, etherSpare4~0, etherSpare5~0,
        ethernetOneDest~sendTo,
        ethernetOneSource~cH.thisHostEntry.hosts.pupHost,
        ethernetOneType~translation]] };
    };

  -- Given a request buffer, convert it to the corresponding reply.  Fill in the encapsulation
  -- part here, so the buffer can be sent using network.sendTranslate rather than network.return.
  ConvertToReply: PROC [cH: Cache, bH: TranslationBuffer] ~ {
    bH.translationType _ replyType;
    bH.replier _ cH.thisHostEntry.hosts;
    TRUSTED {
      bH.ovh.encap _ Encapsulation[ethernetOne[
        etherSpare1~0, etherSpare2~0, etherSpare3~0, etherSpare4~0, etherSpare5~0,
        ethernetOneDest~bH.requestor.pupHost,
        ethernetOneSource~cH.thisHostEntry.hosts.pupHost,
        ethernetOneType~translation]] };
    };

  -- Processing Received Translation Packets

  -- Look for a pending entry.  If no pending entry, look for a valid one.  If existing entry is
  -- incorrect, drop it on the floor ...
  AddTranslation: ENTRY PROC [cH: Cache, hosts: HostPair] ~ {
    eH, prevH: CacheEntry;
    i: HashIndex ~ Hash[hosts.nsHost];
    eH _ cH.pendingEntries; prevH _ NIL;
    WHILE eH # NIL DO
      IF eH.hosts.nsHost = hosts.nsHost THEN {
        IF prevH = NIL THEN cH.pendingEntries _ eH.next ELSE prevH.next _ eH.next;
        eH.hosts.pupHost _ hosts.pupHost;
        EXIT };
      prevH _ eH; eH _ eH.next
      ENDLOOP;
    IF eH = NIL THEN {
      eH _ cH.validEntries[i]; prevH _ NIL;
      WHILE eH # NIL DO
        IF eH.hosts.nsHost = hosts.nsHost THEN {
          IF prevH = NIL THEN cH.validEntries[i] _ eH.next ELSE prevH.next _ eH.next;
          IF eH.hosts.pupHost # hosts.pupHost THEN eH _ NIL;
          EXIT };
        prevH _ eH; eH _ eH.next
        ENDLOOP;
      };
    IF eH = NIL
      THEN cH.validEntries[i] _ NEW[ CacheEntryObject _ [
        next~cH.validEntries[i],
        hosts~hosts,
        referenced~TRUE,
        timeStamp~BasicTime.GetClockPulses[],
        tries~0] ]
      ELSE { eH.next _ cH.validEntries[i]; cH.validEntries[i] _ eH };
    };

  -- Receive Statistics
  requestsReceived: INT _ 0;
  repliesReceived: INT _ 0;
  tooShort: INT _ 0;
  badProtocol: INT _ 0;
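  -- Editor's note (not in the original source): RecvTranslation below runs on the driver's
  -- receive path.  A request whose replier.nsHost names this host is answered in place: the
  -- requestor's own translation is recorded via AddTranslation, the request buffer is rewritten
  -- into a reply by ConvertToReply, and that same buffer is handed to the daemon through
  -- EnqueueForSending, so no new buffer is allocated on the receive path.  Replies simply feed
  -- AddTranslation.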
  -- [network: Network, buffer: Buffer, bytes: NAT] RETURNS [Buffer]
  RecvTranslation: CommDriver.RecvProc ~ {
    cH: Cache ~ NARROW[network.xns.translation];
    bH: TranslationBuffer;
    -- CROCK: the following test should be "< translationPacketBytes", but some old
    -- implementations send reply packets without the requestor field.  Eventually, when the old
    -- implementations go away, fix it by moving the other CROCK (below) up to this position.
    IF bytes < translationShortPacketBytes THEN { tooShort _ tooShort.SUCC; RETURN [buffer] };
    TRUSTED { bH _ LOOPHOLE[buffer] };
    SELECT TRUE FROM
      bH.translationType = requestType => {
        -- CROCK: the following test should be moved up to replace the previous CROCK.
        IF bytes < translationPacketBytes THEN { tooShort _ tooShort.SUCC; RETURN [buffer] };
        IF bH.replier.nsHost = thisHost THEN {
          requestsReceived _ requestsReceived.SUCC;
          AddTranslation[cH, bH.requestor];
          ConvertToReply[cH, bH];
          EnqueueForSending[cH, buffer];
          buffer _ NIL;
          };
        };
      bH.translationType = replyType => {
        repliesReceived _ repliesReceived.SUCC;
        AddTranslation[cH, bH.replier];
        };
      ENDCASE => { badProtocol _ badProtocol.SUCC };
    RETURN[buffer];
    };

  -- Daemon Process

  EnqueueForSending: ENTRY PROC [cH: Cache, b: Buffer] ~ {
    IF cH.sendHead = NIL THEN cH.sendHead _ b ELSE cH.sendTail.ovh.next _ b;
    cH.sendTail _ b;
    b.ovh.next _ NIL;
    NOTIFY cH.event };

  InternalEnqueueForSending: INTERNAL PROC [cH: Cache, b: Buffer] ~ {
    IF cH.sendHead = NIL THEN cH.sendHead _ b ELSE cH.sendTail.ovh.next _ b;
    cH.sendTail _ b;
    b.ovh.next _ NIL;
    NOTIFY cH.event };

  DequeueForSending: ENTRY PROC [cH: Cache] RETURNS [b: Buffer] ~ {
    IF (b _ cH.sendHead) = NIL THEN RETURN;
    IF (cH.sendHead _ NARROW[b.ovh.next]) = NIL THEN cH.sendTail _ NIL;
    };

  InternalSendQueueIsEmpty: INTERNAL PROC [cH: Cache] RETURNS [BOOL] ~ INLINE {
    RETURN [cH.sendHead = NIL] };

  WaitAndScanCache: ENTRY PROC [cH: Cache] ~ {
    eH, prevH: CacheEntry;
    IF InternalSendQueueIsEmpty[cH] AND NOT cH.newPendingEntry THEN {
      TRUSTED {
        IF cH.pendingEntries # NIL
          THEN Process.SetTimeout[@cH.event, resendTimeout]
          ELSE Process.SetTimeout[@cH.event, sweepCheckTimeout] };
      WAIT cH.event };
    prevH _ NIL; eH _ cH.pendingEntries;
    WHILE eH # NIL DO
      IF PulsesSince[eH.timeStamp] >= pulsesPerResend THEN {
        IF eH.tries >= maxTries THEN {
          -- Delete the entry.
          eH _ eH.next;
          IF prevH = NIL THEN cH.pendingEntries _ eH ELSE prevH.next _ eH;
          LOOP };
        eH.tries _ eH.tries + 1;
        eH.timeStamp _ BasicTime.GetClockPulses[];
        { buffer: Buffer ~ MakeRequest[cH, eH.hosts.nsHost]; InternalEnqueueForSending[cH, buffer] };
        };
      prevH _ eH; eH _ eH.next;
      ENDLOOP;
    cH.newPendingEntry _ FALSE;
    IF PulsesSince[cH.sweepCheckTime] >= pulsesPerSweepCheck THEN {
      -- Do a sweep check ...
      cH.sweepCheckTime _ BasicTime.GetClockPulses[];
      IF cH.sweepChecksUntilSweep = 0
        THEN {
          -- Do a sweep ...
          FOR i: HashIndex IN [0..numHashHeaders) DO
            eH _ cH.validEntries[i]; prevH _ NIL;
            WHILE eH # NIL DO
              IF eH.referenced
                THEN { eH.referenced _ FALSE; prevH _ eH; eH _ eH.next }
                ELSE {
                  -- Delete the entry.
                  eH _ eH.next;
                  IF prevH = NIL THEN cH.validEntries[i] _ eH ELSE prevH.next _ eH };
              ENDLOOP;
            ENDLOOP;
          cH.sweepChecksUntilSweep _ sweepChecksPerSweep;
          }
        ELSE { cH.sweepChecksUntilSweep _ cH.sweepChecksUntilSweep - 1; };
      };
    };
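  -- Editor's note (not in the original source): Daemon is the per-network background process
  -- forked by Init.  Each time around its loop, WaitAndScanCache (holding the monitor lock)
  -- waits on cH.event, retransmits or expires pending translation requests, and periodically
  -- sweeps unreferenced entries out of the valid-entry cache; the daemon then transmits and
  -- frees everything on the send queue outside the lock.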
  Daemon: PROC [network: Network] ~ {
    cH: Cache ~ NARROW[ network.xns.translation ];
    buffer: Buffer;
    Process.SetPriority[3]; -- ???
    DO
      WaitAndScanCache[cH];
      WHILE (buffer _ DequeueForSending[cH]) # NIL DO
        network.xns.sendTranslate[network, buffer, translationPacketBytes];
        CommDriver.FreeBuffer[buffer];
        ENDLOOP;
      ENDLOOP;
    };

  -- Initialization

  -- Install a cache (and start a daemon) for each ethernetOne on the chain.
  Init: PROC = {
    cH: Cache;
    FOR network: Network _ CommDriver.GetNetworkChain[], network.next UNTIL network = NIL DO
      IF network.type # ethernetOne THEN LOOP;
      cH _ NEW[ CacheObject _ [] ];
      cH.broadcastHostEntry _ NEW[ CacheEntryObject _ [
        hosts~[nsHost~XNS.broadcastHost, pupHost~CommDriverType.ethernetOneBroadcastHost, filler~],
        referenced~, timeStamp~, tries~] ];
      cH.thisHostEntry _ NEW[ CacheEntryObject _ [
        hosts~[nsHost~thisHost, pupHost~network.pup.host, filler~],
        referenced~, timeStamp~, tries~] ];
      network.xns.translation _ cH;
      network.xns.getEncapsulation _ GetEncapsulation;
      CommDriver.InsertReceiveProc[network~network, type~xnsTranslate, proc~RecvTranslation];
      cH.daemon _ FORK Daemon[network];
      ENDLOOP;
    };

  TryHost: PROC [nsHost: XNS.Host] ~ {
    FOR network: Network _ CommDriver.GetNetworkChain[], network.next UNTIL network = NIL DO
      IF network.type = ethernetOne THEN { [] _ GetEncapsulation[network, nsHost]; EXIT };
      ENDLOOP;
    };

  TryLogan: PROC ~ { TryHost[ [a~02H, b~60H, c~8cH, d~00H, e~34H, f~48H] ] };

  TryHubbard: PROC ~ { TryHost[ [a~02H, b~60H, c~8cH, d~10H, e~89H, f~25H] ] };

  Try: PROC [n: LONG CARDINAL] = TRUSTED {
    t1: MACHINE DEPENDENT RECORD [b, c: CARDINAL];
    t2: MACHINE DEPENDENT RECORD [a, b, c: CARDINAL];
    t1 _ LOOPHOLE[n];
    t2.a _ 0; t2.b _ t1.c; t2.c _ t1.b;
    TryHost[ LOOPHOLE[t2] ];
    };

  Init[];

  }.
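-- Editor's note (not in the original source): Init runs when the module starts (the Init[] call
-- in the mainline code above).  For every ethernetOne network on the driver chain it installs
-- GetEncapsulation as network.xns.getEncapsulation and RecvTranslation as the receive proc for
-- xnsTranslate packets, so XNS output code obtains an Ethernet-1 encapsulation (or one marked
-- ethernetOneType~translationFailed) without knowing about this cache.  TryLogan, TryHubbard,
-- and Try appear to be debugging hooks for forcing a translation request by hand.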