-- XNSEthernetOneTranslation.mesa
-- Translates XNS host addresses into EthernetOne (Pup) host addresses,
-- using a per-network monitored cache refreshed by a daemon process.

DIRECTORY
  Basics USING [bytesPerWord],
  BasicTime USING [GetClockPulses, MicrosecondsToPulses, Pulses],
  CommBuffer USING [Overhead],
  CommDriver USING [AllocBuffer, Buffer, FreeBuffer, GetNetworkChain, InsertReceiveProc, Network, RecvProc],
  CommDriverType USING [Encapsulation, ethernetOneBroadcastHost],
  Process USING [MsecToTicks, priorityForeground, SecondsToTicks, SetPriority, SetTimeout, Ticks],
  Pup USING [allHosts, Host, nullHost],
  XNS USING [broadcastHost, GetThisHost, Host];

XNSEthernetOneTranslation: CEDAR MONITOR LOCKS cH USING cH: Cache
  IMPORTS BasicTime, CommDriver, XNS, Process
  EXPORTS CommBuffer
  ~ {

  BYTE: TYPE ~ [0..100H);
  bytesPerWord: NAT ~ Basics.bytesPerWord;
  Buffer: TYPE ~ CommDriver.Buffer;
  Network: TYPE ~ CommDriver.Network;
  Encapsulation: PUBLIC TYPE ~ CommDriverType.Encapsulation; -- exported to CommBuffer
  thisHost: XNS.Host ~ XNS.GetThisHost[];

  -- Time
  Pulses: TYPE ~ BasicTime.Pulses;
  MSecsToPulses: PROC [n: CARD] RETURNS [Pulses] ~ INLINE {
    RETURN [BasicTime.MicrosecondsToPulses[1000*n]] };
  PulsesSince: PROC [then: Pulses] RETURNS [Pulses] ~ INLINE {
    RETURN [BasicTime.GetClockPulses[] - then] };

  -- Translation Request / Reply Packets
  TranslationType: TYPE ~ MACHINE DEPENDENT RECORD [a, b: BYTE];
  requestType: TranslationType ~ [010H, 041H];
  replyType: TranslationType ~ [00eH, 038H];
  HostPair: TYPE ~ MACHINE DEPENDENT RECORD [
    nsHost: XNS.Host,
    pupHost: Pup.Host,
    filler: BYTE];
  hostPairBytes: CARDINAL ~ bytesPerWord*SIZE[HostPair]; -- Should be BYTES[HostPair]
  TranslationPacketObject: TYPE ~ MACHINE DEPENDENT RECORD [
    translationType: TranslationType,
    replier: HostPair,
    requestor: HostPair];
  translationPacketBytes: CARDINAL ~ bytesPerWord*SIZE[TranslationPacketObject]; -- Should be BYTES[TranslationPacketObject]; Must be even!
translationShortPacketBytes: CARDINAL ~ translationPacketBytes - hostPairBytes; -- CROCK: some old implementations send reply packets without the requestor field TranslationBuffer: TYPE ~ REF TranslationBufferObject; TranslationBufferObject: TYPE ~ MACHINE DEPENDENT RECORD [ ovh: CommBuffer.Overhead, translationType: TranslationType, replier: HostPair, requestor: HostPair]; numHashHeaders: CARDINAL ~ 101; HashIndex: TYPE ~ [0..numHashHeaders); Hash: PROC [nsHost: XNS.Host] RETURNS [HashIndex] ~ INLINE { RETURN [ nsHost.f MOD numHashHeaders ] }; Cache: TYPE ~ REF CacheObject; CacheObject: TYPE ~ MONITORED RECORD [ daemon: PROCESS, event: CONDITION, newPendingEntry: BOOL _ FALSE, sweepTime: Pulses, sendHead, sendTail: Buffer, broadcastHostEntry: CacheEntry, thisHostEntry: CacheEntry, pendingEntries: CacheEntry, validEntries: ARRAY HashIndex OF CacheEntry]; CacheEntry: TYPE ~ REF CacheEntryObject; CacheEntryObject: TYPE ~ RECORD [ next: CacheEntry, hosts: HostPair, whenToSend: Pulses, timeToLive: CARDINAL ]; UpToDate: PROC [eH: CacheEntry] RETURNS [BOOL] ~ INLINE { RETURN [eH.timeToLive > 0] }; pulsesPerSweep: Pulses _ MSecsToPulses[19000]; sweepTimeout: Process.Ticks _ Process.SecondsToTicks[20]; sweepsToLive: CARDINAL _ 6; pulsesPerResend: Pulses _ MSecsToPulses[230]; resendTimeout: Process.Ticks _ Process.MsecToTicks[250]; sendsToLive: CARDINAL _ 8; noTranslation: INT _ 0; notQuick: INT _ 0; GetEncapsulation: PROC [network: Network, nsHost: XNS.Host] RETURNS [Encapsulation] ~ { cH: Cache ~ NARROW[network.xns.translation]; eH: CacheEntry _ NIL; hashIndex: HashIndex ~ Hash[nsHost]; BEGIN IF (eH _ cH.validEntries[hashIndex]) # NIL THEN { IF (eH.hosts.nsHost = nsHost) AND UpToDate[eH] THEN GOTO Found; IF (eH _ eH.next) # NIL THEN { IF (eH.hosts.nsHost = nsHost) AND UpToDate[eH] THEN GOTO Found; NULL; -- more checks would go here ... 
}; }; IF nsHost = XNS.broadcastHost THEN { eH _ cH.broadcastHostEntry; GOTO Found }; IF nsHost = thisHost THEN { eH _ cH.thisHostEntry; GOTO Found }; notQuick _ notQuick.SUCC; IF (eH _ GetCacheEntry[cH, hashIndex, nsHost]) # NIL THEN GOTO Found; GOTO NotFound; EXITS Found => { TRUSTED { RETURN[ [ethernetOne[etherSpare1~0, etherSpare2~0, etherSpare3~0, etherSpare4~0, etherSpare5~0, ethernetOneDest~eH.hosts.pupHost, ethernetOneSource~network.pup.host, ethernetOneType~xns]] ] } }; NotFound => { noTranslation _ noTranslation.SUCC; TRUSTED { RETURN[ [ethernetOne[etherSpare1~0, etherSpare2~0, etherSpare3~0, etherSpare4~0, etherSpare5~0, ethernetOneDest~Pup.nullHost, ethernetOneSource~network.pup.host, ethernetOneType~translationFailed]] ] } }; END; }; GetCacheEntry: ENTRY PROC [cH: Cache, hashIndex: HashIndex, nsHost: XNS.Host] RETURNS [result: CacheEntry] ~ { eH, prevH: CacheEntry; eH _ cH.validEntries[hashIndex]; prevH _ NIL; WHILE eH # NIL DO IF eH.hosts.nsHost = nsHost THEN { IF UpToDate[eH] THEN { IF prevH # NIL THEN { prevH.next _ eH.next; eH.next _ cH.validEntries[hashIndex]; cH.validEntries[hashIndex] _ eH }; RETURN[eH] } ELSE { IF prevH # NIL THEN prevH.next _ eH.next ELSE cH.validEntries[hashIndex] _ eH.next; eH.timeToLive _ sendsToLive; eH.whenToSend _ BasicTime.GetClockPulses[]; eH.next _ cH.pendingEntries; cH.pendingEntries _ eH; cH.newPendingEntry _ TRUE; NOTIFY cH.event; RETURN[eH] }; }; prevH _ eH; eH _ eH.next ENDLOOP; FOR eH _ cH.pendingEntries, eH.next WHILE eH # NIL DO IF eH.hosts.nsHost = nsHost THEN RETURN [IF eH.hosts.pupHost # Pup.nullHost THEN eH ELSE NIL]; ENDLOOP; TRUSTED { cH.pendingEntries _ NEW[ CacheEntryObject _ [next~cH.pendingEntries, hosts~[nsHost~nsHost, pupHost~Pup.nullHost, filler~], whenToSend~BasicTime.GetClockPulses[], timeToLive~sendsToLive] ] }; cH.newPendingEntry _ TRUE; NOTIFY cH.event; RETURN[NIL] }; MakeRequest: PROC [cH: Cache, nsHost: XNS.Host, sendTo: Pup.Host _ Pup.allHosts] RETURNS [b: Buffer] ~ { bH: TranslationBuffer; 
b _ CommDriver.AllocBuffer[]; TRUSTED { bH _ LOOPHOLE[b] }; bH.translationType _ requestType; bH.replier _ [nsHost~nsHost, pupHost~Pup.nullHost, filler~0]; bH.requestor _ cH.thisHostEntry.hosts; TRUSTED { bH.ovh.encap _ Encapsulation[ethernetOne[etherSpare1~0, etherSpare2~0, etherSpare3~0, etherSpare4~0, etherSpare5~0, ethernetOneDest~sendTo, ethernetOneSource~cH.thisHostEntry.hosts.pupHost, ethernetOneType~translation]] }; }; ConvertToReply: PROC [cH: Cache, bH: TranslationBuffer] ~ { bH.translationType _ replyType; bH.replier _ cH.thisHostEntry.hosts; TRUSTED { bH.ovh.encap _ Encapsulation[ethernetOne[etherSpare1~0, etherSpare2~0, etherSpare3~0, etherSpare4~0, etherSpare5~0, ethernetOneDest~bH.requestor.pupHost, ethernetOneSource~cH.thisHostEntry.hosts.pupHost, ethernetOneType~translation]] }; }; AddTranslation: ENTRY PROC [cH: Cache, hosts: HostPair] ~ { eH, prevH: CacheEntry; i: HashIndex ~ Hash[hosts.nsHost]; IF hosts.pupHost = Pup.allHosts THEN RETURN; -- Sanity check eH _ cH.pendingEntries; prevH _ NIL; WHILE eH # NIL DO IF eH.hosts.nsHost = hosts.nsHost THEN { IF prevH = NIL THEN cH.pendingEntries _ eH.next ELSE prevH.next _ eH.next; eH.hosts.pupHost _ hosts.pupHost; EXIT }; prevH _ eH; eH _ eH.next ENDLOOP; IF eH = NIL THEN { eH _ cH.validEntries[i]; prevH _ NIL; WHILE eH # NIL DO IF eH.hosts.nsHost = hosts.nsHost THEN { IF prevH = NIL THEN cH.validEntries[i] _ eH.next ELSE prevH.next _ eH.next; IF eH.hosts.pupHost # hosts.pupHost THEN eH _ NIL; EXIT }; prevH _ eH; eH _ eH.next ENDLOOP; }; IF eH = NIL THEN eH _ NEW[ CacheEntryObject _ [next~, hosts~hosts, whenToSend~, timeToLive~]]; eH.timeToLive _ sweepsToLive; eH.next _ cH.validEntries[i]; cH.validEntries[i] _ eH; }; requestsReceived: INT _ 0; repliesReceived: INT _ 0; tooShort: INT _ 0; badProtocol: INT _ 0; RecvTranslation: CommDriver.RecvProc ~ { cH: Cache ~ NARROW[network.xns.translation]; bH: TranslationBuffer; IF bytes < translationShortPacketBytes THEN { tooShort _ tooShort.SUCC; RETURN [buffer] 
}; TRUSTED { bH _ LOOPHOLE[buffer] }; SELECT TRUE FROM bH.translationType = requestType => { IF bytes < translationPacketBytes THEN { tooShort _ tooShort.SUCC; RETURN [buffer] }; IF bH.replier.nsHost = thisHost THEN { requestsReceived _ requestsReceived.SUCC; AddTranslation[cH, bH.requestor]; ConvertToReply[cH, bH]; EnqueueForSending[cH, buffer]; buffer _ NIL; }; }; bH.translationType = replyType => { repliesReceived _ repliesReceived.SUCC; AddTranslation[cH, bH.replier]; }; ENDCASE => { badProtocol _ badProtocol.SUCC }; RETURN[buffer]; }; EnqueueForSending: ENTRY PROC [cH: Cache, b: Buffer] ~ { IF cH.sendHead = NIL THEN cH.sendHead _ b ELSE cH.sendTail.ovh.next _ b; cH.sendTail _ b; b.ovh.next _ NIL; NOTIFY cH.event }; InternalEnqueueForSending: INTERNAL PROC [cH: Cache, b: Buffer] ~ { IF cH.sendHead = NIL THEN cH.sendHead _ b ELSE cH.sendTail.ovh.next _ b; cH.sendTail _ b; b.ovh.next _ NIL; NOTIFY cH.event }; DequeueForSending: ENTRY PROC [cH: Cache] RETURNS [b: Buffer] ~ { IF (b _ cH.sendHead) = NIL THEN RETURN; IF (cH.sendHead _ NARROW[b.ovh.next]) = NIL THEN cH.sendTail _ NIL; }; InternalSendQueueIsEmpty: INTERNAL PROC [cH: Cache] RETURNS [BOOL] ~ INLINE { RETURN [cH.sendHead = NIL] }; WaitAndScanCache: ENTRY PROC [cH: Cache] ~ { eH, prevH: CacheEntry; IF InternalSendQueueIsEmpty[cH] AND NOT cH.newPendingEntry THEN { TRUSTED { IF cH.pendingEntries # NIL THEN Process.SetTimeout[@cH.event, resendTimeout] ELSE Process.SetTimeout[@cH.event, sweepTimeout] }; WAIT cH.event }; prevH _ NIL; eH _ cH.pendingEntries; WHILE eH # NIL DO IF PulsesSince[eH.whenToSend] >= 0 THEN { IF eH.timeToLive = 0 THEN { eH _ eH.next; IF prevH = NIL THEN cH.pendingEntries _ eH ELSE prevH.next _ eH; LOOP }; { destHost: Pup.Host ~ IF (eH.timeToLive > (sendsToLive/2)) AND (eH.hosts.pupHost # Pup.nullHost) THEN eH.hosts.pupHost ELSE Pup.allHosts; buffer: Buffer ~ MakeRequest[cH, eH.hosts.nsHost, destHost]; InternalEnqueueForSending[cH, buffer]; eH.timeToLive _ eH.timeToLive - 1; eH.whenToSend 
_ BasicTime.GetClockPulses[] + pulsesPerResend; }; }; prevH _ eH; eH _ eH.next; ENDLOOP; cH.newPendingEntry _ FALSE; IF PulsesSince[cH.sweepTime] >= pulsesPerSweep THEN { FOR i: HashIndex IN [0..numHashHeaders) DO FOR eH _ cH.validEntries[i], eH.next WHILE eH # NIL DO IF eH.timeToLive > 0 THEN eH.timeToLive _ eH.timeToLive - 1; ENDLOOP; ENDLOOP; cH.sweepTime _ BasicTime.GetClockPulses[]; }; }; Daemon: PROC [network: Network] ~ { cH: Cache ~ NARROW[ network.xns.translation ]; buffer: Buffer; Process.SetPriority[Process.priorityForeground]; DO WaitAndScanCache[cH]; WHILE (buffer _ DequeueForSending[cH]) # NIL DO network.xns.sendTranslate[network, buffer, translationPacketBytes]; CommDriver.FreeBuffer[buffer]; ENDLOOP; ENDLOOP; }; Init: PROC = { cH: Cache; FOR network: Network _ CommDriver.GetNetworkChain[], network.next UNTIL network = NIL DO IF network.type # ethernetOne THEN LOOP; cH _ NEW[ CacheObject _ [sweepTime~BasicTime.GetClockPulses[]] ]; cH.broadcastHostEntry _ NEW[ CacheEntryObject _ [hosts~[nsHost~XNS.broadcastHost, pupHost~CommDriverType.ethernetOneBroadcastHost, filler~], whenToSend~, timeToLive~] ]; cH.thisHostEntry _ NEW[ CacheEntryObject _ [hosts~[nsHost~thisHost, pupHost~network.pup.host, filler~], whenToSend~, timeToLive~] ]; network.xns.translation _ cH; network.xns.getEncapsulation _ GetEncapsulation; CommDriver.InsertReceiveProc[network~network, type~xnsTranslate, proc~RecvTranslation]; cH.daemon _ FORK Daemon[network]; ENDLOOP; }; TryHost: PROC [nsHost: XNS.Host] ~ { FOR network: Network _ CommDriver.GetNetworkChain[], network.next UNTIL network = NIL DO IF network.type = ethernetOne THEN { [] _ GetEncapsulation[network, nsHost]; EXIT }; ENDLOOP; }; TryLogan: PROC ~ { TryHost[ [a~02H, b~60H, c~8cH, d~00H, e~34H, f~48H] ] }; TryHubbard: PROC ~ { TryHost[ [a~02H, b~60H, c~8cH, d~10H, e~89H, f~25H] ] }; Try: PROC [n: LONG CARDINAL] = TRUSTED { t1: MACHINE DEPENDENT RECORD [b, c: CARDINAL]; t2: MACHINE DEPENDENT RECORD [a, b, c: CARDINAL]; t1 _ 
LOOPHOLE[n]; t2.a _ 0; t2.b _ t1.c; t2.c _ t1.b; TryHost[ LOOPHOLE[t2] ]; }; Init[]; }. XNSEthernetOneTranslation.mesa Copyright c 1986 by Xerox Corporation. All rights reserved. Demers, December 2, 1986 10:50:29 am PST Very freely adapted from translation code in: Cedar6.0>OISCP>EthernetOneDriver.mesa Birrell on: 9-Oct-81 16:43:32 BLyon on: March 13, 1981 10:47 PM Levin, August 9, 1983 9:28 am Russ Atkinson (RRA) February 19, 1985 7:44:43 pm PST Time Translation Request / Reply Packets Translation Entry Cache Timeouts Encapsulating NS Packets Statistics Quick check of first couple of entries without acquiring ML. Search for a cache entry for the given nsHost. If an entry is found, return it. If no entry is found, or the entry is out of date, arrange for a new entry to be added. Search for an entry in the valid list. Move entry to head of list. Entry needs to be refreshed  move it to pending list. Search for a pending entry. No entry found, create a new pending one. Building Request / Reply Packets Allocate a buffer, build a request packet in it, and return it. The sendTo parameter is the Pup Host to which the request packet will be sent. It should be allHosts (broadcast) for a normal request. Given a request buffer, convert it to the corresponding reply. Fill in the encapsulation part here, so the buffer can be sent using network.sendTranslate rather than network.return. Processing Received Translation Packets Look for a pending entry (most likely case). If no pending entry, look for a valid one. If existing entry is incorrect, drop it on the floor ... Receive Statistics [network: Network, buffer: Buffer, bytes: NAT] RETURNS [Buffer] CROCK: the following test should be "< translationPacketBytes", but some old implementations send reply packets without the requestor field. Eventually, when the old implementations go away, fix it by moving the other CROCK (below) up to this position. CROCK: the following test should be moved up to replace the previous CROCK. 
Daemon Process Delete the entry. Send the entry Do a sweep ... Initialization Install a cache (and start a daemon) for each ethernetOne on the chain. Κ”˜codešœ™Kšœ Οmœ1™K˜Kšœ,˜,Kšœ*˜*K˜š œ žœžœž œžœ˜+Kšœžœ˜K˜Kšœžœ˜K˜—šœžœžœ  ˜SK˜—šœ žœž œžœ˜:Kšœ!˜!K˜K˜K˜—Kšœžœžœ :˜‰šœžœ+ Q˜‘K˜—Kšœžœžœ˜6š œžœžœž œžœ˜:K˜Kšœ!˜!K˜K˜K˜——™Kšœžœ˜Kšœ žœ˜&K˜š ‘œžœ žœžœžœ˜K™vKšœ˜K˜$Kšžœο˜φK˜K˜——™'š‘œžœžœ!˜;K˜Kšœ"˜"K˜Kšžœžœžœ ˜