Cluster.rose
Copyright © 1984 by Xerox Corporation. All rights reserved.
Last edited by: Barth, February 8, 1984 10:53 am
Last edited by: McCreight, March 11, 1986 12:24:23 pm PST
Last edited by: Curry, September 6, 1985 0:25:30 am PDT
Last edited by: Herrmann, September 6, 1985 4:15:14 pm PDT
Directory Basics, DragOpsCross;
TranslationNeeds Dragon;
Imports CacheOps, ClusterParams, DragonRosemary, IO, LizardRosemary, Process, Rope, RoseRun, RoseTypes;
Library IFU, EU, Cache;
CELLTYPE "Cluster"
-- Top-level Rose cell for one Dragon processor cluster: an IFU, an EU, and
-- two caches sharing a main-memory (M) bus and a serial debugging chain
-- (see the Expand section for the internal wiring).
PORTS[
-- NOTE(review): the direction markers appear to be "<" = input to the cell,
-- ">" = output from the cell, "=" = bidirectional -- confirm against the
-- Rose PORTS notation before relying on this.
Signal names obey the following convention: If a signal x is computed during PhA and remains valid throughout the following PhB, it is denoted as xAB. If x is computed during PhA and can change during the following PhB (as, for example, in precharged logic), it is denoted as xA. In this latter case, a client wanting to use x during PhB must receive it in his own latch open during PhA. xBA and xB are defined symmetrically. Positive logic is assumed (asserted = TRUE = 1 = more positive logic voltage); negative-logic signals have an extra "N" at or very near the beginning of the signal name (e.g., PNPError for PBus Negative-TRUE Parity Error).
Simulation control signals
IPRejectB   > BOOL,
DPRejectB  > BOOL,
Timing and housekeeping interface
PhA, PhB   < BOOL,
Vdd, Gnd   < BOOL,
PadVdd, PadGnd < BOOL,
RescheduleAB  < BOOL,
ClusterError   = BOOL,
P Interfaces for EU cache, for logging
DPData    > INT[32],
DPCmnd3A   > EnumType["Dragon.PBusCommands"],
DPFaultB    > EnumType["Dragon.PBusFaults"],
Main memory interface
MDataAB    = INT[32],
MCmdAB    = EnumType["Dragon.MBusCommands"],
MNShared   = BOOL,
MParityAB   = BOOL,
MNError    > BOOL,
MReadyBA   < BOOL,
-- M1/M2 are the per-cache memory-bus arbitration request/grant pairs
-- (bound to iCache and eCache respectively in the Expand section).
M1Rq     > BOOL,
M2Rq     > BOOL,
MNewRq    = BOOL,
M1Gnt    < BOOL,
M2Gnt    < BOOL,
Serial debugging interface
All the following signals change during PhA and propagate during the remainder of PhA and PhB, giving an entire clock cycle for them to propagate throughout the machine. Each user must receive them into a latch open during PhB. The effects of changes are intended to happen throughout the following PhA, PhB pair.
ResetAB  < BOOL,
DHoldAB  < BOOL, -- must be high before testing
DShiftAB  < BOOL, -- shift the shift register by 1 bit if ~DNSelect
DExecuteAB < BOOL, -- interpret the content of the shift register if ~DNSelect
DNSelectAB < BOOL, -- if high, hold but don't Execute or Shift
DDataInAB < BOOL, -- sampled during each PhB following a PhB that DShift is asserted
DDataOutAB = BOOL -- changes during each PhA following a PhB that DShift is asserted, continues to be driven through the PhB following the PhA it changes
]
Expand
-- Internal expansion of the cluster: local signal declarations for the
-- wires joining the sub-cells, followed by the sub-cell instances.
P Interfaces for IFU cache
IPData:    INT[32]; -- PhA - address to cache, PhB - data to/from cache
IPCmnd3A:   EnumType["Dragon.PBusCommands"];
IPFaultB:    EnumType["Dragon.PBusFaults"];
I interface
KBus:     INT[32];
During PhB the a, b, and c ram addresses are multiplexed on KBus: a=[0..7], b=[8..15], c=[16..23], cIsField=[24], EUAluLeftSrc1B=[25..26], EUAluRightSrc1B=[27..29], EUStore2ASrc1B=[30..31]. If cAdr is an IFU register address during PhA then data is carried from the EU or FP to the IFU, otherwise it moves from the IFU to the EU or FP.
EUSt3AisCBus2BA:  BOOL;
EURes3BisPBus3AB:  BOOL;
EUWriteToPBus3AB: BOOL;
EUAluOp2AB:   EnumType["Dragon.ALUOps"];
EUCondSel2AB:   EnumType["Dragon.CondSelects"];
EUCondition2B:   BOOL;
-- Sub-cell instances.  The two Cache instances explicitly rename their
-- generic P-bus and M-bus arbitration ports; NOTE(review): ports that are
-- not listed presumably bind to like-named cluster signals -- confirm
-- against the Rose Expand binding rules.
ifu: IFU[logRef: |ClusterParams.ifuLogRef|, lizardSimRef: |ClusterParams.lizardSimRef| ][];
eu: EU[logRef: |ClusterParams.euLogRef|][];
-- Instruction cache: serves the IFU P-bus and arbitrates for memory as M1.
iCache: Cache[cacheParm: |ClusterParams.iCache|, skipRejectsParm: |ClusterParams.iCacheSkipRejects|][
PData:  IPData,
PCmdA: IPCmnd3A,
PRejectB: IPRejectB,
PFaultB: IPFaultB,
MRq:  M1Rq,
MGnt: M1Gnt ];
-- EU (data) cache: serves the EU P-bus and arbitrates for memory as M2;
-- skipRejects is hard-wired FALSE for this cache.
eCache: Cache[cacheParm: |ClusterParams.eCache|, skipRejectsParm: |FALSE|][
PData:  DPData,
PCmdA: DPCmnd3A,
PRejectB: DPRejectB,
PFaultB: DPFaultB,
MRq:  M2Rq,
MGnt: M2Gnt ];
CEDAR
{ };
Test T BlackBox
-- Top-level simulation driver.  Repeatedly: pick the next diagnostic from
-- the control panel (or generate a random program from a seed), load it into
-- the simulated virtual memory, reset the processor, and clock it until the
-- program halts or the test list is exhausted.  Runs at background priority
-- so it does not starve the viewer/panel processes.
originalPriority: Process.Priority = Process.GetPriority[];
diagnosticName: Rope.ROPE ← "unknown";
Process.SetPriority[ Process.priorityBackground ];
IF clusterPanelChecker=NIL
THEN clusterPanelChecker ← FORK ClusterParams.PanelCheck[];
{
-- On any unwind (e.g. user abort), log the failure and restore the
-- caller's process priority before propagating.
ENABLE UNWIND => {
ClusterParams.clusterLog.PutF["\nSimulation of %g aborted\n\n", IO.rope[diagnosticName]];
ClusterParams.clusterLog.Flush[];
Process.SetPriority[ originalPriority ] };
-- One Rose network evaluation.  While PhA is high it also copies the
-- panel's resched/reset switches onto RescheduleAB/ResetAB.  A
-- RoseTypes.Stop is swallowed (CONTINUE) when the panel asks to continue
-- the test after an abort; otherwise it is re-raised.
DoEval: PROC = {
OPEN ClusterParams;
clusterPanel.continueTestFromAbort ← FALSE;
IF PhA THEN {
RescheduleAB ← clusterPanel.resched;
ResetAB   ← clusterPanel.reset};
[] ← RoseRun.Eval[ handle
! RoseTypes.Stop => IF clusterPanel.continueTestFromAbort
THEN CONTINUE ELSE REJECT ];
IF clusterPanel.reset THEN clusterPanel.instrCount ← -1;
Process.Yield[];
clusterPanel.continueTestFromAbort ← FALSE};
-- Run n complete clock cycles (PhA then PhB each).
Cycles: PROC [ n: INT ] =
{FOR i: INT IN [0..n) DO DoPh[a]; DoPh[b] ENDLOOP};
-- Drive one clock phase: raise the phase line and evaluate; if the panel's
-- slow-down/stop-in-phase controls apply, announce the phase and evaluate
-- again; then drop both phases and evaluate once more with clocks low.
-- The repeatPhase loop lets the user re-evaluate the same phase from the
-- panel while inspecting state.
DoPh: PROC [ ph: Dragon.Phase ] = {
cp: REF ClusterParams.ControlPanel ← ClusterParams.clusterPanel;
PhB  ← ph=b;
PhA  ← ph=a;
cp.phase ← ph;
DoEval[];
IF (cp.cycle >= cp.slowFromCycle OR
(cp.cycle>=0 AND cp.instrCount >= cp.slowFromInstr)) AND
cp.stopInPh[ph] THEN {
Remark[IO.PutFR["Doing cycle %g Ph%g...",
IO.int[cp.cycle], IO.char[IF ph=a THEN 'A ELSE 'B]]];
DoEval[] };
PhA ← PhB ← FALSE;
DoEval[];
WHILE cp.repeatPhase DO
PhB ← ph=b; PhA ← ph=a; DoEval[];
Remark[IO.PutFR["...repeating Ph%g evaluation..",
IO.char[IF ph=a THEN 'A ELSE 'B]]];
DoEval[];
PhA ← PhB ← FALSE; DoEval[];
ENDLOOP };
-- The tester actively drives the clocks, Reschedule, and Reset; every
-- other port it was asked to test is downgraded to passive observation.
FOR port: ClusterPort IN ClusterPort DO IF drive[port]=test THEN drive[port] ← see ENDLOOP;
drive[PhA]    ← drive;
drive[PhB]    ← drive;
drive[RescheduleAB] ← drive;
drive[ResetAB]   ← drive;
root  ← RoseTypes.GetSimulationFromCellTestHandle[handle].root;
-- Main test loop: one iteration per diagnostic file (or per random seed).
DO
-- Split the panel's diagnostic rope into its first whitespace-delimited
-- token and the remainder.  NOTE(review): "ROPENIL" below looks like a
-- dropped back-arrow in the original ("Rope.ROPE ← NIL") -- restore the
-- arrow if this is ever recompiled.
Chop: PROC RETURNS [ first, rest: Rope.ROPENIL ] =
BEGIN
dStream: IO.STREAM = IO.RIS[ClusterParams.clusterPanel.diagnostic];
first ← dStream.GetTokenRope[IO.IDProc ! IO.EndOfStream => CONTINUE].token;
rest ← ClusterParams.clusterPanel.diagnostic.Substr[dStream.GetIndex];
END;
diagnosticFileName: Rope.ROPE ← diagnosticName ← Chop[].first;
-- Reset the processor and the panel counters, then clock 5 cycles with
-- reset asserted; assertion failures raised during reset are RESUMEd.
ClusterError     ← FALSE;
ClusterParams.clusterPanel.reset   ← TRUE;
ClusterParams.clusterPanel.stopInPh  ← ALL[TRUE];
ClusterParams.clusterPanel.repeatPhase ← ClusterParams.clusterPanel.resched ← FALSE;
ClusterParams.clusterPanel.cycle   ← -1;
ClusterParams.clusterPanel.instrCount  ← -1;
Cycles[5 !
DragonRosemary.AssertionFailed => RESUME;
RoseTypes.Stop => IF data = $FailedAssertion THEN RESUME ELSE REJECT ];
-- Decide what to run: "END" stops the whole test; a file name loads that
-- diagnostic image into the VM; otherwise a nonzero seed generates a
-- random program.  Either way a fresh Lizard reference simulator is
-- started when the panel's lizardToo switch is set.
SELECT TRUE FROM
Rope.Equal[s1: diagnosticFileName, s2: "END", case: FALSE] => EXIT;
diagnosticFileName # NIL =>
BEGIN
CacheOps.VirtualMemoryFromFile[ClusterParams.vm, diagnosticFileName ];
ClusterParams.lizardSimRef^ ← (IF ClusterParams.clusterPanel.lizardToo THEN LizardRosemary.StartNewLizard[ClusterParams.vm] ELSE NIL);
END;
ClusterParams.clusterPanel.randomSeed#0 =>
BEGIN
diagnosticName ← IO.PutFR["random code (seed = %d)", IO.int[ClusterParams.clusterPanel.randomSeed]];
ClusterParams.InsertRandomProgramInVM[ClusterParams.vm, ClusterParams.clusterPanel.randomSeed, ClusterParams.clusterLog];
ClusterParams.lizardSimRef^ ← (IF ClusterParams.clusterPanel.lizardToo THEN LizardRosemary.StartNewLizard[ClusterParams.vm] ELSE NIL);
END;
ENDCASE => EXIT; -- test is finished!!!
-- Release reset partway through a cycle so ResetAB falls during PhA.
DoPh[a];
ClusterError   ← FALSE;
ClusterParams.clusterPanel.reset ← FALSE;
DoEval[];    -- changes ResetAB during PhA
DoPh[b];
IF ClusterParams.clusterPanel.slowFromCycle<=0 THEN Remark["Processor has been reset..."];
ClusterParams.clusterPanel.cycle ← 0;
ClusterParams.clusterLog.PutF["\n\n\n%g Dragon Rosemary simulation of %g beginning...\n\n",
IO.time[],
IO.rope[diagnosticName]];
-- Clock until the program halts (Success or Breakpoint XOP signalled by
-- LizardRosemary) or, for random programs, the cycle limit expires.
WHILE ClusterParams.clusterPanel.randomSeed=0 OR ClusterParams.clusterPanel.cycle<=ClusterParams.clusterPanel.randomCycleLimit DO
ENABLE {
LizardRosemary.SuccessHalt => {
ClusterParams.clusterLog.PutF["\n%g Success XOP in %g at instruction %d, cycle %d.\n\n",
IO.time[],
IO.rope[diagnosticName],
IO.int[ClusterParams.clusterPanel.instrCount],
IO.int[ClusterParams.clusterPanel.cycle]];
EXIT;
};
-- A breakpoint either resumes (panel emulates it), re-raises (file-based
-- diagnostic), or ends this test (random program).
LizardRosemary.Breakpoint => {
ClusterParams.clusterLog.PutF["\n%g Breakpoint XOP in %g at instruction %d, cycle %d.\n\n",
IO.time[],
IO.rope[diagnosticName],
IO.int[ClusterParams.clusterPanel.instrCount],
IO.int[ClusterParams.clusterPanel.cycle]];
SELECT TRUE FROM
ClusterParams.clusterPanel.emulateBreakpoint => RESUME;
diagnosticFileName # NIL => REJECT;
ENDCASE => EXIT;
};
};
DoPh[a];
DoPh[b];
-- Checkpointing: reschedule the next periodic checkpoint, then write one
-- if requested from the panel or the scheduled cycle has been reached.
IF ClusterParams.clusterPanel.ckptEveryNCycles > 0 AND ClusterParams.clusterPanel.ckptAtCycle <= ClusterParams.clusterPanel.cycle THEN
ClusterParams.clusterPanel.ckptAtCycle ← ClusterParams.clusterPanel.cycle+ClusterParams.clusterPanel.ckptEveryNCycles;
ClusterParams.clusterPanel.cycle ← ClusterParams.clusterPanel.cycle+1;
IF ClusterParams.clusterPanel.ckpt OR ClusterParams.clusterPanel.ckptAtCycle = ClusterParams.clusterPanel.cycle THEN {
ClusterParams.clusterPanel.ckpt ← FALSE;
ClusterParams.CheckPoint[ClusterParams.clusterPanel.ckptFile, root]
};
ENDLOOP; -- on ClusterParams.clusterPanel.cycle
-- Advance to the next test: move the just-run diagnostic to the end of
-- the panel's list, or bump the random seed.
SELECT TRUE FROM
diagnosticFileName # NIL =>
BEGIN
first, rest: Rope.ROPE;
[first, rest] ← Chop[];
IF first.Equal[diagnosticFileName] THEN
ClusterParams.clusterPanel.diagnostic ← Rope.Cat[rest, " ",first];
END;
ClusterParams.clusterPanel.randomSeed # 0 =>
ClusterParams.clusterPanel.randomSeed ← ClusterParams.clusterPanel.randomSeed+1;
ENDCASE => NULL;
ENDLOOP; -- on ClusterParams.clusterPanel.randomSeed
}; -- for catching UNWIND
ENDCELLTYPE;
CEDAR
-- Module-level state shared across test runs.
root:      RoseTypes.Cell ← NIL; -- root cell of the current simulation; set at the start of each Test
-- Background process forked over ClusterParams.PanelCheck by the Test code.
-- Fix: the source had the garbled token "PROCESSNIL" -- the back-arrow was
-- lost in transcription (same pattern as "Rope.ROPENIL" in Chop above);
-- restored to the declaration-with-initializer form used on the line above.
clusterPanelChecker: PROCESS ← NIL;
Remark: PROC[message: Rope.ROPE] = ClusterParams.Remark; -- shorthand for posting panel/log messages