DIRECTORY
BootFile USING[ Location ],
Disk USING[ Add, Channel, defaultTries, DoIO, DriveAttributes, GetBootChainLink, GetDeviceFromChannel, invalid, Label, labelCheck, ok, PageNumber, PageCount, Request, SameDrive, Status ],
DiskFace USING[ DiskAddress, DontCare, GetTrueDeviceAttributes, wordsPerPage ],
File USING[ Error, FP, nullDA, nullFP, PageCount, PageNumber, PagesForWords, PropertyStorage, RC, Reason, SystemVolume, Volume, VolumeFile ],
FileBackdoor USING[ GetVolumePages, IsDebugger],
FileInternal,
FileStats USING[ Data, Type, Pulses ],
PhysicalVolume USING [Physical, SubVolumeDetails],
PrincOpsUtils USING[ LongCopy ],
Process USING [GetPriority, Pause, Priority, priorityForeground, priorityNormal, SetPriority], 
ProcessorFace USING[ GetClockPulses ],
VolumeFormat USING[ AbsID, allocatedBadPages, Attributes, lastLogicalRun, LogicalPage, LogicalPageCount, LogicalRun, LogicalRunObject, RelID, RunPageCount ],
VM USING[ AddressForPageNumber, Allocate, Free, Interval, PageCount, PageNumber, PageNumberForAddress, PagesForWords, SwapIn, Unpin, wordsPerPage],
VMBacking USING[ AttachBackingStorage, Run, RunTableIndex, RunTableObject, RunTablePageNumber ];

FileImpl: CEDAR MONITOR LOCKS FileInternal.FileImplMonitorLock
IMPORTS Disk, DiskFace, File, FileBackdoor, FileInternal, PrincOpsUtils, Process, ProcessorFace, VM, VMBacking
EXPORTS DiskFace--RelID,AbsID,Attributes--, File, FileBackdoor, FileInternal, FileStats
SHARES File = {


FileImplMonitorLock: PUBLIC MONITORLOCK;  -- monitor for FileImpl and FilePagesImpl




--DiskFace.--Attributes: PUBLIC TYPE = VolumeFormat.Attributes;

--DiskFace.--AbsID: PUBLIC TYPE = VolumeFormat.AbsID;

--DiskFace.--RelID: PUBLIC TYPE = VolumeFormat.RelID;

--File.--DA: PUBLIC TYPE = VolumeFormat.LogicalPage;

Handle: TYPE = REF Object;

--File.--Object: PUBLIC TYPE = FileInternal.Object;

Volume: TYPE = REF VolumeObject;

--File.--VolumeObject: PUBLIC TYPE = FileInternal.VolumeObject;

RunTable: TYPE = FileInternal.RunTable;

RunTableObject: TYPE = VMBacking.RunTableObject;

RunTableIndex: TYPE = VMBacking.RunTableIndex;

PhysicalRun: TYPE = FileInternal.PhysicalRun;

lastRun: VMBacking.RunTablePageNumber = LAST[INT]; -- end marker in runTable --

initRuns: CARDINAL = HeaderPagesToRuns[1];

normalHeaderSize: CARDINAL = 2;


RunsToHeaderPages: PUBLIC PROC [runs: CARDINAL] RETURNS [pages: VolumeFormat.LogicalPageCount] = {
pages _ ((runs*SIZE[VolumeFormat.LogicalRun]) + SIZE[VolumeFormat.LogicalRunObject[0]]+ (DiskFace.wordsPerPage-1)) / DiskFace.wordsPerPage;
};

HeaderPagesToRuns: PUBLIC PROC [pages: VolumeFormat.LogicalPageCount] RETURNS [runs: CARDINAL] = {
RETURN[((DiskFace.wordsPerPage * pages) - SIZE[VolumeFormat.LogicalRunObject[0]]) /
SIZE[VolumeFormat.LogicalRun]];
};
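-- Note: RunsToHeaderPages rounds up to whole pages while HeaderPagesToRuns rounds down, so HeaderPagesToRuns[RunsToHeaderPages[n]] >= n; converting a run count to header pages and back never loses runs.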


DoPinnedIO: PUBLIC PROC [channel: Disk.Channel, label: POINTER TO Disk.Label, req: POINTER TO Disk.Request] RETURNS [status: Disk.Status, countDone: Disk.PageCount] = TRUSTED {
interval: VM.Interval = [
page: VM.PageNumberForAddress[req.data],
count: VM.PagesForWords[
(IF req.incrementDataPtr THEN req.count ELSE 1)*DiskFace.wordsPerPage] ];
VM.SwapIn[interval: interval, kill: req.command.data=read, pin: TRUE];
[status, countDone] _ Disk.DoIO[channel, label, req ! UNWIND => VM.Unpin[interval] ];
VM.Unpin[interval];
};
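-- DoPinnedIO pins the client's buffer in real memory for the duration of the transfer: the interval is swapped in first (its old contents discarded when the disk transfer will overwrite them) and is unpinned even if Disk.DoIO unwinds.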

Error: PUBLIC ERROR[why: File.Reason, diskPage: INT] = CODE;

badData: Disk.Status = [unchanged[dataCRCError]];

CheckStatus: PUBLIC PROC [status: Disk.Status, diskPage: INT] = {
why: File.RC = FileInternal.TranslateStatus[status];
IF why # ok THEN ERROR File.Error[why, diskPage];
};



GetHeaderVM: PUBLIC PROC [file: Handle, runs: CARDINAL, propertyPages: CARDINAL _ 1 ] = TRUSTED {
oldVM: LONG POINTER = file.headerVM;
oldHeaderVMPages: INT = file.headerVMPages;
oldProperties: File.PropertyStorage = file.properties;
oldLogical: LONG POINTER TO VolumeFormat.LogicalRunObject = file.logicalRunTable;
runTableWords: INT = SIZE[VolumeFormat.LogicalRunObject[runs]];
runTablePages: INT = VM.PagesForWords[runTableWords];
vmPages: INT = VM.PagesForWords[runTableWords] + propertyPages;
runTableFilePages: File.PageCount = File.PagesForWords[runTableWords];
interval: VM.Interval _ VM.Allocate[vmPages];

VM.SwapIn[interval];
file.headerVM _ VM.AddressForPageNumber[interval.page];
file.headerVMPages _ vmPages; -- assign only after VM.Allocate succeeds
file.properties _ LOOPHOLE[file.headerVM+runTableFilePages*DiskFace.wordsPerPage];
file.logicalRunTable _ LOOPHOLE[file.headerVM];
file.logicalRunTable.headerPages _ runTableFilePages + propertyPages;
file.logicalRunTable.maxRuns _ runs;
IF oldVM = NIL
THEN {
temp: LONG POINTER TO ARRAY [0..DiskFace.wordsPerPage) OF WORD;
file.logicalRunTable[0].first _ VolumeFormat.lastLogicalRun;
temp _ LOOPHOLE[file.properties];
temp^ _ ALL[0];
}
ELSE {
runWords: CARDINAL = oldProperties-oldVM;
propertyWords: LONG CARDINAL = MAX[0, VM.wordsPerPage*oldHeaderVMPages - runWords];
PrincOpsUtils.LongCopy[from: oldLogical, to: file.logicalRunTable, 
nwords: runWords];
PrincOpsUtils.LongCopy[from: oldLogical+ runWords, to: file.properties, 
nwords: propertyWords];
VM.Free[[VM.PageNumberForAddress[oldVM], oldHeaderVMPages]];
};
file.runPages _ runTablePages;
file.propertyPages _ propertyPages;
};
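-- GetHeaderVM (re)allocates the VM copy of the file header: the logical run table followed by the property pages. If an old header VM exists, its run table and properties are copied into the new space and the old VM is freed.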

FreeHeaderVM: PUBLIC PROC [file: Handle] = TRUSTED {
IF file.headerVMPages # 0 THEN
VM.Free[[VM.PageNumberForAddress[file.headerVM], file.headerVMPages]];
file.headerVMPages _ 0;
file.headerVM _ NIL;
file.logicalRunTable _ NIL;
file.properties _ NIL
};

TranslateLogicalRunTable: PUBLIC PROC [file: Handle, prefixOnly: BOOL _ FALSE] RETURNS [ File.PageCount ] = TRUSTED {
nRuns: CARDINAL;
filePage: File.PageNumber _ [-file.logicalRunTable.headerPages];
FOR nRuns IN [0..file.logicalRunTable.maxRuns) DO
IF file.logicalRunTable[nRuns].first = VolumeFormat.lastLogicalRun THEN EXIT;
REPEAT FINISHED => IF ~prefixOnly THEN ERROR File.Error[inconsistent]
ENDLOOP;
IF file.runTable = NIL OR file.runTable.length < nRuns+10--arbitrary--
THEN file.runTable _ NEW[RunTableObject[nRuns+10]];  
file.runTable.nRuns _ nRuns;
FOR i: CARDINAL IN [0..nRuns) DO
IF file.logicalRunTable[i].first = VolumeFormat.lastLogicalRun THEN EXIT;
[channel: file.runTable[i].channel, diskPage: file.runTable[i].diskPage] _
FileInternal.TranslateLogicalRun[file.logicalRunTable[i], file.volume];
file.runTable[i].filePage _ filePage;
filePage _ [filePage + file.logicalRunTable[i].size];
ENDLOOP;
file.runTable.nDataPages _ filePage;
file.runTable[file.runTable.nRuns].filePage _ lastRun;
RETURN[ file.runTable.nDataPages ]
};
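-- Builds the physical run table from the logical one. File page numbering starts at -(number of header pages), so the header occupies negative file pages and the count returned covers data pages only.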

SetPropertiesSize: PUBLIC PROC [file: Handle, nPages: File.PageCount] = TRUSTED {
runPages: CARDINAL _ file.runPages;
IF file.runTable.nRuns+3 >= file.logicalRunTable.maxRuns THEN runPages _ runPages + 1;
ExtendFileHeader[file: file, newRunPages: runPages, newPropertyPages: nPages];
FileInternal.WriteRunTable[file];
};

ExtendFileHeader: PUBLIC PROC [file: Handle, newRunPages: CARDINAL, newPropertyPages: CARDINAL] = TRUSTED {
newMaxRuns: CARDINAL = HeaderPagesToRuns[newRunPages];
GetHeaderVM[file: file, runs: newMaxRuns, propertyPages: newPropertyPages ];
file.logicalRunTable.maxRuns _ newMaxRuns;
file.logicalRunTable.headerPages _ newRunPages + newPropertyPages;
};


LastLogicalPage: PUBLIC PROC [file: Handle] RETURNS [VolumeFormat.LogicalPage] = TRUSTED {
lastRun: VolumeFormat.LogicalRun = file.logicalRunTable[file.runTable.nRuns-1];
RETURN[ [lastRun.first + lastRun.size-1] ]
};




UnstableRunTable: PROC [file: Handle, newSize: File.PageCount] = TRUSTED {
file.logicalRunTable.intention _ [unstable: TRUE, size: newSize];
FileInternal.Transfer[file: file, data: file.headerVM, filePage: [0], nPages: 1, action: write, where: header];
};

StableRunTable: PROC [file: Handle] = TRUSTED {
file.logicalRunTable.intention _ [unstable: FALSE];
FileInternal.Transfer[file: file, data: file.headerVM, filePage: [0], nPages: 1, action: write, where: header];
};
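-- UnstableRunTable and StableRunTable bracket changes to a file's size: the first records the intended size and an 'unstable' flag in the on-disk header before any labels are rewritten; the second clears the flag once the run table is consistent again, so DoOpen can repair a file whose size change was interrupted by a crash.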

MakeBootable: PROC [file: Handle, firstPage: File.PageNumber] RETURNS [id: RelID, firstLink: DiskFace.DontCare, channel: Disk.Channel] = {
eof: DiskFace.DontCare = LOOPHOLE[LONG[-1]]; -- bit-pattern known by microcode
data: LONG POINTER;
label: Disk.Label _ FileInternal.DataLabel[file.fp];
req: Disk.Request;
filePage: File.PageNumber _ firstPage;
thisDiskPage: Disk.PageNumber;
WriteLink: PROC [link: DiskFace.DontCare] = {
status: Disk.Status;
countDone: Disk.PageCount;
label.filePage _ filePage-1;
req _  [
diskPage: thisDiskPage,
data: data,
incrementDataPtr: TRUE,
command: [header: verify, label: verify, data: read],
count: 1 ];
TRUSTED{[status, countDone] _ DoPinnedIO[channel, @label, @req]}; -- get data
CheckStatus[status, thisDiskPage+countDone];  -- use thisDiskPage instead of req.diskPage since DiskImpl modifies req.diskPage
label.dontCare _ link;
label.filePage _ filePage-1; -- previous transfer incremented it
TRUSTED{[status, countDone] _ FileInternal.WriteLabels[channel, thisDiskPage, 1, data, @label]};
CheckStatus[status, thisDiskPage+countDone];
};
thisSize: Disk.PageCount;
IF file.size <= firstPage THEN ERROR File.Error[unknownPage, firstPage];
[diskPage: thisDiskPage, size: thisSize, channel: channel] _
FileInternal.FindRun[start: filePage, nPages: file.size-filePage, runTable: file.runTable];
firstLink _ Disk.GetBootChainLink[channel, thisDiskPage];
id _ RelID[file.fp];
data _ FileInternal.GetScratchPage[];
DO {
ENABLE UNWIND => FileInternal.FreeScratchPage[data];
nextDiskPage: Disk.PageNumber;
nextSize: Disk.PageCount;
nextChannel: Disk.Channel;
filePage _ [filePage+thisSize]; -- file page number of start of next run
thisDiskPage _ [thisDiskPage + thisSize - 1]; -- disk page number of last page of this run
IF filePage >= file.size THEN EXIT;
[diskPage: nextDiskPage, size: nextSize, channel: nextChannel] _
FileInternal.FindRun[start: filePage, nPages: file.size-filePage, runTable: file.runTable];
IF NOT Disk.SameDrive[nextChannel, channel] THEN
ERROR File.Error[mixedDevices, thisDiskPage];
WriteLink[ Disk.GetBootChainLink[channel, nextDiskPage] ];
thisDiskPage _ nextDiskPage; thisSize _ nextSize;
}
ENDLOOP;
WriteLink[eof];
FileInternal.FreeScratchPage[data];
};
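-- MakeBootable chains the file's runs together for the boot microcode: for each run it rewrites the label of the run's last page so that its dontCare field holds the boot-chain link of the next run's first page; the file's last page gets the eof link.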


Reporter: TYPE = PROC [file: File.FP, props: File.PropertyStorage, nPages: File.PageCount];

TryToMakeALotOfExtensions: BOOL _ FALSE;  -- debugging

Extend: PROC [file: Handle, delta, min: INT, report: Reporter, VMBackingCreate: BOOL _ FALSE] = {
volume: File.Volume = file.volume;
amount: INT _ delta;
nearTo: VolumeFormat.LogicalPage _ -- hint for FileInternal.Alloc --
IF file.runTable.nRuns = 0 THEN [0] ELSE FileInternal.LastLogicalPage[file];
headerVMPos: LONG POINTER _ file.headerVM; -- headerVM to be written to disk (creating)
freeLabel: Disk.Label _ FileInternal.FreeLabel[volume];
label: Disk.Label;
loopCount: INT _ 0 ;
nChunksLeft: INT;
chunkSize: INT;
chunkEnd: INT;
incrementToNextChunk: INT;

ComputeVMBackingLocation: PROC [] = {
channel: Disk.Channel;
firstBigBlock: VolumeFormat.LogicalPage;
countBigBlock: VolumeFormat.LogicalPageCount;
opaqueDiskAddress: DiskFace.DontCare;
diskAddress: DiskFace.DiskAddress ;
sectorsPerTrack: INT _ 0 ;
subVolume: PhysicalVolume.SubVolumeDetails;
cylinders: INT _ 1 ;

[firstBigBlock, countBigBlock, subVolume] _ FileInternal.FindLargestFreeBlockInBiggestSubVolume[volume];
IF countBigBlock > delta THEN {
channel _ subVolume.channel;
[cylinders: cylinders, sectorsPerTrack: sectorsPerTrack] _ DiskFace.GetTrueDeviceAttributes[Disk.GetDeviceFromChannel[channel]];
DO
opaqueDiskAddress _ Disk.GetBootChainLink[channel, [sectorsPerTrack]];
diskAddress _ LOOPHOLE[opaqueDiskAddress];
IF diskAddress.cylinder ~= 0 THEN {
sectorsPerPlatter: INT;
firstPhysicalInBigBlock: INT ;
firstPhysicalInBigBlockOnNewPlatter: INT ;
firstLogicalInBigBlockOnNewPlatter: INT  ;

firstPhysicalInBigBlock _ subVolume.address + firstBigBlock - subVolume.start;
sectorsPerPlatter _ sectorsPerTrack * cylinders;
firstPhysicalInBigBlockOnNewPlatter _ ((firstPhysicalInBigBlock+sectorsPerPlatter-1)/sectorsPerPlatter) * sectorsPerPlatter;
firstLogicalInBigBlockOnNewPlatter _ subVolume.start + firstPhysicalInBigBlockOnNewPlatter - subVolume.address ;
nChunksLeft _ (countBigBlock - firstLogicalInBigBlockOnNewPlatter + firstBigBlock + (sectorsPerPlatter/8)) / sectorsPerPlatter ;
IF nChunksLeft <= 1
THEN {
chunkEnd _ 0;
nChunksLeft _ 1;
chunkSize _ delta;
incrementToNextChunk _ 1;
EXIT;
}
ELSE {
chunkSize _ delta/nChunksLeft ;
incrementToNextChunk _ sectorsPerPlatter ;
chunkEnd _ firstLogicalInBigBlockOnNewPlatter + (sectorsPerPlatter+chunkSize)/2;
EXIT;
};
};
IF diskAddress.head ~= 0 THEN {
chunkEnd _ subVolume.start + (subVolume.size+delta)/2;
nChunksLeft _ 1;
chunkSize _ delta;
incrementToNextChunk _ 1;
EXIT;
};
sectorsPerTrack _ sectorsPerTrack + 1;
ENDLOOP;
}
 ELSE {
nChunksLeft _ 1;
chunkSize _ delta;
incrementToNextChunk _ 1;
chunkEnd _ 0;
};
};
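-- Chooses where to place the VM backing file. On an Alto-style disk (cylinder varies before head in the boot-chain address, as on the Dorado) the file is spread across whole free platters; on a more standard disk (head varies first, as on a DLion) it is aimed at the centre of the largest subvolume; if the biggest free block is too small the allocation falls back to a single chunk wherever Alloc finds room.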

IF file.size >= 0 THEN {
label _ FileInternal.DataLabel[file.fp];
label.filePage _ file.size;
}; -- Otherwise, wait until we know the FP!
TRUSTED{ file.logicalRunTable.intention _ [unstable: TRUE, size: file.size] };
IF VMBackingCreate THEN ComputeVMBackingLocation[];
WHILE amount > 0 DO
logicalRun: VolumeFormat.LogicalRun ;
nowAmount: INT _ amount ;
IF VMBackingCreate THEN {
IF loopCount = 0 THEN {
nearTo _ [chunkEnd];
nowAmount _ chunkSize ;
}
 ELSE {
chunkEnd _ chunkEnd + incrementToNextChunk ;
nChunksLeft _ nChunksLeft - 1 ;
nearTo _ [chunkEnd];
nowAmount _ chunkSize ;
};
IF nChunksLeft <= 0 THEN nowAmount _ amount ;
};
loopCount _ loopCount.SUCC;
IF TryToMakeALotOfExtensions THEN nearTo _ [nearTo+nowAmount+2];  -- debugging
logicalRun _ FileInternal.Alloc[volume: volume, first: nearTo, size: nowAmount, min: min ];
nearTo _ [logicalRun.first + logicalRun.size]; -- hint for next call of Alloc --
WHILE logicalRun.size > 0 DO
labelsOK: Disk.PageCount; -- count of labels that are genuinely free pages --
status: Disk.Status;
run: PhysicalRun;
labelsThisTime: Disk.PageCount _ 0; -- labels written in this transfer --
[run.channel, run.diskPage] _ FileInternal.TranslateLogicalRun[logicalRun, volume];
run.filePage _ file.size;
freeLabel.filePage _ logicalRun.first;
TRUSTED{ [status, labelsOK] _
FileInternal.VerifyLabels[run.channel, run.diskPage, logicalRun.size, @freeLabel] };
IF status # Disk.ok THEN FileInternal.notReallyFree _ FileInternal.notReallyFree+1; -- statistics
IF labelsOK > 0 THEN {
labelsWritten: Disk.PageCount _ 0; -- total labels actually written --
Consume: PROC = {
file.size _ file.size + labelsThisTime;
labelsWritten _ labelsWritten + labelsThisTime;
amount _ amount - labelsThisTime;
logicalRun.first _ [logicalRun.first+labelsThisTime];
logicalRun.size _ logicalRun.size - labelsThisTime;
};
firstHeaderPage: BOOL = file.runTable.nRuns = 0;
TRUSTED{FileInternal.AddRun[file, @run, logicalRun.first, labelsOK]};
IF firstHeaderPage THEN {
IF file.size >= 0 THEN ERROR File.Error[inconsistent, run.diskPage];
file.fp _ [id: FileInternal.NewID[volume], da: logicalRun.first];
label _ FileInternal.HeaderLabel[file.fp];
IF report # NIL THEN report[file.fp, file.properties, file.propertyPages];
}
 ELSE {
FileInternal.WriteRunTable[file];
};
IF file.size < 0 THEN {
TRUSTED{[status, labelsThisTime] _ FileInternal.WriteLabels[run.channel,
run.diskPage, MIN[-file.size, labelsOK], headerVMPos, @label]};
Consume[];
IF file.size < 0 -- still writing header pages, even after the ones we just wrote
THEN TRUSTED {
headerVMPos _ headerVMPos + labelsThisTime * DiskFace.wordsPerPage}
ELSE {
label _ FileInternal.DataLabel[file.fp];
label.filePage _ file.size;
};
};
IF labelsOK > labelsWritten AND file.size >= 0 THEN {
TRUSTED{[status, labelsThisTime] _ FileInternal.WriteLabels[run.channel,
[run.diskPage+labelsThisTime], labelsOK-labelsWritten, NIL, @label]};
Consume[];
};
IF labelsOK > labelsWritten THEN
FileInternal.RemoveFromRunTable[file, labelsOK-labelsWritten];
};
SELECT status FROM
Disk.ok => NULL;
Disk.invalid => ERROR File.Error[wentOffline, run.diskPage+labelsThisTime];
ENDCASE => {
logicalRun.first _ [logicalRun.first+1];
logicalRun.size _ logicalRun.size-1 };
ENDLOOP-- Loop for each available fragment of disk run --;
ENDLOOP-- Loop for each allocated disk run --;
StableRunTable[file]
};

Contract: PROC [file: Handle, delete: BOOL, newSize: File.PageCount-- -1 if delete--, recovery: BOOL] = TRUSTED {
logical: LONG POINTER TO VolumeFormat.LogicalRunObject = file.logicalRunTable;
IF NOT recovery THEN UnstableRunTable[file, newSize];
WHILE delete OR file.size > newSize DO
IF file.runTable.nRuns=0 THEN {
IF delete THEN EXIT ELSE ERROR File.Error[inconsistent];
};
{
label: Disk.Label;
labelPtr: POINTER TO Disk.Label _ NIL;
lastRun: VolumeFormat.LogicalRun _ logical[file.runTable.nRuns-1];
thisTime: VolumeFormat.RunPageCount =
IF delete
THEN IF recovery AND file.size > 0
THEN --restrict to data pages for label-check--MIN[file.size, lastRun.size]
ELSE --run is entirely data or entirely header--lastRun.size
ELSE MIN[file.size-newSize, lastRun.size];
file.size _ file.size - thisTime;
IF recovery
THEN {
IF file.size + thisTime > 0
THEN {
label _ FileInternal.DataLabel[file.fp];
label.filePage _ file.size;
}
ELSE {
label _ FileInternal.HeaderLabel[file.fp];
label.filePage _ logical.headerPages + file.size;
};
labelPtr _ @label;
}
ELSE labelPtr _ NIL;
FileInternal.RemoveFromRunTable[file, thisTime];
FileInternal.FreeRun[
[first: [lastRun.first + lastRun.size-thisTime], size: thisTime], file.volume, labelPtr];
};
ENDLOOP;
IF NOT delete THEN {
logical.intention _ [unstable: FALSE];
FileInternal.WriteRunTable[file];
};
};
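-- Contract removes whole or partial runs from the end of the file until the requested size is reached (or, for delete, until no runs remain). During crash recovery a label is passed to FreeRun, presumably so that it can check each page still belongs to this file before freeing it.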

recoveries: INT _ 0;

DoOpen: PROC [file: Handle] = TRUSTED {
volume: File.Volume = file.volume;
diskPage: Disk.PageNumber;
FileInternal.GetHeaderVM[file, initRuns];
{ -- First try to transfer entire header in a single request (an optimisation!)
initTryPages: INT = file.logicalRunTable.headerPages;
logicalRun: VolumeFormat.LogicalRun = [first: file.fp.da, size: initTryPages];
channel: Disk.Channel;
label: Disk.Label;
req: Disk.Request;
status: Disk.Status;
countDone: Disk.PageCount;
[channel, diskPage] _ FileInternal.TranslateLogicalRun[logicalRun, volume];
label _ FileInternal.HeaderLabel[file.fp];
req _ [
diskPage: diskPage,
data: file.headerVM,
incrementDataPtr: TRUE,
command: [header: verify, label: verify, data: read],
count: initTryPages,
tries: 1  -- one try at first.  If we have guessed wrong about the length of the header and there is only one header page here, we would do lots of retries.
 ];
TRUSTED{[status, countDone] _ DoPinnedIO[channel, @label, @req]};
req.tries _ Disk.defaultTries;
IF countDone = 0 THEN {
TRUSTED{[status, countDone] _ DoPinnedIO[channel, @label, @req]};  -- try again with retries
IF countDone = 0 THEN {
IF status = Disk.labelCheck
THEN ERROR File.Error[unknownFile, diskPage+countDone]
ELSE CheckStatus[status, diskPage+countDone];
};
};
file.runPages _ RunsToHeaderPages[file.logicalRunTable.maxRuns];
file.diskRunPages _ file.runPages;
file.propertyPages _ file.logicalRunTable.headerPages - file.runPages;
file.diskPropertyPages _ file.propertyPages;

IF countDone = file.logicalRunTable.headerPages THEN {
diskPageForHeader1: Disk.PageNumber;
file.size _ FileInternal.TranslateLogicalRunTable[file];
[diskPage: diskPageForHeader1] _ FileInternal.FindRun[ 
start: [-file.logicalRunTable.headerPages+1] ,
nPages: 1,
runTable: file.runTable] ;
IF Disk.Add[diskPage, 1] # diskPageForHeader1 THEN countDone _ 1 ;
};

IF countDone # file.logicalRunTable.headerPages THEN {
savedMaxRuns: CARDINAL _ file.logicalRunTable.maxRuns;
FileInternal.GetHeaderVM[file, file.logicalRunTable.maxRuns, file.diskPropertyPages];
countDone _ 1;
WHILE countDone < file.logicalRunTable.headerPages DO
doneThisTime: Disk.PageCount = MIN[initRuns, file.logicalRunTable.headerPages-countDone];  -- read at most 83 pages at a time to be sure we do not exceed the known run table
file.logicalRunTable.maxRuns _ MIN[savedMaxRuns, HeaderPagesToRuns[countDone]];  -- fix up file so it will translate OK for a big enough prefix of the file.
[] _ FileInternal.TranslateLogicalRunTable[file: file, prefixOnly: TRUE]; 
FileInternal.Transfer[file: file, data: file.headerVM + countDone*DiskFace.wordsPerPage,
filePage: [countDone],
nPages: doneThisTime,
action: read, where: header];
countDone _ countDone + doneThisTime;
ENDLOOP;
file.logicalRunTable.maxRuns _ savedMaxRuns;
file.size _ FileInternal.TranslateLogicalRunTable[file]; 
};
};
IF file.logicalRunTable.intention.unstable THEN {
delete: BOOL = file.logicalRunTable.intention.size < 0;
recoveries _ recoveries+1;
Contract[file: file,
delete: delete,
newSize: file.logicalRunTable.intention.size,
recovery: TRUE];
IF delete THEN { file.state _ deleted; ERROR File.Error[unknownFile, diskPage] };
};
file.state _ opened;
};
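-- If a crash left the header's intention 'unstable', the file is contracted back to the size recorded there (the last size known to be safe); an interrupted delete is completed and then reported as unknownFile.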

nowHaveBackingFile: BOOL _ FALSE;

HaveBackingFile: ENTRY PROC RETURNS [did: BOOL] = {
ENABLE UNWIND => NULL;
did _ nowHaveBackingFile; nowHaveBackingFile _ TRUE;
};

RegisterVMFile: PUBLIC PROC [file: Handle] = {
Acquire[file, shared];
{
ENABLE File.Error => Unlock[file];
label: Disk.Label _ FileInternal.DataLabel[file.fp];
IF NOT HaveBackingFile[]
THEN TRUSTED{ VMBacking.AttachBackingStorage[label, 0, file.runTable] };
};
Unlock[file];
};

unlocked: CONDITION;

Acquire: PUBLIC PROC [file: Handle, mode: FileInternal.LockMode] = {
Lock[file, mode];
IF file.state = none THEN {
ENABLE File.Error => Unlock[file];
startPulse: FileStats.Pulses = GetPulses[];
DoOpen[file];
Incr[open, file.size, startPulse];
};
};

Lock: PUBLIC ENTRY PROC [file: Handle, mode: FileInternal.LockMode] = {
ENABLE UNWIND => NULL;
IF file = NIL THEN RETURN WITH ERROR File.Error[unknownFile];
DO IF file.state = deleted THEN RETURN WITH ERROR File.Error[unknownFile];
IF mode = shared AND file.state # none
THEN { IF file.users >= 0 --no writers-- THEN { file.users _ file.users + 1; EXIT } }
ELSE { IF file.users = 0 --nobody-- THEN { file.users _ -1; EXIT } };
WAIT unlocked;
ENDLOOP;
};

Unlock: PUBLIC ENTRY PROC [file: Handle] = {
SELECT file.users FROM
< 0 => file.users _ file.users + 1;
> 0 => file.users _ file.users - 1;
ENDCASE => NULL;
BROADCAST unlocked;
};
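-- file.users encodes the lock state: 0 is unlocked, a positive value counts shared holders, and -1 marks a single exclusive holder. A shared request on a file that has not yet been opened (state = none) is granted exclusively, since DoOpen needs exclusive access.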


NextFile: PUBLIC PROC [volume: File.Volume, prev: File.FP] RETURNS [next: File.FP _ File.nullFP] = TRUSTED {
finishedStatus: Disk.Status _ Disk.ok;
badPage: INT _ -1;
Work: FileInternal.EnumeratePagesProc = TRUSTED {
attr: Attributes = label.attributes;
rel: RelID = label.fileID.relID;
SELECT TRUE FROM
status # Disk.ok => { exit _ TRUE; badPage _ diskPage; finishedStatus _ status };
attr = header AND label.filePage = 0 => { exit _ TRUE; next _ rel };
ENDCASE => NULL;
};
EnumeratePages[volume, prev.da, TRUE, Work];
CheckStatus[finishedStatus, -1];
};

disableBadPage: BOOL _ FALSE;
matchChannels: INT _ 0;

enumBufferSize: INT = 150;
enumBufferType: TYPE = ARRAY [0..enumBufferSize) OF RECORD [
status: Disk.Status,
da: VolumeFormat.LogicalPage,
label: Disk.Label,
diskPage: INT
];
savedEnumBuffer: REF enumBufferType _ NIL;

EnumeratePages: PUBLIC PROC [ volume: File.Volume, start: VolumeFormat.LogicalPage, skipBadPages: BOOL _ TRUE, work: FileInternal.EnumeratePagesProc] = TRUSTED {
originalPriority: Process.Priority _ Process.priorityNormal;
volumeAlias: Volume _ volume;
size: INT = FileBackdoor.GetVolumePages[volume].size;
current: VolumeFormat.LogicalPage _ start;
done: VolumeFormat.LogicalPage _ current; 
counter: VolumeFormat.LogicalPage _ current;
finished: BOOL _ FALSE;
allDone: BOOL _ FALSE;
changed: CONDITION;
knownChannel: Disk.Channel _ NIL;
knownPhysical: PhysicalVolume.Physical _ NIL;
doFirstPage: BOOL _ start = LOOPHOLE[File.nullDA, VolumeFormat.LogicalPage];
badPagesType: TYPE = ARRAY [0..VolumeFormat.allocatedBadPages) OF VolumeFormat.LogicalPage;
badPages: REF badPagesType;
countBadPages: CARDINAL _ 0;
buffer: REF enumBufferType _ NIL;
getLow: CARDINAL _ 0 ;
getHigh: CARDINAL _ 0 ;
putLow: CARDINAL _ 0 ;
putHigh: CARDINAL _ 0 ;

GetBuffer: ENTRY PROC RETURNS [buffer: REF enumBufferType _ NIL] = CHECKED {
IF savedEnumBuffer # NIL THEN {
buffer _ savedEnumBuffer;
savedEnumBuffer _ NIL;
};
};
PutBuffer: ENTRY PROC [buffer: REF enumBufferType ] = CHECKED {
savedEnumBuffer _ buffer;
};
Next: ENTRY PROC RETURNS [prevCounter, page: VolumeFormat.LogicalPage,
channel: Disk.Channel, diskPage: Disk.PageNumber] = CHECKED INLINE {
InlineTranslateLogicalRun: PROC [logicalRun: VolumeFormat.LogicalRun, volume: Volume]
RETURNS [channel: Disk.Channel, diskPage: Disk.PageNumber] = INLINE {
FOR sv: LIST OF PhysicalVolume.SubVolumeDetails _ volume.subVolumes, sv.rest UNTIL sv = NIL DO
IF sv.first.start <= logicalRun.first AND sv.first.start + sv.first.size > logicalRun.first THEN RETURN[
channel: sv.first.channel,
diskPage: [sv.first.address + (logicalRun.first - sv.first.start)]
 ];
REPEAT FINISHED => RETURN WITH ERROR File.Error[inconsistent]
ENDLOOP;
};
prevCounter _ counter;
counter _ [counter+1];
DO
page _ current _ IF (prevCounter = current AND doFirstPage) THEN current ELSE [current+1];
doFirstPage _ FALSE;
IF current < size AND skipBadPages THEN {
[channel, diskPage] _ InlineTranslateLogicalRun[[first: current, size: 1], volume];
IF ~disableBadPage THEN {
isBad: BOOL _ FALSE;
FOR i: CARDINAL IN [0..countBadPages) DO
IF badPages[i] = current THEN {isBad _ TRUE; EXIT};
ENDLOOP;
IF isBad THEN LOOP;
};
}
 ELSE {
IF current < size THEN [channel, diskPage] _ InlineTranslateLogicalRun[[first: current, size: 1], volume];
};
EXIT;
ENDLOOP;
};
QueueWork: FileInternal.EnumeratePagesProc = TRUSTED INLINE {
buffer[putLow] _ [status, da, label^, diskPage];
putLow _ putLow + 1;
IF putLow >= enumBufferSize THEN {putLow _ 0; putHigh _ putHigh + 1};
WHILE putLow = getLow AND putHigh # getHigh DO Process.Pause[1]; ENDLOOP; 
};
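-- The buffer is a ring shared by the Scan producers and the consuming loop below: putLow and getLow index the next slot to fill and to drain, while putHigh and getHigh count wrap-arounds, so equal index pairs distinguish an empty ring from a full one.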

Scan: PROC = TRUSTED {
exit: BOOL _ FALSE;
Process.SetPriority[Process.priorityForeground];
UNTIL exit DO 
WaitTurn: ENTRY PROC = CHECKED {
UNTIL done = prevCounter DO WAIT changed[! UNWIND => NULL] ENDLOOP;
};
CompletedTurn: ENTRY PROC = CHECKED {
done _ [prevCounter+1];
BROADCAST changed;
};
channel: Disk.Channel;
diskPage: Disk.PageNumber;
prevCounter: VolumeFormat.LogicalPage;
label: Disk.Label;
req: Disk.Request;
status: Disk.Status;
this: VolumeFormat.LogicalPage;
[prevCounter, this, channel, diskPage] _ Next[];
IF this >= size THEN EXIT;
req _ [
diskPage: diskPage,
data: FileInternal.scratchReader,
command: [header: verify, label: read, data: read],
count: 1 ];
status _ Disk.DoIO[channel, @label, @req].status;
IF status = badData THEN status _ Disk.ok; -- don't care about the data, only the label!
WaitTurn[];
IF NOT finished THEN finished _ QueueWork[status, this, @label, diskPage
! UNWIND => CompletedTurn[]];
exit _ finished;
CompletedTurn[];
ENDLOOP;
allDone _ TRUE;
};
badPageWorkProc: PROC [page: VolumeFormat.LogicalPage] = TRUSTED {
IF countBadPages < VolumeFormat.allocatedBadPages THEN {
badPages[countBadPages] _ page;
countBadPages _ countBadPages + 1;
};
};
scratchInterval: VM.Interval = [
page: VM.PageNumberForAddress[FileInternal.scratchReader],
count: VM.PagesForWords[DiskFace.wordsPerPage] ];
VM.SwapIn[interval: scratchInterval, kill: TRUE, pin: TRUE];
originalPriority _ Process.GetPriority[];
Process.SetPriority[Process.priorityNormal];
buffer _ GetBuffer[];
IF buffer = NIL THEN buffer _ NEW [enumBufferType];
badPages _ NEW[badPagesType];
FOR sv: LIST OF PhysicalVolume.SubVolumeDetails _ volumeAlias.subVolumes, sv.rest
UNTIL sv = NIL DO
FileInternal.GetBadPages [subVolume: sv.first, work: badPageWorkProc] ;
ENDLOOP;
{
ENABLE UNWIND => {
VM.Unpin[scratchInterval];
PutBuffer[buffer];
Process.SetPriority[originalPriority];
};
oneGuy: PROCESS = FORK Scan[];
otherGuy: PROCESS = FORK Scan[];
WHILE ~finished OR putLow # getLow OR putHigh # getHigh DO
DO
myPutLow: CARDINAL = putLow;
myPutHigh: CARDINAL = putHigh;
IF myPutLow >= enumBufferSize OR myPutLow # putLow OR myPutHigh # putHigh OR (putLow = getLow AND putHigh = getHigh) THEN EXIT;
IF ~finished THEN {
label: Disk.Label _ buffer[getLow].label;
finished _ work[buffer[getLow].status, buffer[getLow].da, @label, buffer[getLow].diskPage];
};
getLow _ getLow + 1;
IF getLow >= enumBufferSize THEN {getLow _ 0; getHigh _ getHigh + 1};
ENDLOOP;
IF allDone AND putLow = getLow AND putHigh = getHigh THEN EXIT;
Process.Pause[3];
ENDLOOP;
JOIN otherGuy;
JOIN oneGuy;
};
VM.Unpin[scratchInterval];
PutBuffer[buffer];
Process.SetPriority[originalPriority];
};


statistics: REF ARRAY FileStats.Type OF FileStats.Data _
NEW[ARRAY FileStats.Type OF FileStats.Data _ ALL[]];

hardExtends: FileStats.Type = spare0;

GetPulses: PROC RETURNS [FileStats.Pulses] = TRUSTED INLINE {
RETURN[ ProcessorFace.GetClockPulses[] ];
};

Incr: PUBLIC ENTRY PROC [type: FileStats.Type, pages: INT, startPulse: FileStats.Pulses] = {
old: FileStats.Data = statistics[type];
statistics[type] _
[calls: old.calls+1, pages: old.pages+pages, pulses: old.pulses + (GetPulses[]-startPulse)];
};

GetData: PUBLIC ENTRY PROC [type: FileStats.Type] RETURNS [FileStats.Data] = {
RETURN[statistics[type]];
};

ClearData: PUBLIC ENTRY PROC [type: FileStats.Type] = {
statistics[type] _ [];
};


Create: PUBLIC PROC [volume: File.Volume, size: File.PageCount, report: Reporter _ NIL] RETURNS [file: Handle] = {
startPulse: FileStats.Pulses = GetPulses[];
file _ FileInternal.AllocForCreate[]; -- gives us a handle not yet in FileTable
{
ENABLE UNWIND => FileInternal.DontInsert[];
file.volume _ volume;
GetHeaderVM[file, initRuns];
file.size _ TranslateLogicalRunTable[file];
InnerSetSize[file: file, size: size, create: TRUE, report: report !
UNWIND => FileInternal.FreeHeaderVM[file]--Delete[File]???--];
file.state _ opened;
};
FileInternal.Insert[file];
Incr[create, size, startPulse];
};
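-- Typical client usage, as a sketch (assuming clients reach these procedures through the File interface, that the handle type is named File.Handle there, and that buffer points to at least one page of VM):
--   f: File.Handle _ File.Create[volume: File.SystemVolume[], size: 10];
--   File.Write[file: f, to: [0], nPages: 1, from: buffer];
--   File.SetSize[file: f, size: 20];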

CreateVMBacking: PUBLIC PROC [volume: File.Volume, size: File.PageCount, report: Reporter _ NIL] RETURNS [file: Handle] = {
startPulse: FileStats.Pulses = GetPulses[];
file _ FileInternal.AllocForCreate[]; -- gives us a handle not yet in FileTable
{
ENABLE UNWIND => FileInternal.DontInsert[];
file.volume _ volume;
GetHeaderVM[file, initRuns];
file.size _ TranslateLogicalRunTable[file];
InnerSetSize[file: file, size: size, create: TRUE, report: report, VMBackingCreate: TRUE !
UNWIND => FileInternal.FreeHeaderVM[file]--Delete[File]???--];
file.state _ opened;
};
FileInternal.Insert[file];
Incr[create, size, startPulse];
};


Open: PUBLIC PROC [volume: File.Volume, fp: File.FP] RETURNS [file: Handle] = {
IF fp = File.nullFP THEN ERROR File.Error[unknownFile];
file _ FileInternal.Lookup[volume, fp];
Acquire[file, shared];
Unlock[file];
};

Delete: PUBLIC PROC [file: Handle] = {
Acquire[file, exclusive];
{
ENABLE File.Error => Unlock[file];
startPulse: FileStats.Pulses = GetPulses[];
delta: INT = file.size;
Contract[file, TRUE, -1, FALSE];
file.state _ deleted;
Incr[delete, delta, startPulse];
};
Unlock[file];
};

SetSize: PUBLIC PROC [file: Handle, size: File.PageCount] = {
InnerSetSize[file: file, size: size, create: FALSE, report: NIL];
};

minFactor: INT _ 10; -- => initial minimal runs are 1/10th of size change

InnerSetSize: PROC [file: Handle, size: File.PageCount, create: BOOL, report: Reporter, VMBackingCreate: BOOL _ FALSE ] = {
flushes: INT _ 0;
startingFree: INT _ -1;  -- free pages on volume; initially -1 for not-known
minRun: INT _ MAX[MIN[FileInternal.MaxTransferRun, size / minFactor], MIN[size+normalHeaderSize, 10]]; -- smallest run we will accept; decreased if necessary
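-- The first arm asks for runs of roughly a tenth of the requested growth (capped at MaxTransferRun); the second guarantees a small nonzero minimum (up to 10 pages, enough for size plus a normal header), so minRun never starts at 0 for tiny files.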
DO
IF create THEN Lock[file, exclusive] ELSE Acquire[file, exclusive];
{
startPulse: FileStats.Pulses = GetPulses[];
delta: INT = size-file.size;
SELECT TRUE FROM
delta > 0 => {
ENABLE File.Error => {
lack: INT = MIN[minRun, size-file.size]; -- calculate it while we still have the file locked
IF startingFree = -1 THEN startingFree _ FileBackdoor.GetVolumePages[file.volume].free;
IF why = volumeFull THEN {
hardExtend: BOOL;
flushedOK: BOOL _ TRUE;
FSRootFile: BOOL _ FALSE;
volume: Volume _ file.volume;
IF volume # NIL THEN TRUSTED {
IF file.fp = volume.root.rootFile[client].fp THEN FSRootFile _ TRUE;
};
IF size > 5000 AND minRun <= 5 AND flushes > 50 THEN REJECT;  -- a big file is filling up the disk => treat the error as real
Unlock[file];
hardExtend _  (FileBackdoor.GetVolumePages[file.volume].free - startingFree) > 8 * lack AND flushes > 10 ;
flushes _ flushes + 1;
IF ~FSRootFile THEN flushedOK _ FileInternal.Flush[file.volume, lack];
SELECT TRUE FROM
FSRootFile => {
Incr[hardExtends, delta, startPulse];
minRun _ minRun / 2;
IF minRun < 2 THEN REJECT; -- never let minRun get below 2
LOOP;
};
flushedOK AND ~FSRootFile => {
IF FileBackdoor.GetVolumePages[file.volume].free >= delta AND (hardExtend OR (flushes MOD 8) = 7) THEN {
Incr[hardExtends, delta, startPulse];
minRun _ minRun / 2;
IF minRun < 2 THEN REJECT; -- never let minRun get below 2
};
LOOP;
};
~flushedOK => {
IF FileBackdoor.GetVolumePages[file.volume].free >= lack THEN {
Incr[hardExtends, delta, startPulse];
minRun _ minRun / 2;
IF minRun < 2 THEN REJECT; -- never let minRun get below 2
LOOP;
}
 ELSE REJECT;
};
ENDCASE => REJECT;
}
 ELSE {
IF NOT create -- core run-table # disk, so close the file; let DoOpen recover.
THEN { file.state _ none; FileInternal.FreeHeaderVM[file] };
Unlock[file];
};
};
IF delta < minRun THEN minRun _ delta;
Extend[file, delta, minRun, report, VMBackingCreate];
Incr[extend, delta, startPulse];
};
delta < 0 => {
ENABLE File.Error => {
file.state _ none;
FileInternal.FreeHeaderVM[file];
Unlock[file];
};
Contract[file, FALSE, size, FALSE]; Incr[contract, -delta, startPulse];
};
ENDCASE => NULL;
};
Unlock[file];
EXIT
ENDLOOP;
};

Info: PUBLIC PROC [file: Handle] RETURNS [volume: File.Volume, fp: File.FP, size: File.PageCount] = {
Acquire[file, shared];
{
ENABLE File.Error => Unlock[file];
volume _ file.volume; fp _ file.fp; size _ file.size;
};
Unlock[file];
};

SetRoot: PUBLIC PROC [root: File.VolumeFile, file: Handle, page: File.PageNumber _ [0]] = TRUSTED {
Acquire[file, shared];
{
ENABLE File.Error => Unlock[file];
id: RelID;
firstLink: DiskFace.DontCare;
channel: Disk.Channel;
[id, firstLink, channel] _ MakeBootable[file, page];
FileInternal.RecordRootFile[file.volume, root, file.fp, page, id, firstLink, channel];
};
Unlock[file];
{
ENABLE File.Error => CONTINUE;
IF root = VM
AND File.SystemVolume[] # NIL
AND FileBackdoor.IsDebugger[File.SystemVolume[]] = FileBackdoor.IsDebugger[file.volume]
THEN RegisterVMFile[file];
};
};

GetFileLocation: PUBLIC PROC [file: Handle, firstPage: File.PageNumber] RETURNS [location: BootFile.Location] = TRUSTED {
Acquire[file, shared];
{
ENABLE File.Error => Unlock[file];
id: RelID;
firstLink: DiskFace.DontCare;
channel: Disk.Channel;
[id, firstLink, channel] _ MakeBootable[file, firstPage];
location.diskFileID _ [fID: [rel[id]], firstPage: firstPage, firstLink: firstLink];
[type: location.deviceType, ordinal: location.deviceOrdinal] _
Disk.DriveAttributes[channel];
};
Unlock[file];
};

Read: PUBLIC UNSAFE PROC [file: Handle, from: File.PageNumber, nPages: File.PageCount, to: LONG POINTER] = {
IF from < 0 THEN ERROR File.Error[unknownPage, from];
Acquire[file, shared];
{
ENABLE File.Error => Unlock[file];
startPulse: FileStats.Pulses = GetPulses[];
FileInternal.Transfer[file: file, data: to, filePage: from, nPages: nPages, action: read, where: data];
Incr[read, nPages, startPulse];
};
Unlock[file];
};

Write: PUBLIC PROC [file: Handle, to: File.PageNumber, nPages: File.PageCount, from: LONG POINTER] = {
IF to < 0 THEN ERROR File.Error[unknownPage, to];
Acquire[file, shared];
{
ENABLE File.Error => Unlock[file];
startPulse: FileStats.Pulses = GetPulses[];
FileInternal.Transfer[file: file, data: from, filePage: to, nPages: nPages, action: write, where: data];
Incr[write, nPages, startPulse];
};
Unlock[file];
};

GetProperties: PUBLIC PROC [file: Handle] RETURNS [prop: File.PropertyStorage, nPages: File.PageCount] = {
Acquire[file, shared];
{
ENABLE File.Error => Unlock[file];
prop _ file.properties;
nPages _ file.propertyPages;
};
Unlock[file];
};

WriteProperties: PUBLIC PROC [file: Handle] = TRUSTED {
Acquire[file, shared];
{
ENABLE File.Error => Unlock[file];
FileInternal.Transfer[file: file, data: file.properties,
filePage: [file.logicalRunTable.headerPages-file.propertyPages],
nPages: file.propertyPages, action: write, where: header]
};
Unlock[file];
};

}.

FileImpl.mesa - per-file operations, locking file header data structures
Copyright © 1985 by Xerox Corporation.  All rights reserved.
Andrew Birrell  December 8, 1983 9:51 am
Levin, August 8, 1983 5:57 pm
Schroeder, June 10, 1983 5:20 pm
Bob Hagmann, January 9, 1986 4:05:49 pm PST
Russ Atkinson (RRA) May 7, 1985 3:48:32 pm PDT

Several different modules export various of the types below.  I suspect this is because of the glitches in opaque types, but I don't really know. - rbh
******** Data Types and minor subroutines ********
Header and Run Conversion
exported to File
indicates data is bad but label is ok
exported to FileInternal
******** Header and Run-table management ********
Avoids page faults, which would take longer to handle but have the same effect.
initialise only
initialise from old tables
exported to FileInternal
exported to FileInternal
exported to File
exported to FileInternal
exported to FileInternal
Assumes file.runTable.nRuns > 0
******** Some Subroutines for access to file pages.  Others in FilePagesImpl ********
Write boot-chain link at "thisDiskPage" for current "filePage"
At top of loop, thisDiskPage, thisSize and filePage correspond to the start of a run
Here, thisDiskPage, thisSize and filePage correspond to the last page of the file
NOTE: on entry for creation, file.size = -(number of header pages) and file.fp = nullFP.
the next four variables are only used when allocating the VM Backing File
get an empty logical run so that we can find out the channel
logicalRun _ FileInternal.Alloc[volume: volume, first: 0, size: 0, min: 10 ];
[channel: channel] _ FileInternal.TranslateLogicalRun[logicalRun, volume];
Cylinder changed before head.  This is an Alto-Environment-compatible disk, such as is currently used on the Dorado.
Compute the number of platters to use.  This is done by finding the number of completely free platters, and dividing the backing file evenly between them.  The last platter is allowed to have a little bit on the end already allocated (an eighth of the platter). 
Head changed before cylinder.  Disk is organized in a more standard manner, as on a DLion.  Put the backing file in the center of the largest subVolume.
disk is already fragmented - give up
Start of body for Extend
Record the safe length of the file in the disk run table
Loop for each allocated disk run
Loop for each available fragment of disk run
Ensuing transfer will write the run table to disk
Ensure disk run-table is superset of allocated pages
write labels and data for header area
i.e. if there are still pages free and we've finished the header
correct run-table to number of pages successfully written
skip bad/in-use page
Write new length and "unstable" in disk run table in case of crash
Got the whole header, but is page 1 the correct page for the run specified in the header?
Normally, this IF is false because almost all files have exactly two header pages and they are allocated together.  Hence, they have already been read.
read in the remaining header pages
get a new VM buffer, and copy data read above into it.  This time we know that we will allocate a big enough chunk of VM
We set countDone to one because we have to rely on header page 0 to discriminate between possible duplicate sets of header pages 1..n caused by a crash.  If the header is more than 2 pages, and we read two here ok, the second is almost certainly bogus anyway.
Loop to read in all the header pages.  The normal case is that this will do one Transfer, but in case the header cannot all fit in one page of runs (currently not supported) we do a loop.
read in the rest of the header
exported to FileInternal
exported to FileInternal
Called instead of Lock to re-open the file if it was invalidated by a checkpoint
File is in table but has not yet been opened.  We open it here, under the file's exclusive lock
exported to FileInternal
Wrinkle: if file.state=none, we need exclusive access, so we can call DoOpen
exported to FileInternal
******** Scanning for files (File.NextFile) ********

exported to FileBackdoor
[status: Disk.Status, da: VolumeFormat.LogicalPage, label: POINTER TO Disk.Label, diskPage: INT] RETURNS [exit: BOOL _ FALSE];
exported to FileInternal
Accesses only immutable fields of the volume
******** Statistics ********
exported to FileStats
exported to FileStats
exported to FileStats
******** Top-level procedures ********
exported to File
exported to FileBackdoor
exported to File
exported to File
exported to File
We know that the disk run-table matches the in-core one (???).
Prevent flushes when extending the FS BTree -  or we will deadlock
Flusher succeeded
Note: the FS flusher will flush at most one file per Flush request!  It ignores lack completely
LogicalVolumeImpl.Alloc will not even try to allocate if the size is bigger than free on the volume - it ignores minRun completely in this case.
	can't flush, but enough room so go for it
-- old code below
IF ~FSRootFile AND flushedOK AND ~hardExtend AND flushes < 8 THEN {  -- Flusher succeeded
	Note: FS flusher will flush at most one file per Flush request!  It ignores lack completely
LOOP;
}
 ELSE {  -- can't flush anymore
IF FileBackdoor.GetVolumePages[file.volume].free >= lack THEN {
Incr[hardExtends, delta, startPulse];
minRun _ minRun / 2;
IF minRun < 2 THEN REJECT; -- never let minRun get below 2
LOOP;
};
};
Otherwise, let the error propagate to our client
core run-table # disk, so close the file.  DoOpen will try to recover.
exported to File
exported to FileBackdoor
exported to FileInternal
exported to File
exported to File
exported to File
exported to File
Bob Hagmann January 31, 1985 12:09:12 pm PST
Cedar 6.0 conversion
changes to: DIRECTORY, GetProperties, Reporter 
Bob Hagmann March 19, 1985 9:30:58 am PST
changes to: RunsToHeaderPages, HeaderPagesToRuns, DoPinnedIO, CheckStatus, GetHeaderVM, WriteRunTable, FreeHeaderVM, TranslateLogicalRunTable
Russ Atkinson (RRA) May 7, 1985 3:47:44 pm PDT
Added call to VM.SwapIn after call to VM.Allocate to reduce bogus page faults
Bob Hagmann May 9, 1985 4:38:40 pm PDT
changes to: GetHeaderVM, DIRECTORY, InnerSetSize
Bob Hagmann May 28, 1985 4:15:30 pm PDT
changes to: GetHeaderVM, DoOpen
Bob Hagmann October 4, 1985 2:51:57 pm PDT
prevent flushes from occurring during extend of FS.BTree; ensure minRun is not bigger than delta in InnerSetSize
changes to: InnerSetSize
Bob Hagmann October 29, 1985 7:40:45 am PST
change flushing logic to flush (while flushing works) for a file that is bigger than the free space
changes to: InnerSetSize
Bob Hagmann January 9, 1986 4:05:28 pm PST
Fixes to bad status reporting
changes to: WriteLink (local of MakeBootable)
Bob Hagmann January 20, 1986 5:39:46 pm PST
Make first try to read the header pages in DoOpen only do one try to avoid lots of re-tries if the header run is only one page.  Fix computation of minRun in InnerSetSize: it used to be able to get a 0 minRun for small files.
changes to: DoOpen, InnerSetSize
