DIRECTORY Basics USING [bytesPerWord, LongNumber, BITAND, LowHalf, UnsafeBlock], BasicTime USING [GMT], FileStream USING [FinalizationProc, SetLength], FileStreamPrivate USING[ Data, FSDataHandle, BufferNodeHandle, BufferNode, DoFinalization, FileDataHandle, FileData, NodeStatus, ProcHandle, ProcHandleFromAccessRights, SetupBuffer, StartRequest ], FS USING [ByteCount, BytesForPages, Close, Error, ErrorDesc, ExtendFileProc, GetInfo, GetName, InitialPosition, Lock, nullOpenFile, OpenFile, Read, SetByteCountAndCreatedTime, SetPageCount, StreamBufferParms, StreamOptions, Write], FSBackdoor USING [ProduceError], FSLock USING [RecordREF, RemoveREF ], IO USING [Close, CreateStream, CreateStreamProcs, EndOfStream, Error, GetChar, GetIndex, GetLength, SetIndex, STREAM, StreamProcs], IOUtils USING [closedStreamProcs, StoreData], PrincOps USING [ByteBltBlock], PrincOpsUtils USING [ByteBlt], Process USING [GetPriority, Pause, Priority, priorityForeground, SetPriority], Rope USING [ROPE], RuntimeError USING [BoundsFault], SafeStorage USING [EnableFinalization, FinalizationQueue, FQNext], VM USING [ AddressForPageNumber, Allocate, BytesForPages, CantAllocate, Free, Interval, MakeChanged, MakeUnchanged, PageNumber, SwapIn]; FileStreamImpl: CEDAR MONITOR LOCKS fileData.lockRecord USING fileData: FileDataHandle IMPORTS Basics, FileStream, FileStreamPrivate, FS, FSBackdoor, FSLock, PrincOpsUtils, IO, IOUtils, Process, RuntimeError, SafeStorage, VM EXPORTS FileStreamPrivate, FileStream = BEGIN OPEN Basics, Rope; STREAM: TYPE = IO.STREAM; ByteCount: TYPE = INT; ByteNumber: TYPE = ByteCount; -- index rather than count PageNumber: TYPE = VM.PageNumber; PageCount: TYPE = VM.PageNumber; bytesPerFilePage: CARDINAL = FS.BytesForPages[1]; maxVMPagesPerBuffer: INT = 65536/VM.BytesForPages[pages: 1] ; clearLowBits: CARDINAL = CARDINAL.LAST-(bytesPerFilePage-1); minFileExtend: INT = 10*bytesPerFilePage; Data: TYPE = FileStreamPrivate.Data; FSDataHandle: TYPE = FileStreamPrivate.FSDataHandle; BufferNode: TYPE = FileStreamPrivate.BufferNode; BufferNodeHandle: TYPE = FileStreamPrivate.BufferNodeHandle; FileDataHandle: TYPE = FileStreamPrivate.FileDataHandle; FileData: TYPE = FileStreamPrivate.FileData; ProcHandle: TYPE = FileStreamPrivate.ProcHandle; CleanupAfterPut: ENTRY PROC [fileData: FileDataHandle, selfData: FSDataHandle] = INLINE { currentNode: BufferNodeHandle = selfData.currentNode; IF currentNode.didPut THEN { currentNode.bufferDirty _ TRUE; IF selfData.index > currentNode.dataBytesInBuffer THEN { currentNode.dataBytesInBuffer _ selfData.index; fileData.fileLength _ currentNode.firstFileByteInBuffer + selfData.index; }; currentNode.didPut _ FALSE; }; }; EstablishFileLength: ENTRY PROC[fileData: FileDataHandle ] RETURNS [fileLength: INT] = INLINE { ENABLE UNWIND => NULL; writeData: FSDataHandle _ fileData.writeStreamData; IF writeData = NIL OR writeData.streamIsClosed THEN { writeData _ NIL ; RETURN[fileData.fileLength] ; } ELSE { writeNode: BufferNodeHandle = writeData.currentNode ; writeNode.dataBytesInBuffer _ MAX[ writeData.index, writeNode.dataBytesInBuffer] ; fileLength _ MAX[ fileData.fileLength, writeNode.firstFileByteInBuffer + writeNode.dataBytesInBuffer ] ; fileData.fileLength _ fileLength; }; writeData _ NIL ; }; convertFStoIOError: PROC [self: STREAM, error: FS.ErrorDesc] = INLINE { selfData: FSDataHandle _ NARROW[self.streamData]; IF selfData # NIL THEN { selfData.FSErrorDesc _ error ; IF selfData.ConvertFStoIOErrors THEN IO.Error[$Failure, self]; } ELSE { IO.Error[$Failure, self]; }; }; GetChar: 
PUBLIC PROC [self: STREAM] RETURNS [CHAR] = { ENABLE FS.Error => { convertFStoIOError [self, error]; }; selfData: FSDataHandle _ NARROW[self.streamData]; node: BufferNodeHandle _ selfData.currentNode ; c: CHAR; fileLength: INT ; IF bufferBad[node.status] THEN IO.Error[$Failure, self]; IF selfData.index >= node.dataBytesInBuffer THEN { fileLength _ EstablishFileLength[fileData: selfData.fileData ] ; IF fileLength <= selfData.index+node.firstFileByteInBuffer THEN ERROR IO.EndOfStream[self]; IF selfData.index = node.bufferBytes THEN node _ AdvanceBuffer[selfData] }; TRUSTED{c _ node.buffer[selfData.index]}; selfData.index _ selfData.index + 1; selfData _ NIL ; RETURN[c] ; }; PutChar: PUBLIC PROC [self: STREAM, char: CHAR] = { ENABLE FS.Error => { convertFStoIOError [self, error]; }; selfData: FSDataHandle _ NARROW[self.streamData]; node: BufferNodeHandle _ selfData.currentNode ; IF selfData.index = node.bufferBytes THEN node _ AdvanceBuffer[selfData]; IF bufferBad[node.status] THEN IO.Error[$Failure, self]; TRUSTED{node.buffer[selfData.index] _ char}; selfData.index _ selfData.index + 1; node.didPut _ TRUE ; selfData _ NIL ; }; AddNAT: PROC [a, b: NAT] RETURNS [NAT] = INLINE { RETURN [MIN[CARDINAL[a]+CARDINAL[b], NAT.LAST]]; }; GetBlock: PUBLIC PROC [self: STREAM, block: REF TEXT, startIndex: NAT, count: NAT] RETURNS [nBytesRead: NAT] = TRUSTED { ENABLE FS.Error => { convertFStoIOError [self, error]; }; selfData: FSDataHandle _ NARROW[self.streamData]; textBlock: PrincOps.ByteBltBlock; countRemaining: NAT; stopIndexPlusOne: NAT = MIN [block.maxLength, AddNAT[startIndex, count]]; IF bufferBad[selfData.currentNode.status] THEN IO.Error[$Failure, self]; textBlock _ [ blockPointer: LOOPHOLE[block, LONG POINTER] + TEXT[0].SIZE, startIndex: startIndex, stopIndexPlusOne: stopIndexPlusOne]; countRemaining _ IF startIndex > stopIndexPlusOne THEN 0 ELSE stopIndexPlusOne-startIndex; nBytesRead _ 0; WHILE countRemaining # 0 DO bufferBlock: PrincOps.ByteBltBlock _ [ blockPointer: selfData.currentNode.buffer, startIndex: selfData.index, stopIndexPlusOne: selfData.currentNode.dataBytesInBuffer]; countTransferred: CARDINAL _ 0; IF bufferBlock.startIndex < bufferBlock.stopIndexPlusOne THEN countTransferred _ PrincOpsUtils.ByteBlt[from: bufferBlock, to: textBlock]; selfData.index _ selfData.index + countTransferred; nBytesRead _ nBytesRead + countTransferred; IF (countRemaining _ countRemaining - countTransferred) = 0 THEN EXIT; IF EstablishFileLength[fileData: selfData.fileData ] <= selfData.index + selfData.currentNode.firstFileByteInBuffer THEN EXIT; textBlock.startIndex _ textBlock.startIndex + countTransferred; IF selfData.index = selfData.currentNode.bufferBytes THEN [] _ AdvanceBuffer[selfData]; ENDLOOP; IF nBytesRead # 0 THEN block.length _ startIndex + nBytesRead; selfData _ NIL ; RETURN[nBytesRead] }; PutBlock: PUBLIC PROC [self: STREAM, block: REF READONLY TEXT, startIndex: NAT, count: NAT] = TRUSTED { ENABLE FS.Error => { convertFStoIOError [self, error]; }; selfData: FSDataHandle _ NARROW[self.streamData]; textBlock: PrincOps.ByteBltBlock; countRemaining: NAT; stopIndexPlusOne: NAT _ AddNAT[startIndex, count]; IF bufferBad[selfData.currentNode.status] THEN IO.Error[$Failure, self]; IF stopIndexPlusOne > block.maxLength THEN stopIndexPlusOne _ block.length; textBlock _ [ blockPointer: LOOPHOLE[block, LONG POINTER] + TEXT[0].SIZE, startIndex: startIndex, stopIndexPlusOne: stopIndexPlusOne]; countRemaining _ IF startIndex > stopIndexPlusOne THEN 0 ELSE stopIndexPlusOne-startIndex; WHILE 
countRemaining # 0 DO bufferBlock: PrincOps.ByteBltBlock _ [ blockPointer: selfData.currentNode.buffer, startIndex: selfData.index, stopIndexPlusOne: selfData.currentNode.bufferBytes]; -- allow put past current eof. countTransferred: CARDINAL _ PrincOpsUtils.ByteBlt[from: textBlock, to: bufferBlock]; selfData.index _ selfData.index + countTransferred; selfData.currentNode.didPut _ TRUE; IF (countRemaining _ countRemaining - countTransferred) = 0 THEN EXIT; textBlock.startIndex _ textBlock.startIndex + countTransferred; [] _ AdvanceBuffer[selfData]; ENDLOOP; selfData _ NIL ; }; maxWordsMoved: INT = (LAST[CARDINAL] / bytesPerWord) - 1; maxBytesMoved: INT = maxWordsMoved * bytesPerWord; maxStopIndexPlusOne: INT = maxBytesMoved ; UnsafeGetBlock: PUBLIC UNSAFE PROC [self: STREAM, block: UnsafeBlock] RETURNS [nBytesRead: INT] = UNCHECKED { ENABLE FS.Error => { convertFStoIOError [self, error]; }; selfData: FSDataHandle _ NARROW[self.streamData]; textBlock: PrincOps.ByteBltBlock; stopIndexPlusOne: INT; IF bufferBad[selfData.currentNode.status] THEN IO.Error[$Failure, self]; IF block.startIndex < 0 OR block.count < 0 THEN ERROR RuntimeError.BoundsFault; IF block.count = 0 THEN { selfData _ NIL ; RETURN [0]; }; IF block.startIndex > maxBytesMoved THEN { wordOffset: INT = block.startIndex / bytesPerWord; block.base _ block.base + wordOffset; block.startIndex _ block.startIndex - wordOffset*bytesPerWord; }; stopIndexPlusOne _ block.startIndex + block.count; nBytesRead _ 0; DO countRemaining: CARDINAL; textBlock _ [ blockPointer: block.base, startIndex: block.startIndex, stopIndexPlusOne: MIN[maxStopIndexPlusOne, stopIndexPlusOne]]; countRemaining _ textBlock.stopIndexPlusOne - textBlock.startIndex; DO bufferBlock: PrincOps.ByteBltBlock _ [ blockPointer: selfData.currentNode.buffer, startIndex: selfData.index, stopIndexPlusOne: selfData.currentNode.dataBytesInBuffer]; countTransferred: CARDINAL _ 0; IF bufferBlock.startIndex < bufferBlock.stopIndexPlusOne THEN countTransferred _ PrincOpsUtils.ByteBlt[from: bufferBlock, to: textBlock]; selfData.index _ selfData.index + countTransferred; nBytesRead _ nBytesRead + countTransferred; IF (countRemaining _ countRemaining - countTransferred) = 0 THEN EXIT; IF EstablishFileLength[fileData: selfData.fileData ] <= selfData.index + selfData.currentNode.firstFileByteInBuffer THEN { selfData _ NIL ; GOTO return; }; textBlock.startIndex _ textBlock.startIndex + countTransferred; IF selfData.index = selfData.currentNode.bufferBytes THEN [] _ AdvanceBuffer[selfData]; ENDLOOP; IF textBlock.stopIndexPlusOne = stopIndexPlusOne THEN { selfData _ NIL ; GOTO return; }; block.base _ block.base + maxWordsMoved; block.startIndex _ 0; stopIndexPlusOne _ stopIndexPlusOne - maxBytesMoved; ENDLOOP; EXITS return => RETURN [nBytesRead] }; UnsafePutBlock: PUBLIC PROC [self: STREAM, block: UnsafeBlock] = TRUSTED { ENABLE FS.Error => { convertFStoIOError [self, error]; }; selfData: FSDataHandle _ NARROW[self.streamData]; textBlock: PrincOps.ByteBltBlock; stopIndexPlusOne: INT; IF bufferBad[selfData.currentNode.status] THEN IO.Error[$Failure, self]; IF block.startIndex < 0 OR block.count < 0 THEN ERROR RuntimeError.BoundsFault; IF block.startIndex > maxBytesMoved THEN { wordOffset: INT = block.startIndex / bytesPerWord; block.base _ block.base + wordOffset; block.startIndex _ block.startIndex - wordOffset*bytesPerWord; }; stopIndexPlusOne _ block.startIndex + block.count; DO countRemaining: CARDINAL; textBlock _ [ blockPointer: block.base, startIndex: block.startIndex, stopIndexPlusOne: 
MIN[maxStopIndexPlusOne, stopIndexPlusOne]]; countRemaining _ textBlock.stopIndexPlusOne - textBlock.startIndex; DO bufferBlock: PrincOps.ByteBltBlock _ [ blockPointer: selfData.currentNode.buffer, startIndex: selfData.index, stopIndexPlusOne: selfData.currentNode.bufferBytes]; -- allow put past current eof. countTransferred: CARDINAL _ PrincOpsUtils.ByteBlt[from: textBlock, to: bufferBlock]; selfData.index _ selfData.index + countTransferred; selfData.currentNode.didPut _ TRUE; IF (countRemaining _ countRemaining - countTransferred) = 0 THEN EXIT; textBlock.startIndex _ textBlock.startIndex + countTransferred; [] _AdvanceBuffer[selfData]; ENDLOOP; IF textBlock.stopIndexPlusOne = stopIndexPlusOne THEN EXIT; block.base _ block.base + maxWordsMoved; block.startIndex _ 0; stopIndexPlusOne _ stopIndexPlusOne - maxBytesMoved; ENDLOOP ; selfData _ NIL ; }; AdvanceBuffer: PROC [fsData: FSDataHandle] RETURNS [node: BufferNodeHandle]= { fileData: FileDataHandle = fsData.fileData; firstByteOfNextPage: INT = fsData.currentNode.firstFileByteInBuffer + fsData.currentNode.bufferBytes; changeSize: BOOL _ FALSE; IF firstByteOfNextPage = maxLength THEN ERROR IO.Error[$Failure, NIL]; IF fsData.isWriteStream THEN CleanupAfterPut[fileData: fileData, selfData: fsData]; IF firstByteOfNextPage >= fileData.byteSize THEN { newSize: INT _ 0; IF fileData.extendFileProc # NIL THEN newSize _ fileData.extendFileProc[firstByteOfNextPage] ; fileData.byteSize _ IF newSize # 0 THEN MAX[newSize, firstByteOfNextPage] ELSE fileData.byteSize + MAX[minFileExtend, ((fileData.byteSize/10)/bytesPerFilePage)*bytesPerFilePage]; SetFileSize[fileData.fileHandle, fileData.byteSize] }; node _ SetupBuffer[fileData: fileData, fsData: fsData, fileByte: firstByteOfNextPage]; fsData.index _ LowHalf[firstByteOfNextPage-fsData.currentNode.firstFileByteInBuffer]; fsData _ NIL ; }; EndOf: PUBLIC PROC [self: STREAM] RETURNS[BOOL] = { selfData: FSDataHandle _ NARROW[self.streamData]; node: BufferNodeHandle _ selfData.currentNode ; IF selfData.index >= node.dataBytesInBuffer THEN { fileLength: INT = EstablishFileLength[fileData: selfData.fileData]; IF fileLength <= selfData.index+node.firstFileByteInBuffer THEN RETURN[TRUE]; }; selfData _ NIL ; RETURN[FALSE]; }; CharsAvail: PUBLIC PROC [self: STREAM, wait: BOOL] RETURNS [INT] = { RETURN[INT.LAST] }; GetIndex: PUBLIC PROC [self: STREAM] RETURNS [index: INT] = { selfData: FSDataHandle _ NARROW[self.streamData]; index _ selfData.currentNode.firstFileByteInBuffer + selfData.index ; selfData _ NIL ; }; SetIndex: PUBLIC PROC [self: STREAM, index: INT] = { ENABLE FS.Error => { convertFStoIOError [self, error]; }; fsData: FSDataHandle _ NARROW[self.streamData]; currentNode: BufferNodeHandle _ fsData.currentNode ; firstBufferByte: INT _ currentNode.firstFileByteInBuffer; fileData: FileDataHandle = fsData.fileData ; fileLength: INT ; IF index < 0 THEN ERROR IO.Error[BadIndex, self]; IF fsData.isWriteStream THEN { CleanupAfterPut[fileData: fileData, selfData: fsData]; fileLength _ fileData.fileLength; } ELSE fileLength _ EstablishFileLength[fileData: fileData ]; IF index > fileLength THEN ERROR IO.EndOfStream[self]; IF index NOT IN [firstBufferByte .. 
firstBufferByte+currentNode.bufferBytes) THEN { firstBufferByte _ index - (index MOD currentNode.bufferBytes); currentNode _ SetupBuffer[fileData: fileData, fsData: fsData, fileByte: firstBufferByte]; }; fsData.index _ index - firstBufferByte; fsData _ NIL ; }; Reset: PUBLIC PROC [self: STREAM] = { SetIndex[self, GetLength[self]] }; Flush: PUBLIC PROC [self: STREAM] = { ENABLE FS.Error => { convertFStoIOError [self, error]; }; fsData: FSDataHandle _ NARROW[self.streamData]; IF fsData.isWriteStream THEN ForceOut[ fsData: fsData ]; fsData _ NIL ; }; Close: PUBLIC PROC [self: STREAM, abort: BOOL] = { ENABLE FS.Error => { convertFStoIOError [self, error]; }; fsData: FSDataHandle _ NARROW[self.streamData]; IF ~abort THEN ForceOut[ fsData: fsData ! UNWIND => { fsData: FSDataHandle _ NARROW[self.streamData]; CloseFileDataForStream[fileData: fsData.fileData, fsData: fsData, forceCleanUp: TRUE]; fsData _ NIL ; self.streamData _ NIL ; self.streamProcs _ IOUtils.closedStreamProcs; }; ]; CloseFileDataForStream[fileData: fsData.fileData, fsData: fsData]; fsData _ NIL ; self.streamData _ NIL ; self.streamProcs _ IOUtils.closedStreamProcs; }; GetLength: PUBLIC PROC [self: STREAM] RETURNS [length: INT] = { selfData: FSDataHandle _ NARROW[self.streamData]; IF selfData.streamIsClosed THEN ERROR IO.Error[StreamClosed, self]; length _ EstablishFileLength[fileData: selfData.fileData ] ; selfData _ NIL ; }; clearHighBits: CARDINAL = (bytesPerFilePage-1); maxLength: INT = INT.LAST - bytesPerFilePage; SetLength: PUBLIC PROC [self: STREAM, length: INT] = { ENABLE FS.Error => { convertFStoIOError [self, error]; }; fsData: FSDataHandle _ NARROW[self.streamData]; IF fsData.streamIsClosed THEN ERROR IO.Error[StreamClosed, self]; IF length NOT IN [0 .. maxLength] THEN ERROR IO.Error[BadIndex, self]; SetLengthUnderMonitor[fileData: fsData.fileData, length: length]; IF fsData.index+fsData.currentNode.firstFileByteInBuffer > length THEN { fsData.index _ 0 ; SetIndex[self: self, index: length]; }; fsData _ NIL ; }; RoundUpToPages: PROC [bytes: INT] RETURNS [INT] = INLINE { bytes _ bytes + (bytesPerFilePage-1); LOOPHOLE[bytes, LongNumber[pair]].lo _ BITAND[LOOPHOLE[bytes, LongNumber[pair]].lo, clearLowBits]; RETURN[bytes]; }; PagesForRoundUpBytes: PROC [bytes: INT] RETURNS [INT] = INLINE { RETURN[RoundUpToPages[bytes]/bytesPerFilePage]; }; SetLengthUnderMonitor: ENTRY PROC [fileData: FileDataHandle, length: INT] = { ENABLE UNWIND => NULL; newFileBytes: INT = RoundUpToPages[length]; oldFileLength: INT ; nowNode: BufferNodeHandle _ fileData.firstBufferNode; writeData: FSDataHandle _ fileData.writeStreamData ; IF writeData # NIL AND writeData.currentNode.didPut THEN { currentNode: BufferNodeHandle = writeData.currentNode; currentNode.bufferDirty _ TRUE; currentNode.didPut _ FALSE; IF writeData.index > currentNode.dataBytesInBuffer THEN { currentNode.dataBytesInBuffer _ writeData.index; fileData.fileLength _ currentNode.firstFileByteInBuffer + writeData.index }; currentNode.didPut _ FALSE }; oldFileLength _ fileData.fileLength ; fileData.fileLength _ length; IF length < fileData.validBytesOnDisk THEN fileData.validBytesOnDisk _ length ; IF length > fileData.byteSize THEN { fileData.byteSize _ newFileBytes; SetFileSize[fileData.fileHandle, fileData.byteSize]; }; UNTIL nowNode = NIL DO IF (nowNode.status # invalid) THEN { IF (nowNode.firstFileByteInBuffer+nowNode.bufferBytes > length) THEN { IF nowNode.firstFileByteInBuffer >= length THEN { nowNode.dataBytesInBuffer _ 0; nowNode.bufferDirty _ FALSE ; nowNode.didPut _ FALSE ; } 
ELSE { nowNode.dataBytesInBuffer _ length - nowNode.firstFileByteInBuffer ; IF nowNode.didPut THEN { nowNode.didPut _ FALSE ; nowNode.bufferDirty _ TRUE ; }; }; } ELSE { nowNode.dataBytesInBuffer _ nowNode.bufferBytes ; }; }; nowNode _ nowNode.nextBufferNode ; ENDLOOP; writeData _ NIL ; }; EraseChar: PUBLIC PROC [self: STREAM, char: CHAR] = { index: INT = GetIndex[self]; IF index = 0 THEN ERROR IO.Error[IllegalBackup, self]; SetIndex[self, index-1]; IF GetChar[self] # char THEN {PutChar[self, '\\]; PutChar[self, char]} ELSE SetIndex[self, index-1] }; Backup: PUBLIC PROC [self: STREAM, char: CHAR] = { selfData: FSDataHandle _ NARROW[self.streamData]; index: INT; IF selfData.streamIsClosed THEN ERROR IO.Error[StreamClosed, self]; index _ GetIndex[self]; IF index = 0 THEN ERROR IO.Error[IllegalBackup, self]; SetIndex[self, index-1]; IF GetChar[self] # char THEN ERROR IO.Error[IllegalBackup, self]; SetIndex[self, index-1]; selfData _ NIL ; }; CloseFileDataForStream: ENTRY PROC [fileData: FileDataHandle, fsData: FSDataHandle, forceCleanUp: BOOL _ FALSE] = { ENABLE UNWIND => BROADCAST fileData.somethingHappened; needDeleted: INT _ IF fileData.numberOfStreams = 0 THEN INT.LAST ELSE fileData.streamBufferParms.nBuffers ; lastNode: BufferNodeHandle _ NIL ; node: BufferNodeHandle ; IF (node _ fsData.readAheadNode) # NIL THEN { WHILE node.useCount = 1 AND node.status # valid AND node.status # invalid AND node.status # needsSequentialRead DO WAIT fileData.somethingHappened ; ENDLOOP; node.useCount _ node.useCount-1 ; }; fsData.readAheadNode _ NIL; IF ~forceCleanUp AND fsData.isWriteStream AND fileData.accessRights = $write AND fileData.streamOptions[truncatePagesOnClose] THEN { SetFileSize[fileData.fileHandle, fileData.fileLength] ; }; IF (node _ fsData.currentNode) # NIL THEN node.useCount _ node.useCount-1 ; fsData.currentNode _ NIL ; node _ fileData.firstBufferNode ; UNTIL node = NIL OR needDeleted = 0 DO IF node.useCount = 0 THEN { TRUSTED{VM.Free[node.bufferInterval]}; IF fileData.firstBufferNode = node THEN fileData.firstBufferNode _ node.nextBufferNode ELSE lastNode.nextBufferNode _ node.nextBufferNode ; needDeleted _ needDeleted - 1 ; } ELSE { lastNode _ node ; }; node _ node.nextBufferNode ; ENDLOOP; fsData.streamIsClosed _ TRUE; IF (fileData.numberOfStreams _ fileData.numberOfStreams - 1) = 0 THEN { fileDataTemp: FS.OpenFile _ fileData.fileHandle ; fileData.fileHandle _ FS.nullOpenFile ; IF fileData.streamOptions[closeFSOpenFileOnClose] THEN { fileDataTemp.Close[! FS.Error => IF forceCleanUp THEN CONTINUE;]; -- let finalization close the file if we fail. 
}; }; BROADCAST fileData.somethingHappened ; fsData _ NIL ; }; FinishWrites: ENTRY PROC [fileData: FileDataHandle, fsData: FSDataHandle, currentNode: BufferNodeHandle, parallelWritesOK: BOOL _ FALSE] = { ENABLE UNWIND => NULL; nowNode: BufferNodeHandle _ fileData.firstBufferNode; IF fileData.accessRights = read THEN RETURN ; IF currentNode = NIL THEN { -- this is only true when called via ForceOut WHILE fileData.writeCount # 0 DO WAIT fileData.somethingHappened ; ENDLOOP; }; UNTIL nowNode = NIL DO IF nowNode # currentNode THEN { WHILE (nowNode.status = parallelReadActive) OR (nowNode.status = needsParallelRead) OR ( ~parallelWritesOK AND ((nowNode.status = parallelWriteActive) OR (nowNode.status = needsParallelWrite))) DO WAIT fileData.somethingHappened; ENDLOOP; IF (nowNode.status = needsSequentialWrite OR (currentNode = NIL AND nowNode.status # invalid)) THEN { nowNode.status _ sequentialWriteActive ; TRUSTED{WriteFilePages[f: fsData.fileData.fileHandle, node: nowNode ! UNWIND => nowNode.status _ valid] }; nowNode.bufferDirty _ FALSE ; nowNode.status _ valid ; }; }; nowNode _ nowNode.nextBufferNode ; ENDLOOP; fsData _ NIL ; }; nodeNowAvailable: ENTRY PROC [fileData: FileDataHandle] RETURNS [BOOL]= { ENABLE UNWIND => NULL; nowNode: BufferNodeHandle ; DO nowNode _ fileData.firstBufferNode; UNTIL nowNode = NIL DO IF ( nowNode.status = valid OR nowNode.status = invalid) AND nowNode.useCount = 0 THEN RETURN [TRUE]; IF nowNode.status = needsSequentialWrite THEN RETURN[FALSE]; nowNode _ nowNode.nextBufferNode ; ENDLOOP; WAIT fileData.somethingHappened; ENDLOOP; }; FinishRead: ENTRY PROC [fileData: FileDataHandle, node: BufferNodeHandle, bufferSize: INT] = INLINE { ENABLE UNWIND => NULL; node.status _ valid; node.dataBytesInBuffer _ bufferSize ; BROADCAST fileData.somethingHappened; }; FinishBadRead: ENTRY PROC [fileData: FileDataHandle, node: BufferNodeHandle] = INLINE { ENABLE UNWIND => NULL; node.status _ invalid; node.dataBytesInBuffer _ 0 ; BROADCAST fileData.somethingHappened; }; FinishBadPreRead: ENTRY PROC [fileData: FileDataHandle, node: BufferNodeHandle] = INLINE { ENABLE UNWIND => NULL; IF (fileData.firstReadStream # NIL AND fileData.firstReadStream.currentNode = node) OR (fileData.writeStreamData # NIL AND fileData.writeStreamData.currentNode = node) THEN { node.status _ needsSequentialRead; node.dataBytesInBuffer _ 0 ; BROADCAST fileData.somethingHappened; } ELSE { IF fileData.firstReadStream # NIL AND fileData.firstReadStream.readAheadNode = node THEN { fileData.firstReadStream.readAheadNode _ NIL ; node.useCount _ 0 ; }; IF fileData.writeStreamData # NIL AND fileData.writeStreamData.readAheadNode = node THEN { fileData.writeStreamData.readAheadNode _ NIL ; node.useCount _ 0 ; }; node.status _ invalid; BROADCAST fileData.somethingHappened; }; }; markNodeNotWritten: ENTRY PROC [fileData: FileDataHandle, node: BufferNodeHandle] = INLINE { ENABLE UNWIND => NULL; node.status _ needsSequentialWrite ; node.bufferDirty _ TRUE ; fileData.writeCount _ fileData.writeCount - 1; BROADCAST fileData.somethingHappened; }; markNodeWritten: ENTRY PROC [fileData: FileDataHandle, node: BufferNodeHandle] = INLINE { ENABLE UNWIND => NULL; node.status _ valid ; fileData.writeCount _ fileData.writeCount - 1; BROADCAST fileData.somethingHappened; }; bumpWriteCount: ENTRY PROC [fileData: FileDataHandle] = INLINE { ENABLE UNWIND => NULL; fileData.writeCount _ fileData.writeCount + 1; }; WaitForOneBufferNotWriting: ENTRY PROC [fileData: FileDataHandle] = INLINE { ENABLE UNWIND => NULL; WHILE 
fileData.writeCount >= fileData.streamBufferParms.nBuffers DO WAIT fileData.somethingHappened; ENDLOOP; }; WaitForParallelWriteToComplete: ENTRY PROC [fileData: FileDataHandle, node: BufferNodeHandle] = INLINE { ENABLE UNWIND => NULL; WHILE node.status = needsParallelWrite OR node.status = parallelWriteActive DO WAIT fileData.somethingHappened; ENDLOOP; }; ProcessNode: PUBLIC PROC [ fileData: FileDataHandle, node: BufferNodeHandle ] = { IF node.status = needsParallelRead THEN { node.status _ parallelReadActive ; ReadAhead [node: node, fileData: fileData]; RETURN ; }; IF node.status = needsParallelWrite THEN { node.status _ parallelWriteActive ; parallelWriteBuffer [ node: node, fileData: fileData ] ; RETURN ; }; ERROR ; }; parallelWriteBuffer: PROC [node: BufferNodeHandle, fileData: FileDataHandle] = { ENABLE FS.Error => { markNodeNotWritten[fileData: fileData, node: node]; GOTO done; }; TRUSTED{WriteFilePages[f: fileData.fileHandle, node: node] }; markNodeWritten[fileData: fileData, node: node]; EXITS done => RETURN }; ReadAhead: PROC [node: BufferNodeHandle, fileData: FileDataHandle] = { ENABLE FS.Error => { FinishBadPreRead[fileData: fileData, node: node]; GOTO done; }; bytesToRead: INT ; fileByte: INT = node.firstFileByteInBuffer ; IF (bytesToRead _ MIN[fileData.fileLength - fileByte, node.bufferBytes]) > 0 THEN TRUSTED{ReadFilePages[f: fileData.fileHandle, from: fileByte, numPages: PagesForRoundUpBytes[bytesToRead], to: node.buffer, interval: node.bufferInterval]}; FinishRead[fileData: fileData, node: node, bufferSize: bytesToRead]; EXITS done => RETURN }; SetupBuffer: PUBLIC PROC [fileData: FileDataHandle, fsData: FSDataHandle, fileByte: INT] RETURNS [currentNode: BufferNodeHandle] = { node: BufferNodeHandle _ fsData.currentNode ; readAheadNode: BufferNodeHandle; currentNodeStatus: FileStreamPrivate.NodeStatus; success: BOOL _ FALSE ; IF node = NIL THEN node _ fileData.firstBufferNode ; IF node.bufferDirty AND fsData.isWriteStream THEN { FinishWrites[fileData: fileData, fsData: fsData, currentNode: node, parallelWritesOK: TRUE]; IF node.dataBytesInBuffer + node.firstFileByteInBuffer > fileData.byteSize THEN { fileData.byteSize _ node.dataBytesInBuffer + node.firstFileByteInBuffer ; SetFileSize[fileData.fileHandle, fileData.byteSize] ; }; IF fileData.validBytesOnDisk < node.dataBytesInBuffer + node.firstFileByteInBuffer THEN fileData.validBytesOnDisk _ node.dataBytesInBuffer + node.firstFileByteInBuffer ; WaitForParallelWriteToComplete[fileData: fileData, node: node]; node.status _ needsParallelWrite ; node.bufferDirty _ FALSE ; bumpWriteCount[ fileData: fileData]; FileStreamPrivate.StartRequest [ fileData: fileData, node: node ] ; WaitForOneBufferNotWriting[fileData: fileData]; }; WHILE success # TRUE DO [success, currentNode, readAheadNode] _ SetUpNodes[fileData: fileData, fsData: fsData, fileByte: fileByte]; IF success THEN { currentNodeStatus _ currentNode.status ; SELECT currentNodeStatus FROM invalid,needsParallelRead,parallelReadActive,needsSequentialRead,sequentialReadActive=>{ IF readAheadNode = NIL THEN { makeNodeValid[fileData: fileData, node: currentNode ]; } ELSE { myPriority: Process.Priority ; myPriority _ Process.GetPriority[]; Process.SetPriority[Process.priorityForeground]; FileStreamPrivate.StartRequest [ fileData: fileData, node: readAheadNode ]; makeNodeValid[fileData: fileData, node: currentNode ]; Process.SetPriority[myPriority]; }; }; valid,needsParallelWrite,parallelWriteActive,needsSequentialWrite,sequentialWriteActive=>{ IF readAheadNode # NIL THEN { 
FileStreamPrivate.StartRequest [ fileData: fileData, node: readAheadNode ]; }; }; ENDCASE ; fsData _ NIL ; } ELSE { IF NOT nodeNowAvailable[fileData: fileData] THEN FinishWrites[fileData: fileData, fsData: fsData, currentNode: NIL]; }; ENDLOOP; }; makeNodeValid: PROC [fileData: FileDataHandle, node: BufferNodeHandle] = { bytesToRead: INT ; WHILE node.status # valid DO IF doTheRead[fileData: fileData, node: node] THEN { bytesToRead _ MIN[fileData.fileLength - node.firstFileByteInBuffer, node.bufferBytes]; IF fileData.validBytesOnDisk <= node.firstFileByteInBuffer THEN { FinishRead[fileData: fileData, node: node, bufferSize: bytesToRead]; } ELSE { IF bytesToRead > 0 THEN TRUSTED{ ReadFilePages[f: fileData.fileHandle, from: node.firstFileByteInBuffer, numPages: PagesForRoundUpBytes[bytesToRead], to: node.buffer, interval: node.bufferInterval ! FS.Error => { FinishBadRead[ fileData: fileData, node: node]; }; ]; }; FinishRead[fileData: fileData, node: node, bufferSize: bytesToRead]; }; }; ENDLOOP; }; doTheRead: ENTRY PROC [fileData: FileDataHandle, node: BufferNodeHandle] RETURNS [BOOL] = INLINE { ENABLE UNWIND => NULL; IF node.status = invalid OR node.status = needsSequentialRead THEN { node.status _ sequentialReadActive ; RETURN [TRUE]; }; IF node.status = valid THEN RETURN [ FALSE ] ELSE WAIT fileData.somethingHappened ; RETURN [ FALSE ] ; }; SetUpNodes: ENTRY PROC [fileData: FileDataHandle, fsData: FSDataHandle, fileByte: INT] RETURNS [success: BOOL _ TRUE, currentNode: BufferNodeHandle _ NIL, nextNode: BufferNodeHandle _ NIL] = { ENABLE UNWIND => NULL; nowNode: BufferNodeHandle _ fileData.firstBufferNode; availableNode: BufferNodeHandle _ NIL ; availableNodeLRUCount: INT _ 1000000; maxLRUCount: INT _ 0 ; node: BufferNodeHandle ; oldCurrentNode: BufferNodeHandle _ fsData.currentNode ; oldFirstByteInBuffer: INT = IF fsData.currentNode = NIL THEN -1 ELSE fsData.currentNode.firstFileByteInBuffer; bufferBytes: INT = nowNode.bufferBytes ; IF (node _ fsData.currentNode) # NIL THEN node.useCount _ node.useCount-1 ; IF (node _ fsData.readAheadNode) # NIL THEN node.useCount _ node.useCount-1 ; fsData.currentNode _ NIL ; fsData.readAheadNode _ NIL; UNTIL nowNode = NIL DO firstByte: INT _ nowNode.firstFileByteInBuffer; IF nowNode.LRUCount > maxLRUCount THEN maxLRUCount _ nowNode.LRUCount ; SELECT TRUE FROM firstByte = fileByte => { currentNode _ nowNode ; fsData.currentNode _ nowNode ; nowNode.useCount _ nowNode.useCount+1 ; }; firstByte = fileByte+bufferBytes => { IF fileData.streamBufferParms.nBuffers > 1 THEN { nowNode.useCount _ nowNode.useCount+1 ; fsData.readAheadNode _ nowNode ; } ELSE { IF( nowNode.status = valid OR nowNode.status = invalid) AND nowNode.useCount = 0 AND nowNode.LRUCount <= availableNodeLRUCount THEN { availableNodeLRUCount _ nowNode.LRUCount ; availableNode _ nowNode ; }; }; }; ( nowNode.status = valid OR nowNode.status = invalid) AND nowNode.useCount = 0 AND nowNode.LRUCount <= availableNodeLRUCount => { availableNodeLRUCount _ nowNode.LRUCount ; availableNode _ nowNode ; }; ENDCASE; -- SELECT TRUE nowNode _ nowNode.nextBufferNode ; ENDLOOP; IF currentNode = NIL THEN { IF availableNode = NIL THEN RETURN[FALSE]; currentNode _ availableNode ; currentNode.LRUCount _ maxLRUCount + 1 ; currentNode.useCount _ currentNode.useCount+1 ; currentNode.status _ invalid ; currentNode.firstFileByteInBuffer _ fileByte ; availableNode _ NIL ; fsData.currentNode _ currentNode ; }; IF fileData.streamBufferParms.nBuffers > 1 AND (oldFirstByteInBuffer+bufferBytes = fileByte) AND 
(fsData.lastFirstByteInBuffer+bufferBytes = oldFirstByteInBuffer) AND (fileData.fileLength > fileByte+bufferBytes) AND (fileData.validBytesOnDisk > fsData.lastFirstByteInBuffer+bufferBytes) THEN { IF fsData.readAheadNode # NIL THEN { IF fsData.readAheadNode.status = invalid THEN { nextNode _ fsData.readAheadNode ; nextNode.status _ needsParallelRead ; }; } ELSE { IF availableNode = NIL THEN { availableNodeLRUCount _ 1000000; nowNode _ fileData.firstBufferNode; UNTIL nowNode = NIL DO IF ( nowNode.status = valid OR nowNode.status = invalid) AND nowNode.useCount = 0 AND nowNode.LRUCount <= availableNodeLRUCount THEN availableNode _ nowNode ; nowNode _ nowNode.nextBufferNode ; ENDLOOP; }; IF availableNode # NIL THEN { nextNode _ availableNode ; nextNode.status _ needsParallelRead ; nextNode.LRUCount _ maxLRUCount + 1 ; nextNode.useCount _ nextNode.useCount+1 ; nextNode.firstFileByteInBuffer _ fileByte + bufferBytes ; fsData.readAheadNode _ nextNode ; }; }; }; fsData.lastFirstByteInBuffer _ oldFirstByteInBuffer ; fsData _ NIL ; }; ForceOut: PROC [fsData: FSDataHandle] = { fileData: FileDataHandle = fsData.fileData; node: BufferNodeHandle _ fsData.currentNode ; IF fsData.isWriteStream THEN CleanupAfterPut[fileData: fileData, selfData: fsData]; IF node.dataBytesInBuffer + node.firstFileByteInBuffer > fileData.byteSize THEN { fileData.byteSize _ node.dataBytesInBuffer + node.firstFileByteInBuffer ; SetFileSize[fileData.fileHandle, fileData.byteSize] ; }; IF fileData.validBytesOnDisk < node.dataBytesInBuffer + node.firstFileByteInBuffer THEN fileData.validBytesOnDisk _ node.dataBytesInBuffer + node.firstFileByteInBuffer ; FinishWrites[ fileData: fileData, fsData: fsData, currentNode: NIL]; IF fsData.isWriteStream AND fileData.accessRights = $write AND fileData.fileLength # fileData.byteLength THEN { fileData.byteLength _ fileData.fileLength; fileData.fileHandle.SetByteCountAndCreatedTime[fileData.byteLength] }; fsData _ NIL ; }; SaveStreamError: PUBLIC PROCEDURE [self: STREAM, error: FS.ErrorDesc] = { WITH self.streamData SELECT FROM fsData: FSDataHandle => fsData.FSErrorDesc _ error ; ENDCASE => ERROR IO.Error[NotImplementedForThisStream, self]; }; ErrorFromStream: PUBLIC PROCEDURE [self: STREAM] RETURNS [FS.ErrorDesc] = { WITH self.streamData SELECT FROM fsData: FSDataHandle => RETURN [fsData.FSErrorDesc]; ENDCASE => ERROR IO.Error[NotImplementedForThisStream, self]; }; SetStreamClassData: PUBLIC PROCEDURE [self: STREAM, data: REF ANY] = { WITH self.streamData SELECT FROM fsData: FSDataHandle => fsData.StreamClassData _ data ; ENDCASE => ERROR IO.Error[NotImplementedForThisStream, self]; }; GetStreamClassData: PUBLIC PROCEDURE [self: STREAM] RETURNS [data: REF ANY] = { WITH self.streamData SELECT FROM fsData: FSDataHandle => RETURN [fsData.StreamClassData] ; ENDCASE => ERROR IO.Error[NotImplementedForThisStream, self]; }; SetFinalizationProc: PUBLIC PROCEDURE [self: STREAM, proc: FileStream.FinalizationProc] = { WITH self.streamData SELECT FROM fsData: FSDataHandle => fsData.FinalizationProc _ proc ; ENDCASE => ERROR IO.Error[NotImplementedForThisStream, self]; }; GetFinalizationProc: PUBLIC PROCEDURE [self: STREAM] RETURNS [proc: FileStream.FinalizationProc] = { WITH self.streamData SELECT FROM fsData: FSDataHandle => RETURN [fsData.FinalizationProc] ; ENDCASE => ERROR IO.Error[NotImplementedForThisStream, self]; }; bufferBad: PROCEDURE [status: FileStreamPrivate.NodeStatus] RETURNS [bad: BOOL] = INLINE { SELECT status FROM valid, needsParallelWrite, parallelWriteActive, needsSequentialWrite, 
sequentialWriteActive => RETURN[FALSE]; ENDCASE => RETURN[TRUE]; }; ReadFilePages: PROC [f: FS.OpenFile, from: ByteNumber, numPages: INT, to: LONG POINTER, interval: VM.Interval] = INLINE { p: PageNumber = from/bytesPerFilePage; TRUSTED{f.Read[from: p, nPages: numPages, to: to]}; VM.MakeUnchanged[interval]; }; WriteFilePages: PROC [f: FS.OpenFile, node: BufferNodeHandle] = { numPages: INT = PagesForRoundUpBytes[node.dataBytesInBuffer]; interval: VM.Interval = node.bufferInterval; firstPage: VM.PageNumber _ 0 ; lastPage: VM.PageNumber _ 0 ; firstPage _ interval.page; lastPage _ interval.page+numPages-1; IF node.firstFileByteInBuffer < 0 THEN RETURN; -- not a valid buffer to write IF firstPage # 0 THEN { ENABLE UNWIND => VM.MakeChanged[interval]; from: LONG POINTER ; TRUSTED {from _ VM.AddressForPageNumber[firstPage]} ; VM.MakeUnchanged[interval]; f.Write[from: from, nPages: lastPage-firstPage+1, to: (node.firstFileByteInBuffer + VM.BytesForPages[firstPage-interval.page])/bytesPerFilePage]; }; }; SetFileSize: PROC [f: FS.OpenFile, byteSize: ByteCount] = { f.SetPageCount[pages: (byteSize+bytesPerFilePage-1)/bytesPerFilePage]; }; GetFileLock: PROC [f: FS.OpenFile] RETURNS [FS.Lock] = { RETURN [f.GetInfo[].lock] }; ProcHandleFromAccessRights: PUBLIC PROC [accessRights: FS.Lock] RETURNS [ procs: FileStreamPrivate.ProcHandle] = { SELECT accessRights FROM read => RETURN [nucleusFileIOReadProcs]; write => RETURN [nucleusFileIOAllProcs]; ENDCASE => RETURN[NIL]; }; StreamFromOpenFile: PUBLIC PROC [openFile: FS.OpenFile, accessRights: FS.Lock, initialPosition: FS.InitialPosition, streamOptions: FS.StreamOptions, streamBufferParms: FS.StreamBufferParms, extendFileProc: FS.ExtendFileProc] RETURNS [stream: STREAM] = { pageAllocation: PageCount; byteLength: ByteCount; fileName: ROPE = openFile.GetName[].fullFName; fsData: FSDataHandle; fsDataFile: FileDataHandle ; node: BufferNodeHandle ; IF streamBufferParms.vmPagesPerBuffer = 128 THEN streamBufferParms.vmPagesPerBuffer _ 127; IF accessRights = $write AND GetFileLock[openFile] # $write THEN FSBackdoor.ProduceError[wrongLock, fileName]; [pages: pageAllocation, bytes: byteLength] _ openFile.GetInfo[]; fsData _ NEW[Data _ []]; fsDataFile _ NEW[FileData _ [ fileName: fileName, accessRights: accessRights, fileLength: byteLength, fileHandle: openFile, streamBufferParms: streamBufferParms, extendFileProc: extendFileProc, streamOptions: streamOptions, byteLength: byteLength, byteSize: pageAllocation*bytesPerFilePage, validBytesOnDisk: byteLength] ]; IF fsDataFile.byteLength > fsDataFile.byteSize THEN ERROR; fsData.fileData _ fsDataFile ; fsDataFile.firstBufferNode _ node _ CreateBufferSpace[streamBufferParms.vmPagesPerBuffer, accessRights]; FOR i: INT IN [2..streamBufferParms.nBuffers] DO node.nextBufferNode _ CreateBufferSpace[streamBufferParms.vmPagesPerBuffer, accessRights]; node _ node.nextBufferNode ; ENDLOOP; stream _ IO.CreateStream[FileStreamPrivate.ProcHandleFromAccessRights[accessRights], fsData]; IOUtils.StoreData[self: stream, key: $Name, data: fsDataFile.fileName]; IF accessRights = $write THEN { fsDataFile.writeStreamData _ fsData ; fsData.isWriteStream _ TRUE ; IF fsDataFile.byteSize = 0 THEN { fsDataFile.byteSize _ NewByteSize[fsDataFile.byteSize]; SetFileSize[fsDataFile.fileHandle, fsDataFile.byteSize]; }; } ELSE { fsDataFile.firstReadStream _ fsData; }; IF initialPosition = start THEN { [] _ FileStreamPrivate.SetupBuffer[fileData: fsDataFile, fsData: fsData, fileByte: 0] } ELSE { node _ FileStreamPrivate.SetupBuffer[fileData: fsDataFile, 
fsData: fsData, fileByte: PageContainingLastByte[fsDataFile.fileLength]]; fsData.index _ node.dataBytesInBuffer; }; IF streamOptions[tiogaRead] AND byteLength > 0 THEN { isTioga: BOOL; len: INT; [yes: isTioga, len: len] _ IsThisThingATiogaFile[stream]; IF isTioga THEN { IF accessRights = $read THEN { FileStream.SetLength[stream, len]; fsDataFile.tiogaReader _ TRUE } ELSE { stream.Close[]; FSBackdoor.ProduceError[cantUpdateTiogaFile, fileName]; } } }; IF FileStreamPrivate.DoFinalization THEN { FSLock.RecordREF[fsData]; SafeStorage.EnableFinalization[fsData]; }; fsData.ConvertFStoIOErrors _ TRUE; fsData _ NIL ; RETURN[stream]; };--StreamFromOpenFile PageContainingLastByte: PROC [byteLen: INT] RETURNS [INT] = INLINE { IF byteLen = 0 THEN RETURN[0] ELSE { byteLen _ byteLen - 1; LOOPHOLE[byteLen, LongNumber[pair]].lo _ BITAND[LOOPHOLE[byteLen, LongNumber[pair]].lo, clearLowBits]; RETURN[byteLen] }; }; OpenFileFromStream: PUBLIC PROC [self: STREAM] RETURNS [FS.OpenFile] = { WITH self.streamData SELECT FROM fsData: FSDataHandle => RETURN [fsData.fileData.fileHandle]; ENDCASE => ERROR IO.Error[NotImplementedForThisStream, self]; }; StreamFromOpenStream: PUBLIC PROC [self: STREAM] RETURNS [stream: STREAM] = { newData: FSDataHandle ; filePos: INT ; WITH self.streamData SELECT FROM selfData: FSDataHandle => { fileData: FileDataHandle = selfData.fileData; IF NOT selfData.isWriteStream OR fileData.firstReadStream # NIL THEN FSBackdoor.ProduceError [code: notImplemented, explanation: "self is not a write stream, or there already is a read stream"]; newData _ NEW[Data _ [ ] ]; newData.fileData _ fileData ; stream _ IO.CreateStream[FileStreamPrivate.ProcHandleFromAccessRights[$read], newData]; IOUtils.StoreData[self: stream, key: $Name, data: fileData.fileName]; fileData.firstReadStream _ newData; filePos _ SetUpClonedStream[fileData: fileData, fsData: selfData]; [] _ FileStreamPrivate.SetupBuffer[fileData: fileData, fsData: newData, fileByte: selfData.currentNode.firstFileByteInBuffer] ; newData.index _ selfData.index ; newData.ConvertFStoIOErrors _ TRUE; newData _ NIL ; }; ENDCASE => ERROR IO.Error[NotImplementedForThisStream, self]; }; SetUpClonedStream: ENTRY PROC [fileData: FileDataHandle, fsData: FSDataHandle] RETURNS [filePos: INT]= { ENABLE UNWIND => NULL; node: BufferNodeHandle _ fileData.firstBufferNode; UNTIL node.nextBufferNode = NIL DO node _ node.nextBufferNode; ENDLOOP ; FOR i:INT IN [1..fileData.streamBufferParms.nBuffers] DO node.nextBufferNode _ CreateBufferSpace[fileData.streamBufferParms.vmPagesPerBuffer, read]; node _ node.nextBufferNode; ENDLOOP; fileData.numberOfStreams _ fileData.numberOfStreams + 1 ; filePos _ fsData.index + fsData.currentNode.firstFileByteInBuffer ; fsData _ NIL ; }; CreateBufferSpace: PROC [vmPagesPerBuffer: INT [1 .. 128], accessRights: FS.Lock] RETURNS [BufferNodeHandle] = { vmPages: INT _ MIN[vmPagesPerBuffer, maxVMPagesPerBuffer] ; newBuffer: BufferNodeHandle _ NEW[BufferNode]; allocateCounter: INT _ 0; newBuffer.bufferInterval _ VM.Allocate[count: vmPages ! 
VM.CantAllocate => { Process.Pause[4]; IF (allocateCounter _ allocateCounter + 1) < 100 THEN RETRY; }; ]; TRUSTED { newBuffer.buffer _ VM.AddressForPageNumber[newBuffer.bufferInterval.page]; IF accessRights = write THEN VM.SwapIn[newBuffer.bufferInterval]; }; newBuffer.bufferBytes _ VM.BytesForPages[pages: vmPages]; RETURN[newBuffer] }; NewByteSize: PROC [byteCount: ByteCount] RETURNS [ByteCount] = { RETURN [byteCount+5120]; }; IsThisThingATiogaFile: PROC [h: STREAM] RETURNS [yes: BOOL, len: INT] = { pos, length: INT; { -- block so EXITS code can use pos, len, and length. controlHeaderId: ARRAY [0..fileIdSize) OF CHAR = [235C,312C]; controlTrailerId: ARRAY [0..fileIdSize) OF CHAR = [205C,227C]; commentHeaderId: ARRAY [0..fileIdSize) OF CHAR = [0C,0C]; fileIdSize: NAT = 2; numTrailerLengths: NAT = 3; -- endSize: NAT = fileIdSize+numTrailerLengths*4; -- trailer plus three lengths ReadLen: PROC [h: STREAM] RETURNS [INT] = { start: PACKED ARRAY [0..3] OF CHARACTER; start[0] _ h.GetChar[]; start[1] _ h.GetChar[]; start[2] _ h.GetChar[]; start[3] _ h.GetChar[]; RETURN [LOOPHOLE[start]] }; commentStart, commentLen, propsLen, controlLen, controlEnd: INT; pos _ h.GetIndex[]; -- save position to restore later length _ h.GetLength[]; -- length including any trailer stuff controlEnd _ length-endSize; -- where the trailer info starts IF controlEnd <= 0 THEN GOTO fail; -- too small h.SetIndex[controlEnd]; -- set up to read the trailer FOR i:NAT IN [0..fileIdSize) DO -- read the controlTrailerId IF h.GetChar[] # controlTrailerId[i] THEN GOTO fail; ENDLOOP; IF (propsLen _ ReadLen[h]) NOT IN [0..controlEnd) THEN GOTO fail; IF (commentStart _ ReadLen[h]) NOT IN [0..controlEnd) THEN GOTO fail; IF ReadLen[h] # length THEN GOTO fail; IF commentStart > 0 THEN { -- may have padded text with a null h.SetIndex[commentStart-1]; len _ IF h.GetChar[]=0C THEN commentStart-1 ELSE commentStart } ELSE h.SetIndex[len _ commentStart]; FOR i:NAT IN [0..fileIdSize) DO -- read the commentHeaderId IF h.GetChar[] # commentHeaderId[i] THEN GOTO fail; ENDLOOP; commentLen _ ReadLen[h]; -- the length of the comment section IF commentStart+commentLen NOT IN [0..controlEnd) THEN GOTO fail; h.SetIndex[commentStart+commentLen]; -- go to start of control info FOR i:NAT IN [0..fileIdSize) DO -- check the controlHeaderId IF h.GetChar[] # controlHeaderId[i] THEN GOTO fail; ENDLOOP; controlLen _ ReadLen[h]; -- the length of the control section IF commentStart+commentLen+controlLen # length THEN GOTO fail; GOTO succeed; EXITS fail => { h.SetIndex[pos]; RETURN [FALSE, length] }; succeed => { h.SetIndex[pos]; RETURN [TRUE, len] }; }; }; nucleusFileIOReadProcs: PUBLIC FileStreamPrivate.ProcHandle = IO.CreateStreamProcs[ variety: $input, class: $File, getChar: GetChar, endOf: EndOf, charsAvail: CharsAvail, getBlock: GetBlock, unsafeGetBlock: UnsafeGetBlock, putChar: NIL, -- not implemented putBlock: NIL, -- call PutChar unsafePutBlock: NIL, -- call PutChar flush: Flush, reset: Reset, close: Close, getIndex: GetIndex, setIndex: SetIndex, backup: Backup, getLength: GetLength ]; nucleusFileIOAllProcs: PUBLIC FileStreamPrivate.ProcHandle = IO.CreateStreamProcs[ variety: $inputOutput, class: $File, getChar: GetChar, endOf: EndOf, charsAvail: CharsAvail, getBlock: GetBlock, unsafeGetBlock: UnsafeGetBlock, putChar: PutChar, putBlock: PutBlock, unsafePutBlock: UnsafePutBlock, flush: Flush, reset: Reset, close: Close, getIndex: GetIndex, setIndex: SetIndex, backup: Backup, getLength: GetLength, setLength: SetLength, eraseChar: EraseChar ]; fQ: 
SafeStorage.FinalizationQueue; Finalize: PROC = BEGIN DO fsData: FSDataHandle _ NARROW [ SafeStorage.FQNext[fQ] ]; streamIsClosed: BOOL ; forceCleanUp: BOOL _ FALSE; FSLock.RemoveREF[fsData]; streamIsClosed _ fsData.streamIsClosed ; IF NOT fsData.streamIsClosed AND fsData.currentNode # NIL THEN { ForceOut[ fsData: fsData ! FS.Error => {forceCleanUp _ TRUE; CONTINUE}]; CloseFileDataForStream[fileData: fsData.fileData, fsData: fsData, forceCleanUp: forceCleanUp ! FS.Error => CONTINUE]; }; IF fsData.isWriteStream THEN { IF fsData.fileData.writeStreamData = fsData THEN fsData.fileData.writeStreamData _ NIL ; } ELSE { IF fsData.fileData.firstReadStream = fsData THEN fsData.fileData.firstReadStream _ NIL ; }; IF fsData.FinalizationProc # NIL THEN { fsData.FinalizationProc [ openFile: fsData.fileData.fileHandle, data: fsData.StreamClassData, closed: streamIsClosed]; }; fsData _ NIL; ENDLOOP; END;
END.

CHANGE LOG
Created by MBrown on June 22, 1983 10:08 am
Changed by MBrown on August 19, 1983 2:44 pm
Changed by Birrell on August 23, 1983 3:14 pm
Changed by MBrown on August 25, 1983 1:18 pm
Changed by MBrown on September 17, 1983 8:41 pm
Changed by Hagmann on November 22, 1983 4:29 pm
Changed by Hagmann on November 28, 1983 12:00 pm
Changed by Hagmann on December 6, 1983 4:52 pm
Changed by Hagmann on December 27, 1983 4:52 pm
Changed by Hagmann on December 29, 1983 12:54 pm
Changed by Hagmann on January 3, 1984 11:34 am
Changed by Hagmann on January 10, 1984 9:04 am
Changed by Hagmann on January 20, 1984 5:11 pm
Changed by Hagmann on February 6, 1984 2:19:10 pm PST
Changed by Hagmann on May 9, 1984 9:12:21 am PDT
Changed by December 18, 1984 5:40:06 pm PST
Created by Hagmann on November 22, 1983 4:30 pm
Changed by Hagmann on November 28, 1983 12:01 pm
Changed by Hagmann on December 6, 1983 4:52 pm
Changed by Hagmann on January 3, 1984 11:33 am

FileStreamImpl.mesa
Copyright © 1985, 1986 by Xerox Corporation. All rights reserved.
MBrown on September 17, 1983 8:39 pm
Rovner on August 15, 1983 1:02 pm
Levin on September 22, 1983 3:22 pm
Birrell on August 23, 1983 3:14 pm
Schroeder on November 28, 1983 12:36 pm
Russ Atkinson (RRA) February 4, 1985 3:09:10 pm PST
Bob Hagmann February 13, 1989 10:12:32 am PST
Doug Wyatt, December 12, 1986 5:19:17 pm PST
Please maintain change log at end of file.

This code does not protect itself from parallel use of a stream by concurrent processes. It assumes that the processes will synchronize at a higher level. Parallel use of different streams for the same open file is expected, but the read/write stream must be opened by StreamOpen or StreamFromOpenFile, and the read stream by StreamFromOpenStream.
Get and Put
Restores dataBytesInBuffer and fileLength if they are messed up by a PutChar or PutBlock past the end of file. Same logic in SetLengthUnderMonitor. Only call this routine with a write stream.
Paw through write stream info to find the file length. The new length is the true length modulo some uncertainty whether a put was done in parallel during the execution of this routine. The file length returned is at least as big as the file was when the monitor was acquired. This is fine because the notion of EOF or file length for a reader of a file that is in the process of being written is somewhat vague. A higher level protocol should keep this straight in the client (why are you reading bytes that might not be there yet?). Fix up dataBytesInBuffer if needed. (This is mostly intended to allow the read stream to look at the file size seen by the write stream.)
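The length computation just described boils down to: the length a reader sees is the larger of the recorded fileLength and the end of the writer's current buffer (firstFileByteInBuffer plus dataBytesInBuffer, after folding in the writer's index). A minimal Python sketch of that calculation, for illustration only; the field names are hypothetical stand-ins for the FileData and BufferNode fields above, and the real implementation is EstablishFileLength in this module:

# Illustration only: conservative file length, mirroring EstablishFileLength.
def establish_file_length(file_length, write_stream):
    """Return a length at least as large as the file was at the time of the call."""
    if write_stream is None or write_stream["closed"]:
        return file_length
    node = write_stream["current_node"]
    # A put may have advanced the stream index past dataBytesInBuffer; fix it up.
    node["data_bytes"] = max(write_stream["index"], node["data_bytes"])
    return max(file_length, node["first_file_byte"] + node["data_bytes"])

# Example: the writer has put 10 bytes into a buffer that starts at file byte 512.
print(establish_file_length(
    500,
    {"closed": False, "index": 10,
     "current_node": {"first_file_byte": 512, "data_bytes": 4}}))  # prints 522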
Suspect that end-of-buffer or end-of-file has been reached. This may be false! However, the test is cheap and usually false.
File length may be wrong if writer is using the same buffer as the reader, so get a good file length. This is not cheap: we have to get a monitor lock and maybe look inside the write stream. Note that we use the local variable fileLength and not selfData.fileData.fileLength.
We are not at EOF. If we are at EOB, then get the next buffer. Not EOF and not EOB can occur if the writer has put some chars into the buffer and this was not reflected in dataBytesInBuffer until we did the EstablishFileLength call.
Change use to IOUtils.AddNAT when the 0+0 gives NAT.LAST bug is fixed.
Bytes may be added concurrently with this get. EstablishFileLength gives a true file length (which may be different from what it was when we started this iteration) to see if there is more data to blt.
The below IF is needed for the same reason we called EstablishFileLength above.
Fail if startIndex<0 or stopIndexPlusOne<0.
All designed to make the max number of bytes transferred an integral number of words, which is good.
Scale block.startIndex into [0 .. bytesPerWord); see the sketch after this comment block.
Transfer at most maxBytesMoved bytes from the stream to block^. Assert block.startIndex IN [0 .. maxStopIndexPlusOne), < stopIndexPlusOne. Assert countRemaining > 0.
The following loop transfers from the stream to textBlock^ until textBlock^ is full or end of file is reached. Assert textBlock.stopIndexPlusOne = maxStopIndexPlusOne.
Scale block.startIndex into [0 .. bytesPerWord).
Transfer at most maxBytesMoved bytes from block^ to the stream. Assert block.startIndex IN [0 .. maxStopIndexPlusOne), < stopIndexPlusOne. Assert countRemaining > 0.
The following loop transfers textBlock^ to the stream. Assert textBlock.stopIndexPlusOne = maxStopIndexPlusOne.
On entry, index = dataBytesInBuffer = bufferBytes. Exit with same position in file, but index < dataBytesInBuffer or EOF. Handles implicit file extension. Called from GetChar, PutChar, GetBlock, PutBlock, UnsafeGetBlock, UnsafePutBlock.
Do cheap test to see if not at EOF. Cheap test inconclusive. Find real file length.
Make sure dataBytesInBuffer and fileLength are correct by calling CleanupAfterPut or EstablishFileLength.
Ensure that page containing byte "index" is in the buffer.
I would like to do a CONTINUE, but that is a "GOTO" and thus will stop the UNWIND here. There is some bug that confuses the UNWIND machinery that trashes fsData from the local frame. We recompute it here.
Procs that are called via the property list mechanism.
Note: do not reduce the size of a shortened file until stream closed. If old index was past EOF, then move it to EOF. We leave the cloned read stream alone: if it does not do a setPosition then it will get an EOF on its next read.
CleanupAfterPut logic is copied here, but we cannot call CleanupAfterPut because it is an ENTRY.
Grow file if needed.
Look through nodes and adjust those past EOF or with EOF in buffer.
All of the buffer is past EOF. Set dataBytesInBuffer to 0 so that gets will find themselves at EOF, and clean the node to avoid redundant write.
EOF is in (or just past) this buffer.
All of node is in the file.
Processing for "Close" that must be done under the monitor. forceCleanUp being TRUE is when Close has caught UNWIND, and we should do all we can to blow away the data structures.
Make sure the read-ahead has completed.
We are now committed to closing the stream. Our last non-internal operation has completed (the SetFileSize).
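The "scale block.startIndex into [0 .. bytesPerWord)" step mentioned above keeps the byte address base + startIndex fixed while folding whole words into the base pointer, so that each ByteBlt moves at most maxBytesMoved bytes. A small Python sketch of that arithmetic, for illustration only; the names are hypothetical, and bytesPerWord = 2 is an assumption matching PrincOps-style 16-bit words:

# Illustration only: the word-offset scaling done before the ByteBlt loops
# in UnsafeGetBlock/UnsafePutBlock (applied when startIndex exceeds maxBytesMoved).
BYTES_PER_WORD = 2  # assumption; PrincOps words are 16 bits

def scale_start_index(base_word, start_index):
    """Fold whole words of start_index into the base so the remainder is < BYTES_PER_WORD."""
    word_offset = start_index // BYTES_PER_WORD
    return base_word + word_offset, start_index - word_offset * BYTES_PER_WORD

# The byte address base*BYTES_PER_WORD + start_index is unchanged by the scaling:
base, start = scale_start_index(base_word=1000, start_index=70001)
assert base * BYTES_PER_WORD + start == 1000 * BYTES_PER_WORD + 70001
print(base, start)  # 36000 1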
Look for up to two buffers to free if another stream is around; else, free all buffers.
Ensure that all buffers, except the one corresponding to the currentNode, have been written to disk. Normal cases are to return immediately when no writes are outstanding, to wait until one finishes, or when SetUpNodes can't get a node.
What has happened is that an asynchronous write has failed, or we are trying to flush all dirty pages in ForceOut. The parallel process has given up, and we are about to re-do the write under the monitor to get the signal and the stack correct so that the client sees a correct view of the error.
A pre-read has failed. Mark it needsSequentialRead if a stream needs it to continue, or just ignore it if it was a real preread.
By catching and ignoring FS errors, we ensure that the write will later be done in the process of the client so that signals will look correct.
On FS errors, invalidate the pre-read.
For write streams, didPut = FALSE on entry (someone else called CleanupAfterPut).
Arranges buffer so that fileByte (must be buffer-aligned) is the first byte in it. If buffer is dirty, writes it to file. Maintains invariants of dataBytesInBuffer, bufferBytes, and firstFileByteInBuffer in the face of all this. DOES NOT update index. Called from AdvanceBuffer, SetIndex, SetLength, StreamFromOpenStream and StreamFromOpenFile.
Write buffer if needed. See if there are buffers that must be written sequentially. Extend file if we are about to write over it.
Copy the status out of the node before the tests. Since the status can change at any time, it would be possible to have none of the arms of the SELECT executed when there was a pre-read to do. (This may not be necessary anymore since I changed the way the SELECT is done, but it doesn't hurt.)
Wait around and try to get a node that is OK to re-use. Note that just because one becomes free does not mean that SetUpNodes will find it next time around the loop (another stream may have grabbed it). If the buffers get into an error state, then nodeNowAvailable returns FALSE and we use FinishWrites to clean up.
Avoid read: the data is trash on disk. We are extending the file anyway.
This procedure runs under the monitor. It looks for buffers for the current and next nodes. The node returned in currentNode is the one to use as current. If it is marked "active", then the caller must fill it before use. The node nextNode is returned as NIL if no preread is needed. If non-NIL, the caller should arrange to preread into this buffer. (The idle-node selection is sketched after this comment block.)
Buffer already has correct position in file.
Buffer is next after current.
Buffer not "near" stream pointer (tested in above two cases) and it is not active, and it is not near the other stream (if it exists).
Preread if a sequence has been established.
A node already points to the right place in the file.
Called from Flush for write streams, or Close for any stream. This is the only proc that sets byte length, and the only proc that finishes trans. This call does the writes under the monitor lock. This should be true for read streams since you want to stop the writer from dirtying buffers. For write streams, you could get by without the lock provided you were extremely careful about a ForceOut on the read stream, or a StreamFromOpenStream (it will allocate more buffers). The easy way out is to use the monitor.
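The fallback buffer choice inside SetUpNodes described above amounts to: among nodes that are idle (status valid or invalid and useCount = 0), reuse the one with the smallest LRUCount. A Python sketch of that selection, for illustration only; the node records and field names are hypothetical stand-ins for BufferNode, and the real code also handles the current-node and read-ahead cases:

# Illustration only: the "availableNode" choice made while walking the node list.
def pick_available_node(nodes):
    """Return the least-recently-used idle node, or None if every node is busy."""
    best = None
    for node in nodes:
        idle = node["status"] in ("valid", "invalid") and node["use_count"] == 0
        if idle and (best is None or node["lru_count"] < best["lru_count"]):
            best = node
    return best

nodes = [
    {"status": "valid", "use_count": 1, "lru_count": 3},                # busy: still in use
    {"status": "needsSequentialWrite", "use_count": 0, "lru_count": 1}, # busy: must be written first
    {"status": "invalid", "use_count": 0, "lru_count": 2},              # idle: reusable
]
print(pick_available_node(nodes))  # the idle node with lru_count 2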
Talking to FS
vmPage: VM.PageNumber ; FOR vmPage IN [interval.page..interval.page+numPages) DO IF VM.State[vmPage].dataState # unchanged THEN { lastPage _ vmPage ; IF firstPage = 0 THEN firstPage _ vmPage ; }; ENDLOOP;
Stream creation
No monitors are needed in this code since, until this proc returns, no other code can refer to the streams.
Index must always be less than 64K, so we have to clip off a page from the max.
initialPosition = end
Make length look changed by sneaky call to SetLength (not in stream procs). Since stream is opened for read only, this call won't change the length in the file.
You can't incrementally update a Tioga file with IO!
IF FileStreamPrivate.DoFinalization THEN { FSLock.RecordREF[newData]; SafeStorage.EnableFinalization[newData]; };
Find last node. Allocate some more nodes.
Buffer management
We cannot accept anything but the right size interval. The program assumes that all buffers are the same size.
Tioga
Procedure records (never modified)
Finalization code
Initialization
start code: start up finalization stuff
IF FileStreamPrivate.DoFinalization THEN { fQ _ SafeStorage.NewFQ[]; SafeStorage.EstablishFinalization[CODE[Data], 2, fQ]; TRUSTED { Process.Detach[FORK Finalize[]] }; };
By editing FileIOAlpineImpl. Close FS file when closing stream (this should really be an option).
In SetFileSize: byteSize/bytesPerFilePage -> (byteSize+bytesPerFilePage-1)/bytesPerFilePage. In SetIndex: fsData.byteSize < firstBufferByte+fsData.dataBytesInBuffer -> fsData.byteSize <= firstBufferByte+fsData.dataBytesInBuffer. Implemented GetFileLock (was stubbed waiting for FS). In StreamFromOpenFile, if stream open for write and file has no pages, extend it.
Conversion to new IO interface.
Implement multiple-page stream buffer. Implement coupled read and write streams on same open file. Changed data structures in FileStreamPrivate, and fixed references in this module to the new data structures. This meant changes to nearly every routine. Close file during StreamOpen if an error occurs in StreamFromOpenFile. Implement streamOptions and streamBufferParms features. Added finalization. Changed name from FileIOFSImpl.
Split out create code to make FileStreamCreateImpl smaller since compiler blows up in pass 3.
Fixed EndOf bug for multiple streams.
Added test for DoFinalization to enable FileStream testing without making a boot file.
Added code for process cache.
Fixed bug (reported by Plass, found by Nix) in UnsafeGetBlock and UnsafePutBlock for blocks > 32K.
Fixed bug (reported and new code by Plass) in AddNAT.
Added conditional conversion for FS to IO Errors in convertFStoIOError. This makes FS errors that occur during stream open appear as FS errors, not IO errors (fixes bug reported by Nix).
Fixed stream hung problem reported by Willie-Sue and Stewart. SetupBuffer was treating buffers that are in some stage of writing as not valid. It would wait for them to become valid (due to a parallel read), or do sequential reads if the parallel read failed. This caused buffers that were writing in parallel that had a write error to wait forever (the node never became valid or had a parallel read error).
Added logic to use the changed/unchanged VM information for writing to disk.
Added check for read-only streams to ForceOut. This is to avoid a bug in the changed/unchanged VM information after a rollback (some pages looked changed for a read-only file).
Added check to ensure that read aheads complete before the buffer is deallocated by VM during a close.
Change log for FileStreamImpl:

By editing FileIOAlpineImpl.
Close FS file when closing stream (this should really be an option).
In SetFileSize: byteSize/bytesPerFilePage -> (byteSize+bytesPerFilePage-1)/bytesPerFilePage.
In SetIndex: fsData.byteSize < firstBufferByte+fsData.dataBytesInBuffer -> fsData.byteSize <= firstBufferByte+fsData.dataBytesInBuffer.
Implemented GetFileLock (was stubbed waiting for FS).
In StreamFromOpenFile, if the stream is open for write and the file has no pages, extend it.
Conversion to new IO interface.
Implement multiple-page stream buffer.
Implement coupled read and write streams on the same open file. Changed data structures in FileStreamPrivate, and fixed references in this module to the new data structures. This meant changes to nearly every routine.
Close file during StreamOpen if an error occurs in StreamFromOpenFile.
Implement streamOptions and streamBufferParms features.
Added finalization. Changed name from FileIOFSImpl.
Split out create code to make FileStreamCreateImpl smaller, since the compiler blows up in pass 3.
Fixed EndOf bug for multiple streams.
Added test for DoFinalization to enable FileStream testing without making a boot file.
Added code for process cache.
Fixed bug (reported by Plass, found by Nix) in unsafegetblock and unsafeputblock for blocks > 32K.
Fixed bug (reported and new code by Plass) in AddNAT.
Added conditional conversion of FS to IO errors in convertFStoIOError. This makes FS errors that occur during stream open appear as FS errors, not IO errors (fixes bug reported by Nix).
Fixed stream hung problem reported by Willie-Sue and Stewart. SetupBuffer was treating buffers that are in some stage of writing as not valid: it would wait for them to become valid (due to a parallel read), or do sequential reads if the parallel read failed. This caused buffers being written in parallel that had a write error to wait forever (the node never became valid, nor did it get a parallel-read error).
Added logic to use the changed/unchanged VM information for writing to disk.
Added check for read-only streams to ForceOut. This is to avoid a bug in the changed/unchanged VM information after a rollback (some pages looked changed for a read-only file).
Added check to ensure that read-aheads complete before the buffer is deallocated by VM during a close. This fixes a problem encountered by Frank Crow while running the TSetter, and narrowed down to FileStream by Willie-Sue.
Added UNWIND in Close's call to ForceOut, and the forceCleanUp argument to CloseFileDataForStream.
Reformatted file.

Bob Hagmann January 31, 1985 5:41:22 pm PST
remove use of FSLock
changes to: Finalize

Bob Hagmann May 14, 1985 11:09:42 am PDT
put back in use of FSLock
changes to: Finalize, Initialization

Bob Hagmann September 18, 1985 07:06:15 PDT
WriteFilePages and FinishWrites changes to avoid writing pages improperly after shortening the file

Bob Hagmann October 25, 1985 2:02:44 pm PDT
WriteFilePages to mark interval unchanged after write. Fixed subtle finalization bug.

Bob Hagmann June 9, 1986 5:10:02 pm PDT
checked for node invalid on all get/put entries (previous error can get us into trouble)
merged in FileStreamCreateImpl

Change log for FileStreamCreateImpl:

By cutting this out of FSFileIOImpl.
Added test for DoFinalization to enable FileStream testing without making a boot file.
Removed code for process cache.
Added enable for ConvertFStoIOErrors.

Russ Atkinson (RRA) May 9, 1985 1:56:42 pm PDT
Added VM.SwapIn for write buffer pages to avoid bogus page faults
changes to: StreamFromOpenFile, SetUpClonedStream, CreateBufferSpace

Bob Hagmann May 14, 1985 11:10:58 am PDT
put back in use of FSLock
changes to: StreamFromOpenFile, StreamFromOpenStream

Bob Hagmann November 13, 1986 1:02:46 pm PST
modified to allow multiple parallel writes

Doug Wyatt, December 12, 1986 5:18:31 pm PST
changed Basics.LongNumber[num] to Basics.LongNumber[pair]
changes to: RoundUpToPages, PageContainingLastByte