-- FileStreamImpl.mesa
-- Please maintain change log at end of file.
-- Last Edited by
-- MBrown on September 17, 1983 8:39 pm
-- Rovner on August 15, 1983 1:02 pm
-- Levin on September 22, 1983 3:22 pm
-- Birrell on August 23, 1983 3:14 pm
-- Schroeder on November 28, 1983 12:36 pm
-- Hagmann on December 6, 1983 4:51 pm
DIRECTORY
BasicTime USING [GMT],
Basics USING [bytesPerWord, LongNumber, BITAND, LowHalf],
FS,
FileStream,
FileStreamPrivate USING[ Data, DoFinalization, FSDataHandle, BufferNodeHandle, BufferNode,
FileDataHandle, FileData, NodeStatus, ProcHandle, StartRequest ],
FSLock USING [RemoveREF ],
PrincOps USING [ByteBltBlock],
PrincOpsUtils USING [ByteBlt],
Process USING [Detach, GetPriority, Priority, priorityForeground, SetPriority],
IO,
IOUtils,
Rope,
RuntimeError USING [BoundsFault],
SafeStorage USING [EstablishFinalization, FinalizationQueue,
FQNext, NewFQ],
VM USING [ Free, PageNumber]
;
FileStreamImpl: CEDAR MONITOR
LOCKS fileData.lockRecord USING fileData: FileDataHandle
IMPORTS
Basics,
FileStreamPrivate ,
FS,
FSLock,
I: PrincOpsUtils,
IO,
IOUtils,
Process,
RuntimeError,
SafeStorage,
VM
EXPORTS
FileStreamPrivate,
FileStream =
BEGIN OPEN Basics;
ROPE: TYPE = Rope.ROPE;
STREAM: TYPE = IO.STREAM;
ByteCount: TYPE = INT;
ByteNumber: TYPE = ByteCount; -- index rather than count
PageNumber: TYPE = VM.PageNumber;
bytesPerFilePage: CARDINAL = FS.BytesForPages[1];
minFileExtend: INT = 10*bytesPerFilePage;
Data: TYPE = FileStreamPrivate.Data;
FSDataHandle: TYPE = FileStreamPrivate.FSDataHandle;
BufferNode: TYPE = FileStreamPrivate.BufferNode;
BufferNodeHandle: TYPE = FileStreamPrivate.BufferNodeHandle;
FileDataHandle: TYPE = FileStreamPrivate.FileDataHandle;
FileData: TYPE = FileStreamPrivate.FileData;
ProcHandle: TYPE = FileStreamPrivate.ProcHandle;
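-- Debugging aid: a ring buffer recording the nodes returned by recent calls
-- to SetUpNodes.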
DebugQueueItem: TYPE = RECORD [
current: BufferNodeHandle,
currentStatus: FileStreamPrivate.NodeStatus,
currentFirstFileByteInBuffer: INT,
next: BufferNodeHandle,
nextStatus: FileStreamPrivate.NodeStatus,
nextFirstFileByteInBuffer: INT
];
DebugQueue: ARRAY [0..20) OF DebugQueueItem ;
DebugQueuePut: INT ← 0 ;
-- This code does not protect itself from parallel use of a stream by concurrent
-- processes. It assumes that the processes will synchronize at a higher level.
-- Parallel use of different streams for the same open file is expected, but the
-- read/write stream must be opened by StreamOpen or StreamFromOpenFile,
-- and the read stream by StreamFromOpenStream.
-- Get and Put
CleanupAfterPut: ENTRY PROC [fileData: FileDataHandle, selfData: FSDataHandle] = INLINE {
-- Restores dataBytesInBuffer and fileLength if they are messed up by a PutChar or
-- PutBlock past the end of file.
-- The same logic appears in SetLengthUnderMonitor.
-- Only call this routine with a write stream.
currentNode: BufferNodeHandle = selfData.currentNode;
IF currentNode.didPut THEN {
currentNode.bufferDirty ← TRUE;
IF selfData.index > currentNode.dataBytesInBuffer THEN {
currentNode.dataBytesInBuffer ← selfData.index;
fileData.fileLength ← currentNode.firstFileByteInBuffer + selfData.index };
currentNode.didPut ← FALSE }};
EstablishFileLength: ENTRY PROC[fileData: FileDataHandle ]
RETURNS [fileLength: INT] = INLINE {
-- Paw through write stream info to find the file length. The new length is
-- the true length modulo some uncertainty about whether a put was done in parallel
-- during the execution of this routine. The file length returned is
-- at least as big as the file was when the monitor was acquired. This
-- is fine because the notion of EOF or file length for a reader of a file that is
-- in the process of being written is somewhat vague. A higher level protocol
-- should keep this straight in the client (why are you reading bytes that
-- might not be there yet?). Fix up dataBytesInBuffer if needed.
-- (This is mostly intended to allow the read stream to look at the file size seen
-- by the write stream.)
ENABLE UNWIND => NULL;
writeData: FSDataHandle ← fileData.writeStreamData;
IF writeData = NIL OR writeData.streamIsClosed THEN {
writeData ← NIL ;
RETURN[fileData.fileLength] ;
}
ELSE {
writeNode: BufferNodeHandle = writeData.currentNode ;
writeNode.dataBytesInBuffer ← MAX[ writeData.index, writeNode.dataBytesInBuffer] ;
fileLength ← MAX[
fileData.fileLength,
writeNode.firstFileByteInBuffer + writeNode.dataBytesInBuffer ] ;
fileData.fileLength ← fileLength;
};
writeData ← NIL ;
};
convertFStoIOError: PROC [self: STREAM, error: FS.ErrorDesc] = INLINE {
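-- Records the FS error in the stream data so the client can retrieve it with
-- ErrorFromStream, then raises IO.Error[$Failure, self].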
selfData: FSDataHandle ← NARROW[self.streamData];
selfData.FSErrorDesc ← error ;
IO.Error[$Failure, self];
};
GetChar: PUBLIC PROC [self: STREAM] RETURNS [CHAR] = {
ENABLE FS.Error => {
convertFStoIOError [self, error];
};
selfData: FSDataHandle ← NARROW[self.streamData];
node: BufferNodeHandle ← selfData.currentNode ;
c: CHAR;
fileLength: INT ;
IF selfData.index >= node.dataBytesInBuffer THEN {
-- Suspect that end-of-buffer or end-of-file has been reached.
-- This may be false! However, the test is cheap and usually false.
fileLength ← EstablishFileLength[fileData: selfData.fileData ] ;
-- File length may be wrong if writer is using the same buffer
-- as the reader, so get a good file length. This is not cheap: we
-- have to get a monitor lock and maybe look inside the write stream.
-- Note that we use the local variable fileLength and not
-- selfData.fileData.fileLength.
IF fileLength <= selfData.index+node.firstFileByteInBuffer
THEN ERROR IO.EndOfStream[self];
-- We are not at EOF. If we are at EOB, then get the next buffer.
-- Not EOF and not EOB can occur if the writer has put some
-- char's into the buffer and this was not reflected in dataBytesInBuffer
-- until we did the EstablishFileLength call.
IF selfData.index = node.bufferBytes THEN node ← AdvanceBuffer[selfData] };
TRUSTED{c ← node.buffer[selfData.index]};
selfData.index ← selfData.index + 1;
selfData ← NIL ;
RETURN[c] ;
};
PutChar: PUBLIC PROC [self: STREAM, char: CHAR] = {
ENABLE FS.Error => {
convertFStoIOError [self, error];
};
selfData: FSDataHandle ← NARROW[self.streamData];
node: BufferNodeHandle ← selfData.currentNode ;
IF selfData.index = node.bufferBytes THEN node ← AdvanceBuffer[selfData];
TRUSTED{node.buffer[selfData.index] ← char};
selfData.index ← selfData.index + 1;
node.didPut ← TRUE ;
selfData ← NIL ;
};
-- Change to use IOUtils.AddNAT when the bug where 0+0 gives NAT.LAST is fixed.
AddNAT: PROC [a, b: NAT] RETURNS [NAT] = INLINE {
sum: CARDINAL = LOOPHOLE [a, CARDINAL] + LOOPHOLE [b, CARDINAL];
-- Clamp at NAT.LAST rather than returning an out-of-range NAT.
RETURN [IF sum > NAT.LAST THEN NAT.LAST ELSE LOOPHOLE [sum, NAT]];
};
GetBlock: PUBLIC PROC [self: STREAM, block: REF TEXT, startIndex: NAT,
count: NAT]
RETURNS [nBytesRead: NAT] = TRUSTED {
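-- Reads up to count bytes into block starting at startIndex, stopping at end of
-- file or at block.maxLength. Updates block.length if anything was read.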
ENABLE FS.Error => {
convertFStoIOError [self, error];
};
selfData: FSDataHandle ← NARROW[self.streamData];
textBlock: PrincOps.ByteBltBlock;
countRemaining: NAT;
stopIndexPlusOne: NAT = MIN [block.maxLength, AddNAT[startIndex, count]];
textBlock ← [
blockPointer: LOOPHOLE[block, LONG POINTER] + TEXT[0].SIZE,
startIndex: startIndex,
stopIndexPlusOne: stopIndexPlusOne];
countRemaining ←
IF startIndex > stopIndexPlusOne THEN 0 ELSE stopIndexPlusOne-startIndex;
nBytesRead ← 0;
WHILE countRemaining # 0 DO
bufferBlock: PrincOps.ByteBltBlock ← [
blockPointer: selfData.currentNode.buffer,
startIndex: selfData.index,
stopIndexPlusOne: selfData.currentNode.dataBytesInBuffer];
countTransferred: CARDINAL ← 0;
IF bufferBlock.startIndex < bufferBlock.stopIndexPlusOne THEN
countTransferred ← I.ByteBlt[from: bufferBlock, to: textBlock];
selfData.index ← selfData.index + countTransferred;
nBytesRead ← nBytesRead + countTransferred;
IF (countRemaining ← countRemaining - countTransferred) = 0 THEN EXIT;
-- Bytes may be added concurrently with this get. EstablishFileLength gives
-- a true file length (which may be different from what it was when we
-- started this iteration) to see if there is more data to blt.
IF EstablishFileLength[fileData: selfData.fileData ] <=
selfData.index + selfData.currentNode.firstFileByteInBuffer
THEN EXIT;
textBlock.startIndex ← textBlock.startIndex + countTransferred;
-- The below IF is needed for the same reason we called
-- EstablishFileLength above.
IF selfData.index = selfData.currentNode.bufferBytes THEN [] ← AdvanceBuffer[selfData];
ENDLOOP;
IF nBytesRead # 0 THEN block.length ← startIndex + nBytesRead;
selfData ← NIL ;
RETURN[nBytesRead] };
PutBlock: PUBLIC PROC [self: STREAM, block: REF READONLY TEXT, startIndex: NAT,
count: NAT] = TRUSTED {
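-- Writes up to count bytes of block, starting at startIndex, to the stream,
-- advancing buffers (and extending the file) as needed.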
ENABLE FS.Error => {
convertFStoIOError [self, error];
};
selfData: FSDataHandle ← NARROW[self.streamData];
-- Fail if startIndex<0 or stopIndexPlusOne<0.
textBlock: PrincOps.ByteBltBlock;
countRemaining: NAT;
stopIndexPlusOne: NAT ← AddNAT[startIndex, count];
IF stopIndexPlusOne > block.maxLength THEN stopIndexPlusOne ← block.length;
textBlock ← [
blockPointer: LOOPHOLE[block, LONG POINTER] + TEXT[0].SIZE,
startIndex: startIndex,
stopIndexPlusOne: stopIndexPlusOne];
countRemaining ←
IF startIndex > stopIndexPlusOne THEN 0 ELSE stopIndexPlusOne-startIndex;
WHILE countRemaining # 0 DO
bufferBlock: PrincOps.ByteBltBlock ← [
blockPointer: selfData.currentNode.buffer,
startIndex: selfData.index,
stopIndexPlusOne: selfData.currentNode.bufferBytes]; -- allow put past current eof.
countTransferred: CARDINAL ← I.ByteBlt[from: textBlock, to: bufferBlock];
selfData.index ← selfData.index + countTransferred;
selfData.currentNode.didPut ← TRUE;
IF (countRemaining ← countRemaining - countTransferred) = 0 THEN EXIT;
textBlock.startIndex ← textBlock.startIndex + countTransferred;
[] ← AdvanceBuffer[selfData];
ENDLOOP;
selfData ← NIL ;
};
maxWordsMoved: INT = (LAST[CARDINAL] / bytesPerWord) - 1;
maxBytesMoved: INT = maxWordsMoved * bytesPerWord;
maxStopIndexPlusOne: INT = maxBytesMoved + 1;
-- all designed to make the max number of bytes transferred an integral number
-- of words, which is good
UnsafeGetBlock: PUBLIC UNSAFE PROC [self: STREAM, block: IO.UnsafeBlock]
RETURNS [nBytesRead: INT] = UNCHECKED {
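-- Like GetBlock, but transfers into the raw storage described by block,
-- moving at most maxBytesMoved bytes per outer iteration.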
ENABLE FS.Error => {
convertFStoIOError [self, error];
};
selfData: FSDataHandle ← NARROW[self.streamData];
textBlock: PrincOps.ByteBltBlock;
stopIndexPlusOne: INT;
IF block.startIndex < 0 OR block.count < 0 THEN ERROR RuntimeError.BoundsFault;
IF block.count = 0 THEN {
selfData ← NIL ;
RETURN [0];
};
IF block.startIndex > maxBytesMoved THEN {
-- scale block.startIndex into [0 .. bytesPerWord)
wordOffset: INT = block.startIndex / bytesPerWord;
block.base ← block.base + wordOffset;
block.startIndex ← block.startIndex - wordOffset*bytesPerWord;
};
stopIndexPlusOne ← block.startIndex + block.count;
nBytesRead ← 0;
DO
-- Transfer at most maxBytesMoved bytes from the stream to block^.
-- Assert block.startIndex IN [0 .. maxStopIndexPlusOne), < stopIndexPlusOne
countRemaining: CARDINAL;
textBlock ← [
blockPointer: block.base,
startIndex: block.startIndex,
stopIndexPlusOne: MIN[maxStopIndexPlusOne, stopIndexPlusOne]];
countRemaining ← textBlock.stopIndexPlusOne - textBlock.startIndex;
-- Assert countRemaining > 0
-- The following loop transfers from the stream to textBlock^ until textBlock^ is full
--or end of file is reached.
DO
bufferBlock: PrincOps.ByteBltBlock ← [
blockPointer: selfData.currentNode.buffer,
startIndex: selfData.index,
stopIndexPlusOne: selfData.currentNode.dataBytesInBuffer];
countTransferred: CARDINAL ← 0;
IF bufferBlock.startIndex < bufferBlock.stopIndexPlusOne THEN
countTransferred ← I.ByteBlt[from: bufferBlock, to: textBlock];
selfData.index ← selfData.index + countTransferred;
nBytesRead ← nBytesRead + countTransferred;
IF (countRemaining ← countRemaining - countTransferred) = 0 THEN EXIT;
IF EstablishFileLength[fileData: selfData.fileData ] <=
selfData.index + selfData.currentNode.firstFileByteInBuffer THEN {
selfData ← NIL ;
GOTO return;
};
textBlock.startIndex ← textBlock.startIndex + countTransferred;
IF selfData.index = selfData.currentNode.bufferBytes THEN [] ← AdvanceBuffer[selfData];
ENDLOOP;
IF textBlock.stopIndexPlusOne = stopIndexPlusOne THEN {
selfData ← NIL ;
GOTO return;
};
-- Assert textBlock.stopIndexPlusOne = maxStopIndexPlusOne
block.base ← block.base + maxWordsMoved;
block.startIndex ← 0;
stopIndexPlusOne ← stopIndexPlusOne - maxBytesMoved;
ENDLOOP;
EXITS
return => RETURN [nBytesRead]
};
UnsafePutBlock: PUBLIC PROC [self: STREAM, block: IO.UnsafeBlock] = TRUSTED {
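-- Like PutBlock, but transfers from the raw storage described by block,
-- moving at most maxBytesMoved bytes per outer iteration.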
ENABLE FS.Error => {
convertFStoIOError [self, error];
};
selfData: FSDataHandle ← NARROW[self.streamData];
textBlock: PrincOps.ByteBltBlock;
stopIndexPlusOne: INT;
IF block.startIndex < 0 OR block.count < 0 THEN ERROR RuntimeError.BoundsFault;
IF block.startIndex > maxBytesMoved THEN {
-- scale block.startIndex into [0 .. bytesPerWord)
wordOffset: INT = block.startIndex / bytesPerWord;
block.base ← block.base + wordOffset;
block.startIndex ← block.startIndex - wordOffset*bytesPerWord;
};
stopIndexPlusOne ← block.startIndex + block.count;
DO
-- Transfer at most maxBytesMoved bytes from block^ to the stream.
-- Assert block.startIndex IN [0 .. maxStopIndexPlusOne), < stopIndexPlusOne
countRemaining: CARDINAL;
textBlock ← [
blockPointer: block.base,
startIndex: block.startIndex,
stopIndexPlusOne: MIN[maxStopIndexPlusOne, stopIndexPlusOne]];
countRemaining ← textBlock.stopIndexPlusOne - textBlock.startIndex;
-- Assert countRemaining > 0
-- The following loop transfers textBlock^ to the stream.
DO
bufferBlock: PrincOps.ByteBltBlock ← [
blockPointer: selfData.currentNode.buffer,
startIndex: selfData.index,
stopIndexPlusOne: selfData.currentNode.bufferBytes]; -- allow put past current eof.
countTransferred: CARDINAL ← I.ByteBlt[from: textBlock, to: bufferBlock];
selfData.index ← selfData.index + countTransferred;
selfData.currentNode.didPut ← TRUE;
IF (countRemaining ← countRemaining - countTransferred) = 0 THEN EXIT;
textBlock.startIndex ← textBlock.startIndex + countTransferred;
[] ← AdvanceBuffer[selfData];
ENDLOOP;
IF textBlock.stopIndexPlusOne = stopIndexPlusOne THEN EXIT;
-- Assert textBlock.stopIndexPlusOne = maxStopIndexPlusOne
block.base ← block.base + maxWordsMoved;
block.startIndex ← 0;
stopIndexPlusOne ← stopIndexPlusOne - maxBytesMoved;
ENDLOOP ;
selfData ← NIL ;
};
AdvanceBuffer: PROC [fsData: FSDataHandle] RETURNS [node: BufferNodeHandle]= {
-- On entry, index = dataBytesInBuffer = bufferBytes. Exit with same position in
-- file, but index < dataBytesInBuffer or EOF.
-- Handles implicit file extension.
-- Called from GetChar, PutChar, GetBlock, PutBlock, UnsafeGetBlock, UnsafePutBlock.
fileData: FileDataHandle = fsData.fileData;
firstByteOfNextPage: INT = fsData.currentNode.firstFileByteInBuffer +
fsData.currentNode.bufferBytes;
changeSize: BOOL ← FALSE;
IF firstByteOfNextPage = maxLength THEN ERROR IO.Error[$Failure, NIL];
IF fsData.isWriteStream THEN CleanupAfterPut[fileData: fileData, selfData: fsData];
IF firstByteOfNextPage >= fileData.byteSize THEN {
newSize: INT ← 0;
IF fileData.extendFileProc # NIL THEN
newSize ← fileData.extendFileProc[firstByteOfNextPage] ;
fileData.byteSize ← IF newSize # 0 THEN MAX[newSize, firstByteOfNextPage]
ELSE fileData.byteSize + MAX[minFileExtend,
((fileData.byteSize/10)/bytesPerFilePage)*bytesPerFilePage];
SetFileSize[fileData.fileHandle, fileData.byteSize] };
node ← SetupBuffer[fileData: fileData, fsData: fsData, fileByte: firstByteOfNextPage];
fsData.index ← LowHalf[firstByteOfNextPage-fsData.currentNode.firstFileByteInBuffer];
fsData ← NIL ;
};
EndOf: PUBLIC PROC [self: STREAM] RETURNS[BOOL] = {
selfData: FSDataHandle ← NARROW[self.streamData];
node: BufferNodeHandle ← selfData.currentNode ;
-- do cheap test to see if not at EOF
IF selfData.index >= node.dataBytesInBuffer THEN {
-- Cheap test inconclusive. Find real file length.
fileLength: INT = EstablishFileLength[fileData: selfData.fileData];
IF fileLength <= selfData.index+node.firstFileByteInBuffer
THEN RETURN[TRUE];
};
selfData ← NIL ;
RETURN[FALSE];
};
CharsAvail: PUBLIC PROC [self: STREAM, wait: BOOL] RETURNS [INT] = {
RETURN[INT.LAST] };
GetIndex: PUBLIC PROC [self: STREAM] RETURNS [index: INT] = {
selfData: FSDataHandle ← NARROW[self.streamData];
index ← selfData.currentNode.firstFileByteInBuffer + selfData.index ;
selfData ← NIL ;
};
SetIndex: PUBLIC PROC [self: STREAM, index: INT] = {
ENABLE FS.Error => {
convertFStoIOError [self, error];
};
fsData: FSDataHandle ← NARROW[self.streamData];
currentNode: BufferNodeHandle ← fsData.currentNode ;
firstBufferByte: INT ← currentNode.firstFileByteInBuffer;
fileData: FileDataHandle = fsData.fileData ;
fileLength: INT ;
IF index < 0 THEN ERROR IO.Error[BadIndex, self];
-- Make sure dataBytesInBuffer and fileLength are correct by calling
-- CleanupAfterPut or EstablishFileLength
IF fsData.isWriteStream THEN {
CleanupAfterPut[fileData: fileData, selfData: fsData];
fileLength ← fileData.fileLength; }
ELSE fileLength ← EstablishFileLength[fileData: fileData ];
IF index > fileLength THEN ERROR IO.EndOfStream[self];
-- ensure that page containing byte "index" is in the buffer
IF index NOT IN [firstBufferByte .. firstBufferByte+currentNode.bufferBytes)
THEN {
firstBufferByte ← index - (index MOD currentNode.bufferBytes);
currentNode ← SetupBuffer[fileData: fileData, fsData: fsData, fileByte: firstBufferByte];
};
fsData.index ← index - firstBufferByte;
fsData ← NIL ;
};
Reset: PUBLIC PROC [self: STREAM] = {
SetIndex[self, GetLength[self]] };
Flush: PUBLIC PROC [self: STREAM] = {
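-- For a write stream, forces buffered bytes out to the file (see ForceOut);
-- a no-op for a read stream.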
ENABLE FS.Error => {
convertFStoIOError [self, error];
};
fsData: FSDataHandle ← NARROW[self.streamData];
IF fsData.isWriteStream THEN ForceOut[ fsData: fsData ];
fsData ← NIL ;
};
Close: PUBLIC PROC [self: STREAM, abort: BOOL] = {
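-- Flushes buffers, releases this stream's hold on the shared file data,
-- and marks the stream closed.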
ENABLE FS.Error => {
convertFStoIOError [self, error];
};
fsData: FSDataHandle ← NARROW[self.streamData];
ForceOut[ fsData: fsData ];
CloseFileDataForStream[fileData: fsData.fileData, fsData: fsData];
fsData ← NIL ;
self.streamData ← NIL ;
self.streamProcs ← IOUtils.closedStreamProcs };
-- Procs that are called via the property list mechanism.
GetLength: PUBLIC PROC [self: STREAM] RETURNS [length: INT] = {
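-- Returns the file length as currently known, including bytes put by the
-- write stream but not yet written to disk.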
selfData: FSDataHandle ← NARROW[self.streamData];
IF selfData.streamIsClosed THEN ERROR IO.Error[StreamClosed, self];
length ← EstablishFileLength[fileData: selfData.fileData ] ;
selfData ← NIL ;
};
clearLowBits: CARDINAL = CARDINAL.LAST-(bytesPerFilePage-1);
clearHighBits: CARDINAL = (bytesPerFilePage-1);
maxLength: INT = INT.LAST - bytesPerFilePage;
SetLength: PUBLIC PROC [self: STREAM, length: INT] = {
-- Note: do not reduce the size of a shortened file until stream closed.
ENABLE FS.Error => {
convertFStoIOError [self, error];
};
fsData: FSDataHandle ← NARROW[self.streamData];
IF fsData.streamIsClosed THEN ERROR IO.Error[StreamClosed, self];
IF length NOT IN [0 .. maxLength] THEN ERROR IO.Error[BadIndex, self];
SetLengthUnderMonitor[fileData: fsData.fileData, length: length];
IF fsData.index+fsData.currentNode.firstFileByteInBuffer > length THEN {
-- If old index was past EOF, then move it to EOF.
-- We leave the cloned read stream alone: if it does not do a setPosition then
-- it will get an EOF on its next read.
fsData.index ← 0 ;
SetIndex[self: self, index: length];
};
fsData ← NIL ;
};
RoundUpToPages: PROC [bytes: INT] RETURNS [INT] = INLINE {
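-- Rounds a byte count up to a whole number of file pages.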
bytes ← bytes + (bytesPerFilePage-1);
LOOPHOLE[bytes, LongNumber[num]].lowbits ←
BITAND[LOOPHOLE[bytes, LongNumber[num]].lowbits, clearLowBits];
RETURN[bytes];
};
PagesForRoundUpBytes: PROC [bytes: INT] RETURNS [INT] = INLINE {
RETURN[RoundUpToPages[bytes]/bytesPerFilePage];
};
SetLengthUnderMonitor: ENTRY PROC [fileData: FileDataHandle, length: INT] = {
ENABLE UNWIND => NULL;
newFileBytes: INT = RoundUpToPages[length];
oldFileLength: INT ;
nowNode: BufferNodeHandle ← fileData.firstBufferNode;
writeData: FSDataHandle ← fileData.writeStreamData ;
IF writeData # NIL AND writeData.currentNode.didPut THEN {
-- CleanupAfterPut logic is copied here, but we cannot call CleanupAfterPut
-- because it is an ENTRY
currentNode: BufferNodeHandle = writeData.currentNode;
currentNode.bufferDirty ← TRUE;
currentNode.didPut ← FALSE;
IF writeData.index > currentNode.dataBytesInBuffer THEN {
currentNode.dataBytesInBuffer ← writeData.index;
fileData.fileLength ← currentNode.firstFileByteInBuffer + writeData.index };
currentNode.didPut ← FALSE };
oldFileLength ← fileData.fileLength ;
fileData.fileLength ← length;
IF length < fileData.validBytesOnDisk THEN fileData.validBytesOnDisk ← length ;
-- grow file if needed
IF length > fileData.byteSize THEN {
fileData.byteSize ← newFileBytes;
SetFileSize[fileData.fileHandle, fileData.byteSize];
};
-- Look through nodes and adjust those past EOF or with EOF in buffer
UNTIL nowNode = NIL DO
IF (nowNode.status # invalid) THEN {
IF (nowNode.firstFileByteInBuffer+nowNode.bufferBytes > length) THEN {
IF nowNode.firstFileByteInBuffer >= length THEN {
-- All of the buffer is past EOF.
-- Set dataBytesInBuffer to 0 so that gets will find themselves at EOF,
-- and clean the node to avoid redundant write
nowNode.dataBytesInBuffer ← 0;
nowNode.bufferDirty ← FALSE ;
nowNode.didPut ← FALSE ;
}
ELSE {
-- EOF is in (or just past) this buffer
nowNode.dataBytesInBuffer ← length - nowNode.firstFileByteInBuffer ;
IF nowNode.didPut THEN {
nowNode.didPut ← FALSE ;
nowNode.bufferDirty ← TRUE ;
};
};
}
ELSE {
-- all of node is in the file
nowNode.dataBytesInBuffer ← nowNode.bufferBytes ;
};
};
nowNode ← nowNode.nextBufferNode ;
ENDLOOP;
writeData ← NIL ;
};
EraseChar: PUBLIC PROC [self: STREAM, char: CHAR] = {
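-- If the byte before the current position is char, back up over it;
-- otherwise write a backslash followed by char at the original position.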
index: INT = GetIndex[self];
IF index = 0 THEN ERROR IO.Error[IllegalBackup, self];
SetIndex[self, index-1];
IF GetChar[self] # char THEN {PutChar[self, '\\]; PutChar[self, char]}
ELSE SetIndex[self, index-1] };
Backup: PUBLIC PROC [self: STREAM, char: CHAR] = {
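-- Backs the stream up over char, which must be the byte immediately before
-- the current position; otherwise raises IllegalBackup.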
selfData: FSDataHandle ← NARROW[self.streamData];
index: INT;
IF selfData.streamIsClosed THEN ERROR IO.Error[StreamClosed, self];
index ← GetIndex[self];
IF index = 0 THEN ERROR IO.Error[IllegalBackup, self];
SetIndex[self, index-1];
IF GetChar[self] # char THEN ERROR IO.Error[IllegalBackup, self];
SetIndex[self, index-1];
selfData ← NIL ;
};
CloseFileDataForStream: ENTRY PROC [fileData: FileDataHandle, fsData: FSDataHandle] = {
-- Processing for "Close" that must be done under the monitor
needDeleted: INT ← IF fileData.numberOfStreams = 0
THEN INT.LAST
ELSE fileData.streamBufferParms.nBuffers ;
lastNode: BufferNodeHandle ← NIL ;
node: BufferNodeHandle ;
IF (node ← fsData.currentNode) # NIL THEN node.useCount ← node.useCount-1 ;
IF (node ← fsData.readAheadNode) # NIL THEN node.useCount ← node.useCount-1 ;
fsData.currentNode ← NIL ;
fsData.readAheadNode ← NIL;
IF fsData.isWriteStream AND fileData.accessRights = $write AND
fileData.streamOptions[truncatePagesOnClose] THEN {
SetFileSize[fileData.fileHandle, fileData.fileLength] ;
};
IF (fileData.numberOfStreams ← fileData.numberOfStreams - 1) = 0 THEN {
IF fileData.streamOptions[closeFSOpenFileOnClose] THEN fileData.fileHandle.Close[];
fileData.fileHandle ← FS.nullOpenFile ;
};
-- Look for up to streamBufferParms.nBuffers buffers to free if another stream
-- is around; else, free all buffers.
node ← fileData.firstBufferNode ;
UNTIL node = NIL OR needDeleted = 0 DO
IF node.useCount = 0 THEN {
TRUSTED{VM.Free[node.bufferInterval]};
IF fileData.firstBufferNode = node
THEN fileData.firstBufferNode ← node.nextBufferNode
ELSE lastNode.nextBufferNode ← node.nextBufferNode ;
needDeleted ← needDeleted - 1 ;
}
ELSE {
lastNode ← node ;
};
node ← node.nextBufferNode ;
ENDLOOP;
fsData.streamIsClosed ← TRUE;
BROADCAST fileData.somethingHappened ;
fsData ← NIL ;
};
-- Ensure that all buffers, except the one corresponding to the currentNode,
-- have been written to disk.
-- Normal cases are to return immediately when no writes are outstanding,
-- or to wait until one finishes.
FinishWrites: ENTRY PROC [fileData: FileDataHandle, fsData: FSDataHandle,
currentNode: BufferNodeHandle] = {
ENABLE UNWIND => NULL;
nowNode: BufferNodeHandle ← fileData.firstBufferNode;
IF currentNode = NIL THEN { -- this is only true when called via ForceOut
WHILE fileData.writeCount # 0 DO
WAIT fileData.somethingHappened ;
ENDLOOP;
};
UNTIL nowNode = NIL DO
IF nowNode # currentNode AND nowNode.bufferDirty AND
(nowNode.status = needsSequentialWrite OR currentNode = NIL) THEN {
-- What has happened is that an asynchronous write has failed,
-- or we are trying to flush all dirty pages in ForceOut.
-- The parallel process has given up, and we are about to re-do
-- the write under the monitor to get the signal and the stack
-- correct so that the client sees a correct view of the error.
nowNode.status ← sequentialWriteActive ;
TRUSTED{WriteFilePages[f: fsData.fileData.fileHandle,
to: nowNode.firstFileByteInBuffer,
numPages: PagesForRoundUpBytes[nowNode.dataBytesInBuffer],
from: nowNode.buffer]
};
nowNode.bufferDirty ← FALSE ;
nowNode.status ← valid ;
};
nowNode ← nowNode.nextBufferNode ;
ENDLOOP;
fsData ← NIL ;
};
FinishRead: ENTRY PROC [fileData: FileDataHandle,
node: BufferNodeHandle, bufferSize: INT] = INLINE {
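-- A read has completed: node now holds bufferSize valid bytes.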
ENABLE UNWIND => NULL;
node.status ← valid;
node.dataBytesInBuffer ← bufferSize ;
BROADCAST fileData.somethingHappened;
};
FinishBadRead: ENTRY PROC [fileData: FileDataHandle, node: BufferNodeHandle] =
INLINE {
ENABLE UNWIND => NULL;
node.status ← invalid;
node.dataBytesInBuffer ← 0 ;
BROADCAST fileData.somethingHappened;
};
FinishBadPreRead: ENTRY PROC [fileData: FileDataHandle, node: BufferNodeHandle] =
INLINE {
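-- A pre-read failed: mark the node so that the read will be redone
-- sequentially in the client's process.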
ENABLE UNWIND => NULL;
node.status ← needsSequentialRead;
node.dataBytesInBuffer ← 0 ;
BROADCAST fileData.somethingHappened;
};
markNodeNotWritten: ENTRY PROC [fileData: FileDataHandle, node: BufferNodeHandle] =
INLINE {
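-- A parallel write failed: mark the node dirty again so that the write will
-- be redone sequentially (see FinishWrites), and count the write as finished.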
ENABLE UNWIND => NULL;
node.status ← needsSequentialWrite ;
node.bufferDirty ← TRUE ;
fileData.writeCount ← fileData.writeCount - 1;
BROADCAST fileData.somethingHappened;
};
markNodeWritten: ENTRY PROC [fileData: FileDataHandle, node: BufferNodeHandle] =
INLINE {
ENABLE UNWIND => NULL;
node.status ← valid ;
fileData.writeCount ← fileData.writeCount - 1;
BROADCAST fileData.somethingHappened;
};
bumpWriteCount: ENTRY PROC [fileData: FileDataHandle] = INLINE {
ENABLE UNWIND => NULL;
fileData.writeCount ← fileData.writeCount + 1;
};
WaitForOneBufferNotWriting: ENTRY PROC [fileData: FileDataHandle] = INLINE {
ENABLE UNWIND => NULL;
WHILE fileData.writeCount >= fileData.streamBufferParms.nBuffers DO
WAIT fileData.somethingHappened;
ENDLOOP;
};
ProcessNode: PUBLIC PROC [ fileData: FileDataHandle, node: BufferNodeHandle ] = {
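-- Performs the parallel request queued for node: either a read-ahead or a
-- buffer write; any other node status is an error.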
IF node.status = needsParallelRead THEN {
node.status ← parallelReadActive ;
ReadAhead [node: node, fileData: fileData];
RETURN ;
};
IF node.status = needsParallelWrite THEN {
node.status ← parallelWriteActive ;
parallelWriteBuffer [ node: node, fileData: fileData ] ;
RETURN ;
};
ERROR ;
};
parallelWriteBuffer: PROC [node: BufferNodeHandle, fileData: FileDataHandle] = {
ENABLE FS.Error => {
-- By catching and ignoring FS errors, we ensure that the write will
-- later be done in the process of the client so that signals will
-- look correct.
markNodeNotWritten[fileData: fileData, node: node];
GOTO done;
};
TRUSTED{WriteFilePages[f: fileData.fileHandle,
to: node.firstFileByteInBuffer,
numPages: PagesForRoundUpBytes[node.dataBytesInBuffer],
from: node.buffer];};
markNodeWritten[fileData: fileData, node: node];
EXITS
done => RETURN
};
ReadAhead: PROC [node: BufferNodeHandle, fileData: FileDataHandle] = {
ENABLE FS.Error => {
-- on FS errors, invalidate the pre-read
FinishBadPreRead[fileData: fileData, node: node];
GOTO done;
};
bytesToRead: INT ;
fileByte: INT = node.firstFileByteInBuffer ;
IF (bytesToRead ←
MIN[fileData.fileLength - fileByte, node.bufferBytes]) > 0 THEN
TRUSTED{ReadFilePages[f: fileData.fileHandle, from: fileByte,
numPages: PagesForRoundUpBytes[bytesToRead], to: node.buffer]};
FinishRead[fileData: fileData, node: node, bufferSize: bytesToRead];
EXITS
done => RETURN
};
SetupBuffer: PUBLIC PROC [fileData: FileDataHandle,
fsData: FSDataHandle, fileByte: INT] RETURNS [currentNode: BufferNodeHandle] = {
-- For write streams, didPut = FALSE on entry (someone else has called CleanupAfterPut).
-- Arranges buffer so that fileByte (must be buffer-aligned) is the first byte in it.
-- If buffer is dirty, writes it to file.
-- Maintains invariants of dataBytesInBuffer, bufferBytes, and
-- firstFileByteInBuffer in the face of all this. DOES NOT update index.
-- Called from AdvanceBuffer, SetIndex, SetLength,
-- StreamFromOpenStream and StreamFromOpenFile.
node: BufferNodeHandle ← fsData.currentNode ;
readAheadNode: BufferNodeHandle;
currentNodeStatus: FileStreamPrivate.NodeStatus;
IF node = NIL THEN node ← fileData.firstBufferNode ;
-- write buffer if needed
IF node.bufferDirty AND fsData.isWriteStream THEN {
-- See if there are buffers that must be written sequentially.
FinishWrites[fileData: fileData, fsData: fsData, currentNode: node];
-- Extend file if we are about to write over it.
IF node.dataBytesInBuffer + node.firstFileByteInBuffer > fileData.byteSize THEN {
fileData.byteSize ← node.dataBytesInBuffer + node.firstFileByteInBuffer ;
SetFileSize[fileData.fileHandle, fileData.byteSize] ;
};
IF fileData.validBytesOnDisk < node.dataBytesInBuffer + node.firstFileByteInBuffer THEN
fileData.validBytesOnDisk ← node.dataBytesInBuffer + node.firstFileByteInBuffer ;
node.status ← needsParallelWrite ;
node.bufferDirty ← FALSE ;
bumpWriteCount[ fileData: fileData];
FileStreamPrivate.StartRequest [ fileData: fileData, node: node ] ;
WaitForOneBufferNotWriting[fileData: fileData];
};
[currentNode, readAheadNode] ←
SetUpNodes[fileData: fileData, fsData: fsData, fileByte: fileByte];
-- Copy the status out of the node before the tests. Since the status
-- can change at any time, it would be possible to have none of the
-- arms of the SELECT executed when there was a pre-read to do.
currentNodeStatus ← currentNode.status ;
SELECT TRUE FROM
currentNodeStatus # valid AND readAheadNode = NIL => {
makeNodeValid[fileData: fileData, node: currentNode ];
};
currentNodeStatus = valid AND readAheadNode # NIL => {
FileStreamPrivate.StartRequest [ fileData: fileData, node: readAheadNode ];
};
currentNodeStatus # valid AND readAheadNode # NIL => {
myPriority: Process.Priority ;
myPriority ← Process.GetPriority[];
Process.SetPriority[Process.priorityForeground];
FileStreamPrivate.StartRequest [ fileData: fileData, node: readAheadNode ];
makeNodeValid[fileData: fileData, node: currentNode ];
Process.SetPriority[myPriority];
};
ENDCASE ; -- falls through if currentNode.status = valid AND readAheadNode = NIL
fsData ← NIL ;
};
makeNodeValid: PROC [fileData: FileDataHandle, node: BufferNodeHandle] = {
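-- Ensures that node holds valid data, doing the read in this process if
-- necessary and waiting out any read or write already in progress.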
bytesToRead: INT ;
WHILE node.status # valid DO
IF doTheRead[fileData: fileData, node: node] THEN {
bytesToRead ← MIN[fileData.fileLength - node.firstFileByteInBuffer, node.bufferBytes];
IF fileData.validBytesOnDisk <= node.firstFileByteInBuffer THEN {
-- Avoid read: the data is trash on disk. We are extending the file anyway.
FinishRead[fileData: fileData, node: node, bufferSize: bytesToRead];
}
ELSE {
IF bytesToRead > 0 THEN TRUSTED{
ReadFilePages[f: fileData.fileHandle,
from: node.firstFileByteInBuffer,
numPages: PagesForRoundUpBytes[bytesToRead],
to: node.buffer
! FS.Error => {
FinishBadRead[ fileData: fileData, node: node];
};
];
};
FinishRead[fileData: fileData, node: node, bufferSize: bytesToRead];
};
};
ENDLOOP;
};
doTheRead: ENTRY PROC [fileData: FileDataHandle, node: BufferNodeHandle]
RETURNS [BOOL] = INLINE {
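-- Decides whether the caller should read node's buffer. Returns TRUE if the
-- caller must do the read (status is now sequentialReadActive); returns FALSE
-- if the buffer is already valid or after waiting for another process's activity.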
ENABLE UNWIND => NULL;
IF node.status = invalid OR node.status = needsSequentialRead THEN {
node.status ← sequentialReadActive ;
RETURN [TRUE];
};
IF node.status = valid THEN RETURN [ FALSE ] ELSE WAIT fileData.somethingHappened ;
RETURN [ FALSE ] ;
};
SetUpNodes: ENTRY PROC [fileData: FileDataHandle, fsData: FSDataHandle, fileByte: INT]
RETURNS [currentNode: BufferNodeHandle ← NIL, nextNode: BufferNodeHandle ← NIL] = {
-- This procedure runs under the monitor.
-- It looks for buffers for the current and next nodes.
-- The node returned in currentNode is the one to use as current. If it is
-- marked "active", then the caller must fill it before use.
-- The node nextNode is returned as NIL if no preread is needed. If non-NIL,
-- the caller should arrange to preread into this buffer.
ENABLE UNWIND => NULL;
nowNode: BufferNodeHandle ← fileData.firstBufferNode;
availableNode: BufferNodeHandle ← NIL ;
availableNodeLRUCount: INT ← 1000000;
maxLRUCount: INT ← 0 ;
node: BufferNodeHandle ;
oldCurrentNode: BufferNodeHandle ← fsData.currentNode ;
oldFirstByteInBuffer: INT = IF fsData.currentNode = NIL THEN -1
ELSE fsData.currentNode.firstFileByteInBuffer;
bufferBytes: INT = nowNode.bufferBytes ;
IF (node ← fsData.currentNode) # NIL THEN node.useCount ← node.useCount-1 ;
IF (node ← fsData.readAheadNode) # NIL THEN node.useCount ← node.useCount-1 ;
fsData.currentNode ← NIL ;
fsData.readAheadNode ← NIL;
UNTIL nowNode = NIL DO
firstByte: INT ← nowNode.firstFileByteInBuffer;
IF nowNode.LRUCount > maxLRUCount THEN maxLRUCount ← nowNode.LRUCount ;
SELECT TRUE FROM
-- buffer already has correct position in file
firstByte = fileByte => {
currentNode ← nowNode ;
fsData.currentNode ← nowNode ;
nowNode.useCount ← nowNode.useCount+1 ;
};
-- buffer is next after current
firstByte = fileByte+bufferBytes => {
IF fileData.streamBufferParms.nBuffers > 1 THEN {
nowNode.useCount ← nowNode.useCount+1 ;
fsData.readAheadNode ← nowNode ;
}
ELSE {
IF( nowNode.status = valid OR nowNode.status = invalid)
AND nowNode.useCount = 0 AND
nowNode.LRUCount <= availableNodeLRUCount THEN {
availableNodeLRUCount ← nowNode.LRUCount ;
availableNode ← nowNode ;
};
};
};
-- buffer not "near" stream pointer (tested in above two cases)
-- and it is not active, and it is not near the other stream (if it exists)
( nowNode.status = valid OR nowNode.status = invalid) AND
nowNode.useCount = 0 AND nowNode.LRUCount <= availableNodeLRUCount => {
availableNodeLRUCount ← nowNode.LRUCount ;
availableNode ← nowNode ;
};
ENDCASE; -- SELECT TRUE
nowNode ← nowNode.nextBufferNode ;
ENDLOOP;
IF currentNode = NIL THEN {
WHILE availableNode = NIL DO
IF oldCurrentNode # NIL THEN {
-- re-establish invariant about currentNode always being valid outside monitor
-- (except during stream creation)
fsData.currentNode ← oldCurrentNode ;
oldCurrentNode.useCount ← oldCurrentNode.useCount + 1 ;
};
WAIT fileData.somethingHappened;
IF oldCurrentNode # NIL THEN {
fsData.currentNode ← NIL ;
oldCurrentNode.useCount ← oldCurrentNode.useCount - 1 ;
};
nowNode ← fileData.firstBufferNode;
UNTIL nowNode = NIL DO
IF ( nowNode.status = valid OR nowNode.status = invalid)
AND nowNode.useCount = 0 THEN
availableNode ← nowNode ;
nowNode.LRUCount ← 0 ;
nowNode ← nowNode.nextBufferNode ;
ENDLOOP;
ENDLOOP;
currentNode ← availableNode ;
currentNode.LRUCount ← maxLRUCount + 1 ;
currentNode.useCount ← currentNode.useCount+1 ;
currentNode.status ← invalid ;
currentNode.firstFileByteInBuffer ← fileByte ;
availableNode ← NIL ;
fsData.currentNode ← currentNode ;
};
-- preread if there is not a preread in progress, and
-- a sequence has been established.
IF (NOT fileData.preReadInProgress) AND
fileData.streamBufferParms.nBuffers > 1 AND
(oldFirstByteInBuffer+bufferBytes = fileByte) AND
(fsData.lastFirstByteInBuffer+bufferBytes = oldFirstByteInBuffer) AND
(fileData.fileLength > fileByte+bufferBytes) AND
(fileData.validBytesOnDisk > fsData.lastFirstByteInBuffer+bufferBytes) THEN {
IF fsData.readAheadNode # NIL THEN {
-- a node already points to the right place in the file
IF fsData.readAheadNode.status = invalid THEN {
nextNode ← fsData.readAheadNode ;
nextNode.status ← needsParallelRead ;
};
}
ELSE {
IF availableNode = NIL THEN {
availableNodeLRUCount ← 1000000;
nowNode ← fileData.firstBufferNode;
UNTIL nowNode = NIL DO
IF ( nowNode.status = valid OR nowNode.status = invalid) AND
nowNode.useCount = 0 AND
nowNode.LRUCount <= availableNodeLRUCount THEN
availableNode ← nowNode ;
nowNode ← nowNode.nextBufferNode ;
ENDLOOP;
};
IF availableNode # NIL THEN {
nextNode ← availableNode ;
nextNode.status ← needsParallelRead ;
nextNode.LRUCount ← maxLRUCount + 1 ;
nextNode.useCount ← nextNode.useCount+1 ;
nextNode.firstFileByteInBuffer ← fileByte + bufferBytes ;
fsData.readAheadNode ← nextNode ;
};
};
};
fsData.lastFirstByteInBuffer ← oldFirstByteInBuffer ;
fsData ← NIL ;
DebugQueue[DebugQueuePut].current ← currentNode;
DebugQueue[DebugQueuePut].currentStatus ← currentNode.status;
DebugQueue[DebugQueuePut].currentFirstFileByteInBuffer ← currentNode.firstFileByteInBuffer;
DebugQueue[DebugQueuePut].next ← nextNode;
IF nextNode = NIL THEN {
DebugQueue[DebugQueuePut].nextStatus ← invalid;
DebugQueue[DebugQueuePut].nextFirstFileByteInBuffer ← -123;
} ELSE {
DebugQueue[DebugQueuePut].nextStatus ← nextNode.status;
DebugQueue[DebugQueuePut].nextFirstFileByteInBuffer ← nextNode.firstFileByteInBuffer;
};
DebugQueuePut ← DebugQueuePut + 1 ;
IF DebugQueuePut >= 20 THEN DebugQueuePut ← 0 ;
};
ForceOut: PROC [fsData: FSDataHandle] = {
-- Called from Flush for write streams, or Close for any stream
-- This is the only proc that sets byte length, and only proc that finishes trans.
fileData: FileDataHandle = fsData.fileData;
node: BufferNodeHandle ← fsData.currentNode ;
IF fsData.isWriteStream THEN CleanupAfterPut[fileData: fileData, selfData: fsData];
IF node.dataBytesInBuffer + node.firstFileByteInBuffer > fileData.byteSize THEN {
fileData.byteSize ← node.dataBytesInBuffer + node.firstFileByteInBuffer ;
SetFileSize[fileData.fileHandle, fileData.byteSize] ;
};
IF fileData.validBytesOnDisk < node.dataBytesInBuffer + node.firstFileByteInBuffer THEN
fileData.validBytesOnDisk ← node.dataBytesInBuffer + node.firstFileByteInBuffer ;
-- This call does the writes under the monitor lock.
-- This should be true for read streams since you want to stop the writer from
-- dirtying buffers.
-- For write streams, you could get by without the lock provided you were
-- extremely careful about a ForceOut on the read stream, a StreamFromOpenStream
-- (it will allocate more buffers). The easy way out is to use the monitor.
FinishWrites[ fileData: fileData, fsData: fsData, currentNode: NIL];
IF fsData.isWriteStream AND fileData.accessRights = $write AND
fileData.fileLength # fileData.byteLength THEN {
fileData.byteLength ← fileData.fileLength;
fileData.fileHandle.SetByteCountAndCreatedTime[fileData.byteLength] };
fsData ← NIL ;
};
SaveStreamError: PUBLIC PROCEDURE [self: STREAM, error: FS.ErrorDesc] = {
WITH self.streamData SELECT FROM
fsData: FSDataHandle => fsData.FSErrorDesc ← error ;
ENDCASE => ERROR IO.Error[NotImplementedForThisStream, self];
};
ErrorFromStream: PUBLIC PROCEDURE [self: STREAM] RETURNS [FS.ErrorDesc] = {
WITH self.streamData SELECT FROM
fsData: FSDataHandle => RETURN [fsData.FSErrorDesc];
ENDCASE => ERROR IO.Error[NotImplementedForThisStream, self];
};
SetStreamClassData: PUBLIC PROCEDURE [self: STREAM, data: REF ANY] = {
WITH self.streamData SELECT FROM
fsData: FSDataHandle => fsData.StreamClassData ← data ;
ENDCASE => ERROR IO.Error[NotImplementedForThisStream, self];
};
GetStreamClassData: PUBLIC PROCEDURE [self: STREAM] RETURNS [data: REF ANY] = {
WITH self.streamData SELECT FROM
fsData: FSDataHandle => RETURN [fsData.StreamClassData] ;
ENDCASE => ERROR IO.Error[NotImplementedForThisStream, self];
};
SetFinalizationProc: PUBLIC PROCEDURE [self: STREAM, proc: FileStream.FinalizationProc] = {
WITH self.streamData SELECT FROM
fsData: FSDataHandle => fsData.FinalizationProc ← proc ;
ENDCASE => ERROR IO.Error[NotImplementedForThisStream, self];
};
GetFinalizationProc: PUBLIC PROCEDURE [self: STREAM] RETURNS
[proc: FileStream.FinalizationProc] = {
WITH self.streamData SELECT FROM
fsData: FSDataHandle => RETURN [fsData.FinalizationProc] ;
ENDCASE => ERROR IO.Error[NotImplementedForThisStream, self];
};
-- Talking to FS
ReadFilePages: PROC [f: FS.OpenFile, from: ByteNumber, numPages: INT,
to: LONG POINTER] = INLINE {
p: PageNumber = from/bytesPerFilePage;
TRUSTED{f.Read[from: p, nPages: numPages, to: to]};
};
WriteFilePages: PROC [f: FS.OpenFile, to: ByteNumber, numPages: INT,
from: LONG POINTER] = INLINE {
p: PageNumber = to/bytesPerFilePage;
f.Write[from: from, nPages: numPages, to: p];
};
SetFileSize: PROC [f: FS.OpenFile, byteSize: ByteCount] = {
f.SetPageCount[pages: (byteSize+bytesPerFilePage-1)/bytesPerFilePage];
};
GetFileLock: PROC [f: FS.OpenFile] RETURNS [FS.Lock] = {
RETURN [f.GetInfo[].lock]
};
ProcHandleFromAccessRights: PUBLIC PROC [accessRights: FS.Lock]
RETURNS [ procs: FileStreamPrivate.ProcHandle] = {
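-- Streams opened with read access get the proc record without put operations.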
SELECT accessRights FROM
read => RETURN [nucleusFileIOReadProcs];
write => RETURN [nucleusFileIOAllProcs];
ENDCASE => RETURN[NIL];
};
-- Procedure records (never modified)
nucleusFileIOReadProcs: PUBLIC FileStreamPrivate.ProcHandle = IO.CreateStreamProcs[
variety: $input, class: $File,
getChar: GetChar,
endOf: EndOf,
charsAvail: CharsAvail,
getBlock: GetBlock,
unsafeGetBlock: UnsafeGetBlock,
putChar: NIL, -- not implemented
putBlock: NIL, -- call PutChar
unsafePutBlock: NIL, -- call PutChar
flush: Flush,
reset: Reset,
close: Close,
getIndex: GetIndex,
setIndex: SetIndex,
backup: Backup,
getLength: GetLength
];
nucleusFileIOAllProcs: PUBLIC FileStreamPrivate.ProcHandle = IO.CreateStreamProcs[
variety: $inputOutput, class: $File,
getChar: GetChar,
endOf: EndOf,
charsAvail: CharsAvail,
getBlock: GetBlock,
unsafeGetBlock: UnsafeGetBlock,
putChar: PutChar,
putBlock: PutBlock,
unsafePutBlock: UnsafePutBlock,
flush: Flush,
reset: Reset,
close: Close,
getIndex: GetIndex,
setIndex: SetIndex,
backup: Backup,
getLength: GetLength,
setLength: SetLength,
eraseChar: EraseChar
];
fQ: SafeStorage.FinalizationQueue;
Finalize: PROC = BEGIN
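-- Finalization process: for each stream data object that becomes unreachable,
-- flush and close it if the client did not, detach it from the shared file
-- data, and call the client's FinalizationProc if one was registered.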
DO
fsData: FSDataHandle ← NARROW [ SafeStorage.FQNext[fQ] ];
streamIsClosed: BOOL ;
FSLock.RemoveREF[fsData];
streamIsClosed ← fsData.streamIsClosed ;
IF NOT fsData.streamIsClosed THEN {
ForceOut[ fsData: fsData
! FS.Error => CONTINUE];
CloseFileDataForStream[fileData: fsData.fileData, fsData: fsData
! FS.Error => CONTINUE];
};
IF fsData.isWriteStream THEN {
IF fsData.fileData.writeStreamData = fsData THEN
fsData.fileData.writeStreamData ← NIL ;
}
ELSE {
IF fsData.fileData.firstReadStream = fsData THEN
fsData.fileData.firstReadStream ← NIL ;
};
IF fsData.FinalizationProc # NIL THEN {
fsData.FinalizationProc [ openFile: fsData.fileData.fileHandle,
data: fsData.StreamClassData,
closed: streamIsClosed];
};
fsData ← NIL;
ENDLOOP;
END;
-- start code: start up finalization stuff
IF FileStreamPrivate.DoFinalization THEN {
fQ ← SafeStorage.NewFQ[];
SafeStorage.EstablishFinalization[CODE[Data], 2, fQ];
TRUSTED { Process.Detach[FORK Finalize[]] };
};
END.
CHANGE LOG
Created by MBrown on June 22, 1983 10:08 am
-- By editing FileIOAlpineImpl.
Changed by MBrown on August 19, 1983 2:44 pm
-- Close FS file when closing stream (this should really be an option).
Changed by Birrell on August 23, 1983 3:14 pm
-- In SetFileSize: byteSize/bytesPerFilePage -> (byteSize+bytesPerFilePage-1)/bytesPerFilePage.
Changed by MBrown on August 25, 1983 1:18 pm
-- In SetIndex: fsData.byteSize < firstBufferByte+fsData.dataBytesInBuffer -> fsData.byteSize <= firstBufferByte+fsData.dataBytesInBuffer. Implemented GetFileLock (was stubbed waiting for FS). In StreamFromOpenFile, if stream open for write and file has no pages, extend it.
Changed by MBrown on September 17, 1983 8:41 pm
-- Conversion to new IO interface.
Changed by Hagmann on November 22, 1983 4:29 pm
-- Implement multiple-page stream buffer.
-- Implement coupled read and write streams on same open file.
-- Changed data structures in FileStreamPrivate, and fixed references in this module
-- to the new data structures. This meant changes to nearly every routine.
-- Close file during StreamOpen if an error occurs in StreamFromOpenFile.
-- Implement streamOptions and streamBufferParms features.
-- Added finalization.
-- Changed name from FileIOFSImpl.
-- Split out create code to make FileStreamCreateImpl smaller since compiler blows up in pass 3
Changed by Hagmann on November 28, 1983 12:00 pm
-- Fixed EndOf bug for multiple streams.
-- Added test for DoFinalization to enable FileStream testing without making a boot file
Changed by Hagmann on December 6, 1983 4:52 pm
-- Added code for process cache