-- PilotSSImpl.mesa
-- Edited by: Johnsson on 8-Jan-81 18:12:59
-- Edited by: Russ Atkinson on 20-Mar-81 18:07:23
-- Edited by: Paul Rovner on 30-Jul-81 8:51:01 (seg.spare1 on => file is defaultwindow)
-- Edited by: Levin on 8-Dec-81 9:45:40 (bug fixes; monitorize; merge in DummyFileCache)

DIRECTORY
  DCSFileTypes USING [tLeaderPage],
  Directory USING [
    CreateFile, DeleteFile, Error, GetNext, GetProps, ignore, Lookup],
  Environment USING [PageCount, PageNumber, bytesPerPage, wordsPerPage],
  File USING [
    Capability, GetAttributes, GetSize, grow, Permissions, SetSize,
    ShowCapability, Type],
  FileStream USING [
    Create, GetCapability, GetLeaderProperties, GetLeaderPropertiesForCapability,
    InvalidOperation, NoLeaderPage, SetLeaderProperties,
    SetLeaderPropertiesForCapability],
  Inline USING [BITAND, BITOR, DIVMOD],
  Segments,
  SegmentsExtra USING [],  -- exports: NewSegmentForSpace
  Space USING [
    Activate, Create, CreateUniformSwapUnits, Deactivate, Delete, ForceOut,
    GetAttributes, GetHandle, Handle, Kill, LongPointer, MakeReadOnly, Map, mds,
    nullHandle, PageFromLongPointer, Unmap, virtualMemory],
  Storage USING [Node, Pages],
  Stream USING [InputOptions, SendNow],
  Streams USING [Address, Handle],
  String USING [AppendString, AppendSubString, SubStringDescriptor],
  Time USING [Current, Packed];

PilotSSImpl: MONITOR
  IMPORTS
    Directory, File, FileStream, Inline, Segments, Space, Storage, Stream,
    String, Time
  EXPORTS SegmentsExtra, Segments, Streams
  SHARES File, Segments =
  BEGIN OPEN Segments;

  -- This module has been more-or-less monitorized, but since the interface isn't well suited
  -- to monitors (e.g., consider the enumeration procedures), the monitorization is deliberately
  -- simple. The monitor is intended to protect separate, independent clients of this package
  -- from interfering with each other; two clients who share a segment object must cooperate
  -- externally, and this monitor won't protect them. Only the procedures that actually
  -- manipulate the segTables and fileTables are monitored, including the enumeration
  -- procedures. See the comments in the enumeration procedures for the appropriate caveats.
  -- The existing functionality has in no way been reduced, only the potential for evil. (RL)

  SwapError: PUBLIC SIGNAL [s: SHandle] = CODE;
  InvalidSegment: PUBLIC SIGNAL [s: UNSPECIFIED] = CODE;

  DeleteSegment: PUBLIC PROCEDURE [seg: SHandle] =
    BEGIN
    file: FHandle _ seg.file;
    SwapOut[seg];
    IF seg.space # Space.nullHandle THEN Space.Delete[seg.space];
    ReturnSeg[seg];
    IF file # NIL THEN IF (file.lock _ file.lock - 1) = 0 THEN ReleaseFile[file];
    RETURN
    END;

  EnumerateSegments: PUBLIC PROCEDURE [
    proc: PROCEDURE [SHandle] RETURNS [BOOLEAN]] RETURNS [f: SHandle] =
    BEGIN
    -- Note: once we have a pointer to a page of segment objects, the tail of
    -- the chain is guaranteed to be good. This is because no pages of the
    -- objects are ever freed, and new pages are only added at the front. So,
    -- we needn't stand on our heads to unlock the monitor around the call to
    -- 'proc'; we just unlock it as soon as we have stashed the head. Of course,
    -- we can't guarantee that the caller of the enumerator will see a consistent
    -- snapshot of the segment table, but that guarantee was never present in the
    -- interface anyway, so no functionality has been lost.
    Init: ENTRY PROCEDURE RETURNS [STableHandle] = INLINE {RETURN[segTables]};
    FOR t: STableHandle _ Init[], t.link UNTIL t = NIL DO
      FOR i: CARDINAL IN [0..SegsPerTable) DO
        f _ @t.segs[i];
        IF f.inuse AND proc[f] THEN RETURN[f];
        ENDLOOP;
      ENDLOOP;
    RETURN[NIL]
    END;

  FlushSegmentCache: PUBLIC PROCEDURE = {};

  Kill: PUBLIC PROCEDURE [seg: SHandle, base: PageNumber _ DefaultBase] =
    BEGIN
    IF seg.mapped THEN Space.Kill[seg.space] ELSE seg.killed _ TRUE;
    RETURN
    END;

  MakeReadOnly: PUBLIC PROCEDURE [seg: SHandle] =
    BEGIN
    ValidateSeg[seg];
    IF seg.mapped THEN Space.MakeReadOnly[seg.space];
    seg.write _ FALSE;
    RETURN
    END;

  MoveSegment: PUBLIC PROCEDURE [
    seg: SHandle, base: Environment.PageNumber, pages: Environment.PageCount] =
    BEGIN
    IF IsVMSeg[seg] THEN ERROR SwapError[seg];
    SwapOut[seg];
    IF seg.space # Space.nullHandle THEN {
      IF seg.pages < pages THEN {
        Space.Delete[seg.space]; seg.space _ Space.nullHandle; seg.mapped _ FALSE}
      ELSE IF seg.mapped THEN {Space.Unmap[seg.space]; seg.mapped _ FALSE}};
    seg.base _ IF base = DefaultBase THEN 1 ELSE base;
    seg.pages _ pages;
    IF seg.base # 1 AND File.GetSize[seg.file.cap] < seg.base + pages THEN
      SIGNAL SwapError[seg];
    RETURN
    END;

  NewSegment: PUBLIC PROCEDURE [
    file: FHandle, base: Environment.PageNumber, pages: Environment.PageCount,
    access: Access _ DefaultAccess] RETURNS [s: SHandle] =
    BEGIN
    LockFile[file];
    s _ NewSegmentCommon[access];
    s.file _ file;
    s.base _ IF base = DefaultBase THEN 1 ELSE base;
    s.pages _ pages;
    END;

  NewSegmentForSpace: PUBLIC PROCEDURE [
    space: Space.Handle, access: Access _ DefaultAccess] RETURNS [s: SHandle] =
    BEGIN
    s _ NewSegmentCommon[access];
    s.space _ space;
    s.base _ 1;
    s.pages _ Space.GetAttributes[space].size;
    s.mapped _ s.spare1 _ TRUE;
    END;

  SegmentAddress: PUBLIC PROCEDURE [seg: SHandle] RETURNS [Address] =
    BEGIN
    ValidateSeg[seg];
    IF ~seg.mapped THEN ERROR SwapError[seg];
    RETURN[Space.LongPointer[seg.space]];
    END;

  SwapIn: PUBLIC PROCEDURE [
    seg: SHandle, base: PageNumber _ DefaultANYBase, info: AllocInfo _ EasyUp] =
    BEGIN OPEN Space;
    ValidateSeg[seg];
    IF seg.lock = MaxSegLocks THEN ERROR SwapError[seg];
    IF ~seg.mapped THEN
      IF IsVMSeg[seg] THEN ERROR SwapError[seg]
      ELSE
        BEGIN
        cap: File.Capability _ seg.file.cap;
        type: File.Type _ File.GetAttributes[cap].type;
        pageOffset: CARDINAL _ 1;
        IF type = DCSFileTypes.tLeaderPage THEN pageOffset _ 0;
        IF seg.space = Space.nullHandle THEN {
          seg.space _ Space.Create[
            size: seg.pages,
            parent: IF base = DefaultMDSBase THEN mds ELSE virtualMemory];
          IF seg.pages > 7 THEN
            Space.CreateUniformSwapUnits[parent: seg.space, size: 4]};
        cap.permissions _ IF seg.write THEN Read+Write ELSE Read;
        Space.Map[space: seg.space, window: [file: cap, base: seg.base-pageOffset]];
        IF seg.killed THEN Space.Kill[seg.space];
        Space.Activate[seg.space];
        seg.mapped _ TRUE;
        seg.killed _ FALSE;
        IF seg.write THEN {
          c: Time.Packed = Time.Current[];
          SetFileTimes[file: seg.file, create: c, read: c, write: c]}
        ELSE SetFileTimes[file: seg.file, read: Time.Current[]];
        END;
    seg.lock _ seg.lock+1;
    RETURN
    END;

  SwapOut: PUBLIC PROCEDURE [seg: SHandle] =
    BEGIN
    ValidateSeg[seg];
    IF ~seg.mapped THEN RETURN;
    IF seg.lock > 0 THEN ERROR SwapError[seg];
    Space.Deactivate[seg.space];
    RETURN
    END;

  SwapUp: PUBLIC PROCEDURE [seg: SHandle] =
    BEGIN
    ValidateSeg[seg];
    IF seg.mapped THEN Space.ForceOut[seg.space];
    RETURN
    END;

  Unlock: PUBLIC PROCEDURE [seg: SHandle] =
    BEGIN
    ValidateSeg[seg];
    IF seg.lock = 0 THEN ERROR SwapError[seg];
    seg.lock _ seg.lock-1;
    RETURN
    END;

  VMtoSegment: PUBLIC PROCEDURE [a: Address] RETURNS [SHandle] =
    BEGIN
    space, parent: Space.Handle;
    mapped: BOOLEAN;
    FindSeg: PROCEDURE [seg: SHandle] RETURNS [BOOLEAN] =
      BEGIN RETURN[seg.space = space] END;
    space _ Space.GetHandle[Space.PageFromLongPointer[a]];
    DO
      IF space = Space.mds OR space = Space.virtualMemory THEN RETURN[NIL];
      [parent: parent, mapped: mapped] _ Space.GetAttributes[space];
      IF mapped THEN EXIT;
      space _ parent;
      ENDLOOP;
    RETURN[EnumerateSegments[FindSeg]];
    END;

  -- File Routines

  FileError: PUBLIC SIGNAL [f: FHandle] = CODE;
  InvalidFile: PUBLIC SIGNAL [f: UNSPECIFIED] = CODE;

  DestroyFile: PUBLIC PROCEDURE [file: FHandle] =
    BEGIN
    name: STRING _ [100];
    NameForFile[name, file];
    Directory.DeleteFile[name];
    END;

  ModifyFile: PUBLIC PROCEDURE [name: STRING] RETURNS [BOOLEAN] =
    BEGIN
    cap: File.Capability;
    okay: BOOLEAN _ TRUE;
    -- Note: once we have a pointer to a StatItem, the tail of
    -- the chain is guaranteed to be good. This is because no StatItems
    -- are ever freed, and new StatItems are only added at the front. So,
    -- we needn't stand on our heads to unlock the monitor around the call to
    -- 'proc'; we just unlock it as soon as we have stashed the head. Of course,
    -- we can't guarantee that ModifyFile will see a consistent
    -- snapshot of the StatItem list, but that guarantee was never present in the
    -- interface anyway, so no functionality has been lost.
    Init: ENTRY PROCEDURE RETURNS [POINTER TO StatItem] = INLINE {RETURN[statHead]};
    CheckFHandle: PROCEDURE [file: FHandle] RETURNS [BOOLEAN] =
      BEGIN
      IF file.lock # 0 AND
        File.ShowCapability[cap].fID = File.ShowCapability[file.cap].fID THEN
        BEGIN
        FOR p: POINTER TO StatItem _ Init[], p.link UNTIL p = NIL DO
          okay _ okay AND p.proc[name, file];
          ENDLOOP;
        okay _ okay AND (~file.inuse OR file.lock = 0);
        RETURN[okay];
        END;
      RETURN[FALSE];
      END;
    FOR p: POINTER TO StatItem _ Init[], p.link UNTIL p = NIL DO
      IF ~(okay _ okay AND p.proc[name, NIL]) THEN EXIT;
      ENDLOOP;
    IF okay THEN {
      cap _ Directory.Lookup[fileName: name, permissions: Directory.ignore !
        Directory.Error => GOTO error];
      [] _ EnumerateFiles[CheckFHandle]};
    RETURN[okay];
    EXITS error => RETURN[TRUE];
    END;

  StatItem: TYPE = RECORD [
    link: POINTER TO StatItem,
    proc: PROC [STRING, FHandle] RETURNS [BOOLEAN]];

  statHead: POINTER TO StatItem _ NIL;

  AddModifyProc: PUBLIC ENTRY PROC [proc: PROC [STRING, FHandle] RETURNS [BOOLEAN]] = {
    p: POINTER TO StatItem = Storage.Node[SIZE[StatItem]];
    p^ _ [link: statHead, proc: proc];
    statHead _ p};

  NewFile: PUBLIC PROCEDURE [
    name: STRING, access: Access _ DefaultAccess,
    version: VersionOptions _ DefaultVersion] RETURNS [file: FHandle] =
    BEGIN
    type: File.Type _ DCSFileTypes.tLeaderPage;
    old, create: BOOLEAN;
    cap: File.Capability;
    dot: CARDINAL = name.length-1;
    IF name[dot] = '. THEN name.length _ dot;
    [access, version] _ ValidateOptions[access, version];
    create _ version # OldFileOnly;
    IF create THEN
      BEGIN
      bogus: BOOLEAN _ FALSE;
      old _ FALSE;
      cap _ Directory.CreateFile[name, type, 0 !
        Directory.Error => {
          IF type = fileAlreadyExists THEN old _ TRUE ELSE bogus _ TRUE; CONTINUE}];
      IF bogus OR (old AND version = NewFileOnly) THEN ERROR FileNameError[name];
      IF old THEN
        cap _ Directory.Lookup[
          fileName: name, permissions: Directory.ignore !
          Directory.Error => ERROR FileNameError[name]];
      END
    ELSE
      cap _ Directory.Lookup[
        fileName: name, permissions: Directory.ignore !
        Directory.Error => ERROR FileNameError[name]];
    file _ InsertFile[@cap, access];
    END;

  FileNameError: PUBLIC SIGNAL [name: STRING] = CODE;

  ValidateOptions: PROCEDURE [
    access: Access, version: VersionOptions] RETURNS [Access, VersionOptions] =
    BEGIN OPEN Inline;
    IF access = DefaultAccess THEN access _ Read;
    -- IF version = DefaultVersion THEN version _ 0;
    IF BITAND[version, NewFileOnly+OldFileOnly] = NewFileOnly+OldFileOnly OR
      (BITAND[version, NewFileOnly] # 0 AND BITAND[access, File.grow] = 0) THEN
      ERROR FileError[NIL];
    IF BITAND[access, File.grow] = 0 THEN version _ BITOR[version, OldFileOnly];
    RETURN[access, version]
    END;

  NameForFile: PUBLIC PROCEDURE [name: STRING, file: FHandle] =
    BEGIN
    name.length _ 0;
    [] _ Directory.GetProps[file.cap, name !
      Directory.Error => {String.AppendString[name, "???"L]; CONTINUE}];
    RETURN
    END;

  EnumerateDirectory: PUBLIC PROC [
    proc: PROC [POINTER TO FP, STRING] RETURNS [BOOLEAN], files: STRING _ NIL,
    wantWholeName: BOOLEAN _ FALSE] =
    BEGIN
    next: STRING _ [100];
    name: STRING _ [100];
    cap: File.Capability;
    IF files = NIL THEN files _ "*"L;
    DO
      cap _ Directory.GetNext[pathName: files, currentName: next, nextName: next !
        Directory.Error => CONTINUE];
      IF next.length = 0 THEN EXIT;
      StripQualification[from: next, to: name, all: wantWholeName];
      IF proc[@cap, name] THEN EXIT;
      ENDLOOP;
    END;

  StripQualification: PROC [from, to: STRING, all: BOOLEAN] =
    BEGIN
    -- offset defaults to 0 so that an unqualified name (no '>) is copied whole.
    split: String.SubStringDescriptor _ [base: from, length: 0, offset: 0];
    IF from.length = 0 THEN RETURN;
    IF all THEN {String.AppendString[to, from]; RETURN};
    FOR i: CARDINAL DECREASING IN [0..from.length) DO
      IF from[i] = '> THEN {split.offset _ i+1; EXIT};
      ENDLOOP;
    split.length _ from.length-split.offset;
    to.length _ 0;
    String.AppendSubString[to, @split];
    StripDot[to];
    END;

  StripDot: PROCEDURE [s: STRING] = INLINE
    BEGIN
    dot: CARDINAL;
    IF s = NIL OR s.length = 0 THEN RETURN;
    IF s[dot _ (s.length - 1)] = '. THEN s.length _ dot;
    END;

  InvalidateFileCache, FlushFileCache: PUBLIC PROC = {};

  EnumerateFiles: PUBLIC --ENTRY-- PROCEDURE [
    proc: PROCEDURE [FHandle] RETURNS [BOOLEAN]] RETURNS [f: FHandle] =
    BEGIN
    -- Note: once we have a pointer to a page of file objects, the tail of
    -- the chain is guaranteed to be good. This is because no pages of the
    -- objects are ever freed, and new pages are only added at the front. So,
    -- we needn't stand on our heads to unlock the monitor around the call to
    -- 'proc'; we just unlock it as soon as we have stashed the head. Of course,
    -- we can't guarantee that the caller of the enumerator will see a consistent
    -- snapshot of the file table, but that guarantee was never present in the
    -- interface anyway, so no functionality has been lost.
    Init: ENTRY PROCEDURE RETURNS [FTableHandle] = INLINE {RETURN[fileTables]};
    -- Use Init[] (not a bare read of fileTables) so the head is stashed under the monitor,
    -- as the note above describes and as EnumerateSegments already does.
    FOR t: FTableHandle _ Init[], t.link UNTIL t = NIL DO
      FOR i: CARDINAL IN [0..FilesPerTable) DO
        f _ @t.files[i];
        IF f.inuse AND proc[f] THEN RETURN[f];
        ENDLOOP;
      ENDLOOP;
    RETURN[NIL]
    END;

  GetFileProperties: PUBLIC PROCEDURE [file: FHandle]
    RETURNS [create, write, read: Time.Packed, length: LONG CARDINAL] =
    BEGIN
    [create: create, write: write, read: read, length: length] _
      FileStream.GetLeaderPropertiesForCapability[file.cap !
        FileStream.NoLeaderPage => {
          create _ write _ read _ [0];
          length _ File.GetSize[file.cap] * 512;
          CONTINUE}]
    END;

  GetFileTimes: PUBLIC PROCEDURE [file: FHandle]
    RETURNS [create, write, read: Time.Packed] =
    BEGIN
    [create: create, write: write, read: read] _
      FileStream.GetLeaderPropertiesForCapability[file.cap !
        FileStream.NoLeaderPage => {create _ write _ read _ [0]; CONTINUE}]
    END;

  GetFileLength: PUBLIC PROCEDURE [file: FHandle] RETURNS [length: LONG CARDINAL] =
    BEGIN
    length _ FileStream.GetLeaderPropertiesForCapability[file.cap !
      FileStream.NoLeaderPage => {
        length _ File.GetSize[file.cap] * 512; CONTINUE}].length
    END;

  InsertFile: PUBLIC ENTRY PROCEDURE [
    file: FPHandle, access: Access _ DefaultAccess] RETURNS [f: FHandle] =
    BEGIN
    -- Things are a bit tricky here, since it really is important that
    -- only a single FHandle exist per file ID. Thus, the enumeration
    -- and potential subsequent GetFile must be done with the monitor
    -- locked. Unfortunately, this means we can't use the standard
    -- EnumerateFiles procedure, so we roll our own local version.
    FOR t: FTableHandle _ fileTables, t.link UNTIL t = NIL DO
      FOR i: CARDINAL IN [0..FilesPerTable) DO
        f _ @t.files[i];
        IF f.inuse AND f.cap.fID = file.fID THEN RETURN;
        ENDLOOP;
      REPEAT
        FINISHED =>
          BEGIN
          f _ GetFile[];
          f^ _ [
            cap: file^, inuse: TRUE, lock: 0, link: NIL, spare1: FALSE,
            spare2: FALSE, spare3: FALSE];
          END;
      ENDLOOP;
    END;

  LockFile: PUBLIC PROCEDURE [file: FHandle] =
    BEGIN
    IF file.lock = MaxFileLocks THEN ERROR FileError[file];
    file.lock _ file.lock + 1;
    RETURN
    END;

  ReleaseFile: PUBLIC PROCEDURE [file: FHandle] =
    BEGIN
    IF file.lock # 0 THEN RETURN;
    ReturnFile[file];
    RETURN
    END;

  SetFileLength: PUBLIC PROCEDURE [file: FHandle, length: LONG CARDINAL] =
    BEGIN
    cap: File.Capability _ file.cap;
    cap.permissions _ AllAccess;
    File.SetSize[
      file: cap,
      size: (length+Environment.bytesPerPage-1)/Environment.bytesPerPage]
    END;

  SetFileTimes: PUBLIC PROCEDURE [
    file: FHandle, create, write, read: Time.Packed _ [0]] =
    BEGIN
    FileStream.SetLeaderPropertiesForCapability[
      cap: file.cap, create: create, write: write, read: read !
      FileStream.NoLeaderPage => CONTINUE]
    END;

  UnlockFile: PUBLIC PROCEDURE [file: FHandle] =
    BEGIN
    IF file.lock = 0 THEN ERROR FileError[file];
    file.lock _ file.lock - 1;
    RETURN
    END;

  -- Seg Sub-Routines

  freeSegs: SHandle _ NIL;
  segTables: STableHandle _ NIL;
  SegsPerTable: CARDINAL = (Environment.wordsPerPage-1)/SIZE[SObject];
  STable: TYPE = RECORD [
    link: STableHandle, segs: ARRAY [0..SegsPerTable) OF SObject];
  STableHandle: TYPE = POINTER TO STable;

  NewSegmentCommon: PROCEDURE [access: Access] RETURNS [s: SHandle] =
    BEGIN
    s _ GetSeg[];
    s.file _ NIL;
    s.space _ Space.nullHandle;
    s.inuse _ TRUE;
    s.mapped _ s.killed _ s.spare1 _ FALSE;
    s.write _ IF access = DefaultAccess OR access = Read THEN FALSE ELSE TRUE;
    s.lock _ 0;
    END;

  ValidateSeg: PUBLIC ENTRY PROCEDURE [s: SHandle] =
    BEGIN OPEN Inline;
    table: STableHandle _ BITAND[s, 177400B];
    t: STableHandle;
    i, j: CARDINAL;
    FOR t _ segTables, t.link UNTIL t = NIL DO
      IF t = table THEN EXIT;
      REPEAT FINISHED => ERROR
        InvalidSegment[s];
      ENDLOOP;
    [i, j] _ DIVMOD[s-@table.segs[0], SIZE[SObject]];
    IF j # 0 OR ~s.inuse THEN ERROR InvalidSegment[s];
    RETURN
    END;

  GetNewSegTable: INTERNAL PROCEDURE = INLINE
    BEGIN
    t: STableHandle _ Storage.Pages[1];
    i: CARDINAL;
    FOR i IN [0..SegsPerTable) DO ReturnSegInternal[@t.segs[i]]; ENDLOOP;
    t.link _ segTables;
    segTables _ t;
    RETURN
    END;

  GetSeg: ENTRY PROCEDURE RETURNS [f: SHandle] = INLINE
    BEGIN
    IF freeSegs = NIL THEN GetNewSegTable[];
    f _ freeSegs;
    freeSegs _ f.link;
    RETURN
    END;

  ReturnSeg: ENTRY PROCEDURE [f: SHandle] = INLINE {ReturnSegInternal[f]};

  ReturnSegInternal: INTERNAL PROCEDURE [f: SHandle] =
    BEGIN
    f.inuse _ FALSE;
    f.link _ freeSegs;
    freeSegs _ f;
    RETURN
    END;

  IsVMSeg: PROCEDURE [seg: SHandle] RETURNS [BOOLEAN] = INLINE {RETURN[seg.spare1]};

  -- File Sub-Routines

  freeFiles: FHandle _ NIL;
  fileTables: FTableHandle _ NIL;
  FilesPerTable: CARDINAL = (Environment.wordsPerPage-1)/SIZE[FObject];
  FTable: TYPE = RECORD [
    link: FTableHandle, files: ARRAY [0..FilesPerTable) OF FObject];
  FTableHandle: TYPE = POINTER TO FTable;

  ValidateFile: PUBLIC ENTRY PROCEDURE [f: FHandle] =
    BEGIN OPEN Inline;
    table: FTableHandle _ BITAND[f, 177400B];
    t: FTableHandle;
    i, j: CARDINAL;
    FOR t _ fileTables, t.link UNTIL t = NIL DO
      IF t = table THEN EXIT;
      REPEAT FINISHED => ERROR InvalidFile[f];
      ENDLOOP;
    [i, j] _ DIVMOD[f-@table.files[0], SIZE[FObject]];
    IF j # 0 OR ~f.inuse THEN ERROR InvalidFile[f];
    RETURN
    END;

  GetFile: INTERNAL PROCEDURE RETURNS [f: FHandle] =
    BEGIN
    IF freeFiles = NIL THEN GetNewFileTable[];
    f _ freeFiles;
    freeFiles _ f.link;
    f.inuse _ TRUE;
    RETURN
    END;

  ReturnFile: ENTRY PROCEDURE [f: FHandle] = INLINE {ReturnFileInternal[f]};

  GetNewFileTable: INTERNAL PROCEDURE = INLINE
    BEGIN
    t: FTableHandle _ Storage.Pages[1];
    i: CARDINAL;
    FOR i IN [0..FilesPerTable) DO ReturnFileInternal[@t.files[i]]; ENDLOOP;
    t.link _ fileTables;
    fileTables _ t;
    RETURN
    END;

  ReturnFileInternal: INTERNAL PROCEDURE [f: FHandle] =
    BEGIN
    f.inuse _ FALSE;
    f.link _ freeFiles;
    freeFiles _ f;
    RETURN
    END;

  -- Streams implementation

  Cleanup: PUBLIC PROCEDURE [h: Streams.Handle] =
    BEGIN
    Stream.SendNow[h !
      FileStream.InvalidOperation => CONTINUE];
    END;

  CreateStream: PUBLIC PROCEDURE [file: FHandle, access: Access _ Read]
    RETURNS [Streams.Handle] =
    BEGIN
    op: Stream.InputOptions = [
      signalEndOfStream: TRUE, signalShortBlock: FALSE, signalLongBlock: FALSE,
      signalSSTChange: FALSE, terminateOnEndPhysicalRecord: FALSE];
    LockFile[file];
    RETURN[FileStream.Create[[file.cap.fID, access], op]]
    END;

  Destroy: PUBLIC PROCEDURE [h: Streams.Handle] = {
    file: FHandle = FileFromStream[h];
    UnlockFile[file];
    IF ReleasableFile[file] THEN ReleaseFile[file];
    h.delete[h]};

  FileFromStream: PUBLIC PROCEDURE [h: Streams.Handle] RETURNS [file: FHandle] = {
    cap: File.Capability _ FileStream.GetCapability[h];
    file _ InsertFile[@cap, AllAccess];
    IF ReleasableFile[file] THEN LockFile[file]};

  GetTimes: PUBLIC PROCEDURE [h: Streams.Handle]
    RETURNS [create, write, read: Time.Packed] =
    BEGIN
    [create: create, write: write, read: read] _ FileStream.GetLeaderProperties[h]
    END;

  NewStream: PUBLIC PROCEDURE [name: STRING, access: Access _ Read]
    RETURNS [Streams.Handle] =
    BEGIN RETURN[CreateStream[NewFile[name, access], access]] END;

  SetTimes: PUBLIC PROCEDURE [
    h: Streams.Handle, create, write, read: Time.Packed] =
    BEGIN
    FileStream.SetLeaderProperties[
      sH: h, create: create, write: write, read: read]
    END;

  GetBlock: PUBLIC PROCEDURE [
    h: Streams.Handle, a: Streams.Address, words: CARDINAL] RETURNS [CARDINAL] =
    BEGIN
    NoOptions: Stream.InputOptions = [FALSE, FALSE, FALSE, FALSE, FALSE];
    RETURN[
      (h.get[
        h, [blockPointer: a, startIndex: 0, stopIndexPlusOne: words*2],
        NoOptions].bytesTransferred+1)/2];
    END;

  PutBlock: PUBLIC PROCEDURE [
    h: Streams.Handle, a: Streams.Address, words: CARDINAL] RETURNS [CARDINAL] =
    BEGIN
    h.put[h, [blockPointer: a, startIndex: 0, stopIndexPlusOne: words*2], FALSE];
    RETURN[words]
    END;

  END.
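
-- Usage sketch (comment only, not part of the module): a minimal illustration of how a
-- client of the Segments interface exported here might map file pages, assuming the
-- Segments constants DefaultBase and DefaultAccess; the file name is made up.
--   file: FHandle _ NewFile["Example.data"L];         -- FHandle via InsertFile; not yet locked
--   seg: SHandle _ NewSegment[file, DefaultBase, 4];  -- hangs a 4-page segment on it; locks the file
--   SwapIn[seg];                                      -- creates/maps the space; seg.lock becomes 1
--   p: Address _ SegmentAddress[seg];                 -- long pointer to the mapped pages
--   -- ... read or write through p ...
--   Unlock[seg];                                      -- seg.lock back to 0, so SwapOut is legal
--   DeleteSegment[seg];                               -- swaps out, frees the SObject, unlocks the file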