File: CastRaysImplA.mesa
Author: Eric Bier in the summer of 1982
Copyright © 1984 by Xerox Corporation. All rights reserved.
Last edited by Bier on March 13, 1987 0:16:12 am PST
Contents: The ray casting (as opposed to tree building) part of the CSG package. CSG.mesa builds the trees
DIRECTORY
AIS, AtomButtonsTypes, BasicTime, CastRays, CoordSys, CSG, CSGGraphics, Feedback, Imager, ImagerColor, IO, Matrix3d, Preprocess3d, Real, Rope, Shading, SV2d, SV3d, SVArtwork, SVBasicTypes, SVBoundBox, SVFancyRays, SVImage, SVModelTypes, SVRayTypes, SVVector3d, ViewerClasses;
CastRaysImplA:
CEDAR PROGRAM
IMPORTS BasicTime, CastRays, CoordSys, CSG, ImagerColor, IO, Matrix3d, Preprocess3d, Real, Rope, Shading, SVArtwork, SVBoundBox, Feedback, SVFancyRays, SVImage, SVVector3d
EXPORTS CastRays =
BEGIN
-- Local abbreviations for types defined in the interfaces listed in DIRECTORY.
Artwork: TYPE = SVModelTypes.Artwork;
BoundBox: TYPE = SVBasicTypes.BoundBox;
BoundSphere: TYPE = SVBasicTypes.BoundSphere;
Camera: TYPE = SVModelTypes.Camera;
Color: TYPE = Imager.Color;
Composite: TYPE = SVRayTypes.Composite;
CoordSystem: TYPE = SVModelTypes.CoordSystem;
CSGTree: TYPE = SVRayTypes.CSGTree;
FeedbackData: TYPE = AtomButtonsTypes.FeedbackData;
LightSourceList: TYPE = SVModelTypes.LightSourceList;
NotifyOfProgressProc: TYPE = CastRays.NotifyOfProgressProc;
Point3d: TYPE = SV3d.Point3d;
Point2d: TYPE = SV2d.Point2d;
PointSetOp: TYPE = SVRayTypes.PointSetOp;
Primitive: TYPE = SVRayTypes.Primitive;
Matrix4by4: TYPE = SV3d.Matrix4by4;
Ray: TYPE = SVRayTypes.Ray;
Surface: TYPE = REF ANY;
Vector3d: TYPE = SV3d.Vector3d;
Viewer: TYPE = ViewerClasses.Viewer;
Classification: TYPE = REF ClassificationObj;
ClassificationObj: TYPE = SVRayTypes.ClassificationObj;
SurfaceArray: TYPE = REF SurfaceArrayObj;
SurfaceArrayObj: TYPE = SVRayTypes.SurfaceArrayObj;
ParameterArray: TYPE = SVRayTypes.ParameterArray;
InOutArray: TYPE = SVRayTypes.InOutArray;
NormalArray: TYPE = SVRayTypes.NormalArray;
PrimitiveArray: TYPE = SVRayTypes.PrimitiveArray;
CompactArray: TYPE = REF CompactArrayObj;
-- One BOOL per possible surface crossing; TRUE means "keep this entry" when a
-- classification is compacted (see CompactClassification below).
CompactArrayObj: TYPE = ARRAY [1..SVRayTypes.maxSceneDepth] OF BOOL;
Image: TYPE = REF ImageObj;
ImageObj: TYPE = SVImage.ImageObj;
-- Pool of reusable Classification records, managed by GetClassFromPool/ReturnClassToPool,
-- so a fresh record need not be allocated for every ray cast.
globalPoolCount: NAT = 10;
globalPoolPointer: NAT;
Pool: TYPE = REF PoolObj;
PoolObj: TYPE = RECORD [seq: SEQUENCE maxClasses: NAT OF Classification];
globalPool: Pool;
-- Analogous pool of CompactArrays used by ConsolidateClassification.
globalCompactPoolCount: NAT = 10;
globalCompactPoolPointer: NAT;
CompactPool: TYPE = REF CompactPoolObj;
CompactPoolObj: TYPE = ARRAY[1..globalCompactPoolCount] OF CompactArray;
globalCompactPool: CompactPool;
WriteStreamComp:
PUBLIC
PROC [comp: Composite, class: Classification, feedback: FeedbackData, makeStream:
BOOL, indent:
NAT] = {
RayCast is about to return class. Write the name of comp and summarize the classification.
-- Debug trace: prints the composite's operation name, the names of its two sons, and the
-- hit count, followed by one line per hit. No-op unless makeStream. NOTE(review): the
-- feedback argument is unused here; output goes to the $Solidviews typescript stream.
opname, leftName, rightName: Rope.ROPE;
f: IO.STREAM;
IF NOT makeStream THEN RETURN;
f ← Feedback.GetTypescriptStream[$Solidviews];
Indent[f, indent];
SELECT comp.operation
FROM
union => opname ← "union";
intersection => opname ← "intersection";
difference => opname ← "difference";
ENDCASE => ERROR;
-- The sons of a composite are either primitives or composites; anything else is a program error.
WITH comp.leftSolid
SELECT
FROM
p: Primitive => leftName ← p.name;
c: Composite => leftName ← c.name;
ENDCASE => ERROR;
WITH comp.rightSolid
SELECT
FROM
p: Primitive => rightName ← p.name;
c: Composite => rightName ← c.name;
ENDCASE => ERROR;
f.PutF["Composite %g [op: %g] (%g %g) returns class: [count: %g]\n", [rope[comp.name]],[rope[opname]], [rope[leftName]], [rope[rightName]], [integer[class.count]]];
WritePrimNames[class, f, indent];
}; -- end of WriteStreamComp
Indent:
PROC [f:
IO.
STREAM, indent:
NAT] = {
-- Writes one tab character onto f for each level of indentation.
THROUGH [1..indent] DO
f.PutChar[IO.TAB];
ENDLOOP;
};
WritePrimNames:
PROC [class: Classification, f:
IO.
STREAM, indent:
NAT] = {
-- Writes one indented line per ray-surface hit in class: the ordinal, the primitive's
-- name, and the ray parameter t at the crossing.
-- NOTE(review): byte-for-byte duplicate of WriteParams below; candidates for merging.
FOR i:
NAT
IN[1..class.count]
DO
Indent[f, indent+1];
f.PutF["%g) %g at t = %g\n", [integer[i]], [rope[class.primitives[i].name]],
[real[class.params[i]]]];
ENDLOOP;
}; -- end of WritePrimNames
WriteStreamPrim:
PUBLIC
PROC [prim: Primitive, class: Classification, feedback: FeedbackData, makeStream:
BOOL, indent:
NAT] = {
-- Debug trace for a primitive: prints its name and hit count, then one line per hit.
-- No-op unless makeStream. NOTE(review): feedback is unused; output goes to the
-- $Solidviews typescript stream.
f: IO.STREAM;
IF NOT makeStream THEN RETURN;
f ← Feedback.GetTypescriptStream[$Solidviews];
Indent[f, indent];
f.PutF["Primitive %g returns class: [count: %g]\n", [rope[prim.name]], [integer[class.count]]];
WriteParams[class, f, indent];
}; -- end of WriteStreamPrim
WriteParams:
PROC [class: Classification, f:
IO.
STREAM, indent:
NAT] = {
-- Writes one indented line per ray-surface hit in class: the ordinal, the primitive's
-- name, and the ray parameter t at the crossing.
-- This body was a byte-for-byte duplicate of WritePrimNames; it now delegates to it so
-- the output format is maintained in exactly one place.
WritePrimNames[class, f, indent];
}; -- end of WriteParams
DoesHit:
PROC [class: Classification]
RETURNS [
BOOL] = {
-- TRUE iff the classification records any surface crossings (count > 0) or the ray
-- begins inside the solid (classifs[1]). The redundant "= TRUE" comparison on the
-- BOOL has been dropped.
RETURN[class.count > 0 OR class.classifs[1]];
};
RayCast:
PUBLIC
PROC [cameraPoint: Point2d, worldRay: Ray, node:
REF
ANY, consolidate:
BOOL ←
TRUE, feedback: FeedbackData, makeStream:
BOOL ←
FALSE, indent:
NAT ← 0]
RETURNS [class: Classification] = {
The main ray casting procedure. Scene Ray must be in WORLD coordinates before this procedure is called.
-- Recursively classifies worldRay against the CSG tree rooted at node. cameraPoint is the
-- screen point the ray was shot through; it is used only for the 2d bound box tests that
-- prune subtrees. The returned Classification is taken from the pool; the caller must
-- eventually hand it to ReturnClassToPool. The Combine procedures consume both of their
-- argument classifications, so pool bookkeeping balances on every path below.
IF node = NIL THEN {class ← EmptyClass[]; RETURN};
WITH node SELECT FROM
comp: Composite => {
leftClass, rightClass: Classification;
leftBoxHit, leftHit, rightBoxHit, rightHit: BOOL;
totalMiss: BOOL ← FALSE;
boundBox: BoundBox;
Before casting each ray, see if the ray will be in the bounding box of the son node.
For optimizing, here is the plan:
1) Check ray for left bound box. Set leftBoxHit if appropriate.
2) If leftBoxHit then cast the ray. Set leftHit if appropriate.
3) If not leftHit then if comp.operation = intersection or difference, return miss.
4) If hit, or union, then right box test. Set RightBoxMiss if appropriate.
5) If miss then return: leftclass for difference, empty for intersection, leftClass for union.
6) Else cast ray.
7) Return rightclass or combination if appropriate
1) Check ray for left bound box. Set leftBoxHit if appropriate.
WITH comp.leftSolid
SELECT
FROM
p: Primitive => boundBox ← p.boundBox;
c: Composite => boundBox ← c.boundBox;
ENDCASE => ERROR;
leftBoxHit ← SVBoundBox.PointInBoundBox[cameraPoint, boundBox];
2) If leftBoxHit then cast the ray. Set leftHit if appropriate.
IF leftBoxHit
THEN {
leftClass ← RayCast[cameraPoint, worldRay, comp.leftSolid, consolidate, feedback, makeStream, indent];
leftHit ← DoesHit[leftClass]; }
ELSE {leftHit ← FALSE; leftClass ← EmptyClass[]};
3) If not leftHit then if comp.operation = intersection or difference, return miss.
IF NOT leftHit THEN IF comp.operation = intersection OR comp.operation = difference
THEN {
class ← leftClass; WriteStreamComp[comp, class, feedback, makeStream, indent]; RETURN};
leftClass is (or is equivalent to) EmptyClass[];
4) If hit, or union, then right box test. Set RightBoxMiss if appropriate. (we don't have to test for this state. It is the only one left.)
WITH comp.rightSolid
SELECT
FROM
p: Primitive => boundBox ← p.boundBox;
c: Composite => boundBox ← c.boundBox;
ENDCASE => ERROR;
rightBoxHit ← SVBoundBox.PointInBoundBox[cameraPoint, boundBox];
5) If miss then return EmptyClass. Else cast ray.
IF NOT rightBoxHit THEN
This could be a union with or without a left miss or (intersection/difference) with an initial hit.
SELECT comp.operation
FROM
union => {class ← leftClass; WriteStreamComp[comp, class, feedback, makeStream, indent]; RETURN};
intersection =>
IF NOT leftHit THEN RETURN[leftClass]
ELSE {
ReturnClassToPool[leftClass]; class ← EmptyClass[]; WriteStreamComp[comp, class, feedback, makeStream, indent]; RETURN};
difference => {class ← leftClass; WriteStreamComp[comp, class, feedback, makeStream, indent]; RETURN};
ENDCASE => ERROR;
6) Else cast ray. We have Union, or (intersection/difference) with left hit. Ray hits box.
rightClass ← RayCast[cameraPoint, worldRay, comp.rightSolid, consolidate, feedback, makeStream, indent];
rightHit ← DoesHit[rightClass];
7) Return rightclass, combination or empty if appropriate
-- In each miss case below, the discarded son's classification is an empty one and is
-- recycled; the Combine procedures recycle both inputs themselves.
SELECT comp.operation
FROM
union =>
IF rightHit
THEN {
IF leftHit THEN class ← UnionCombine[leftClass, rightClass, consolidate]
ELSE {ReturnClassToPool[leftClass]; class ← rightClass}
}
ELSE {
ReturnClassToPool[rightClass]; class ← leftClass};
intersection =>
IF rightHit
THEN {
IF leftHit THEN class ← IntersectionCombine[leftClass, rightClass, consolidate]
ELSE {ReturnClassToPool[rightClass]; class ← leftClass;}
}
ELSE
IF leftHit
THEN {ReturnClassToPool[leftClass]; class ← rightClass}
ELSE {ReturnClassToPool[rightClass]; class ← leftClass};
difference =>
IF rightHit
THEN {
IF leftHit THEN class ← DifferenceCombine[leftClass, rightClass, consolidate]
ELSE {ReturnClassToPool[rightClass]; class ← leftClass} -- leftClass null
}
ELSE {ReturnClassToPool[rightClass]; class ← leftClass};
ENDCASE => ERROR;
WriteStreamComp[comp, class, feedback, makeStream, indent];
RETURN};
prim: Primitive => {
-- Leaf case: transform the ray into the primitive's coordinate system and let the
-- primitive's own rayCast proc classify it. Primitives flagged ignoreMe always miss.
localRay: Ray;
IF prim.ignoreMe
THEN
{class ← CastRays.GetClassFromPool[]; CastRays.MakeClassAMiss[class]; RETURN};
localRay ← CSG.TransformRay[worldRay, prim.worldWRTPrim]; -- (takes a new ray from the pool)
class ← prim.rayCast[cameraPoint, localRay, prim.mo, prim];
WriteStreamPrim[prim, class, feedback, makeStream, 0];
CSG.ReturnRayToPool[localRay]; -- returns ray to pool
RETURN};
ENDCASE => ERROR;
}; -- end of RayCast
RayCastNoBBoxes:
PUBLIC
PROC [worldRay: Ray, node:
REF
ANY, consolidate:
BOOL ←
TRUE, feedback: FeedbackData, makeStream:
BOOL ←
FALSE, indent:
NAT ← 0]
RETURNS [class: Classification] = {
Ignore any bounding boxes which were computed. This is useful if the ray does not originate from the screen (as for computing shadows). Of course, bounding spheres would be useful in this case.
The main ray casting procedure. Scene Ray must be in WORLD coordinates before this procedure is called.
-- Same recursive classification as RayCast, minus the 2d bound box pruning (which is
-- only valid for rays shot from the screen). The returned Classification comes from the
-- pool; the caller must hand it to ReturnClassToPool.
IF node = NIL THEN {class ← EmptyClass[]; RETURN};
WITH node SELECT FROM
comp: Composite => {
leftClass, rightClass: Classification;
leftHit, rightHit: BOOL;
totalMiss: BOOL ← FALSE;
For optimizing, here is the plan:
1) Cast the left ray. Set leftHit if appropriate.
2) If not leftHit then if comp.operation = intersection or difference, return miss.
3) If hit, or union, then cast right ray.
4) Return rightclass or combination if appropriate
1) Cast the left ray. Set leftHit if appropriate.
leftClass ← RayCastNoBBoxes[worldRay, comp.leftSolid, consolidate, feedback, makeStream, indent];
leftHit ← DoesHit[leftClass];
2) If not leftHit then if comp.operation = intersection or difference, return miss.
IF NOT leftHit THEN IF comp.operation = intersection OR comp.operation = difference
THEN {
class ← leftClass; WriteStreamComp[comp, class, feedback, makeStream, indent]; RETURN};
leftClass is (or is equivalent to) EmptyClass[];
3) If hit, or union, then cast right ray.
rightClass ← RayCastNoBBoxes[worldRay, comp.rightSolid, consolidate, feedback, makeStream, indent];
rightHit ← DoesHit[rightClass];
4) Return rightclass, combination or empty if appropriate
-- The Combine procedures recycle both inputs; in each miss case the discarded (empty)
-- classification is returned to the pool explicitly.
SELECT comp.operation
FROM
union =>
IF rightHit
THEN {
IF leftHit THEN class ← UnionCombine[leftClass, rightClass, consolidate]
ELSE {ReturnClassToPool[leftClass]; class ← rightClass}
}
ELSE {
ReturnClassToPool[rightClass]; class ← leftClass};
intersection =>
IF rightHit
THEN {
IF leftHit THEN class ← IntersectionCombine[leftClass, rightClass, consolidate]
ELSE {ReturnClassToPool[rightClass]; class ← leftClass;}
}
ELSE
IF leftHit
THEN {ReturnClassToPool[leftClass]; class ← rightClass}
ELSE {ReturnClassToPool[rightClass]; class ← leftClass};
difference =>
IF rightHit
THEN {
IF leftHit THEN class ← DifferenceCombine[leftClass, rightClass, consolidate]
ELSE {ReturnClassToPool[rightClass]; class ← leftClass} -- leftClass null
}
ELSE {ReturnClassToPool[rightClass]; class ← leftClass};
ENDCASE => ERROR;
WriteStreamComp[comp, class, feedback, makeStream, indent];
RETURN};
prim: Primitive => {
-- Leaf case: dispatch to the primitive's rayCastNoBBoxes proc in the primitive's own
-- coordinate system. Primitives flagged ignoreMe always miss.
localRay: Ray;
IF prim.ignoreMe
THEN
{class ← CastRays.GetClassFromPool[]; CastRays.MakeClassAMiss[class]; RETURN};
localRay ← CSG.TransformRay[worldRay, prim.worldWRTPrim]; -- (takes a new ray from the pool)
class ← prim.rayCastNoBBoxes[localRay, prim.mo, prim];
WriteStreamPrim[prim, class, feedback, makeStream, 0];
CSG.ReturnRayToPool[localRay]; -- returns ray to pool
RETURN};
ENDCASE => ERROR;
}; -- end of RayCastNoBboxes
HitsTree:
PUBLIC
PROC [worldRay: Ray, tree: CSGTree]
RETURNS [
BOOL] = {
-- TRUE iff worldRay strikes any solid in tree. Casts without bound boxes since the ray
-- need not originate at the screen; the classification is recycled before returning.
result: BOOL;
class: Classification ← RayCastNoBBoxes [worldRay: worldRay, node: tree.son, feedback: NIL, makeStream: FALSE];
result ← DoesHit[class];
ReturnClassToPool[class];
RETURN[result];
};
FirstHit:
PUBLIC
PROC [worldRay: Ray, tree: CSGTree, useBoundSpheres:
BOOL, feedback: FeedbackData, makeStream:
BOOL ←
FALSE, indent:
NAT ← 0]
RETURNS [hits:
BOOL, t:
REAL] = {
Like HitsTree but returns the parameter value at the first inward-going hit, if any. If we start inside of an object, wait until we are out.
node: REF ANY ← tree.son;
class: Classification;
IF NOT useBoundSpheres THEN class ← RayCastNoBBoxes [worldRay: worldRay, node: node, makeStream: makeStream, feedback: feedback, indent: indent]
ELSE class ← CastRays.RayCastBoundingSpheres [worldRay: worldRay, node: node, feedback: feedback, makeStream: makeStream, indent: indent];
hits ← FALSE; t ← 0.0;
-- Scan for the first interval on which the ray is outside the solid (classifs[i] = FALSE).
FOR i:
NAT
IN [1..class.count]
DO
IF NOT class.classifs[i] THEN GOTO BeenOut;
REPEAT
BeenOut => {
-- Once outside, the next TRUE classif marks an inward-going crossing; its parameter
-- is params[j-1] since classifs[j] describes the interval beyond params[j-1].
hits ← FALSE;
FOR j:
NAT
IN [i+1..class.count+1]
DO
IF class.classifs[j]
THEN {
hits ← TRUE; t ← class.params[j-1];
ReturnClassToPool[class]; RETURN;
};
ENDLOOP;
ReturnClassToPool[class];
RETURN;
};
FINISHED => {
-- Never got outside the solid over the whole classification: no inward-going hit.
ReturnClassToPool[class];
RETURN;
};
ENDLOOP;
};
EmptyClass:
PUBLIC
PROC
RETURNS [class: Classification] = {
-- Produces a classification representing a total miss: no surface crossings and the ray
-- starting outside the solid (classifs[1] = FALSE). Taken from the pool; the caller must
-- eventually return it with ReturnClassToPool.
class ← GetClassFromPool[];
class.count ← 0;
class.classifs[1] ← FALSE;
}; -- end of EmptyClass
Each primitive shape must have a procedure here which can classify a ray with respect to it.
SceneExceedsMaximumDepth: SIGNAL = CODE;
UnionCombine:
PUBLIC PROC [leftClass, rightClass: Classification, consolidate:
BOOL]
RETURNS [combinedClass: Classification] = {
Merge the two sorted lists together classifying the segments by the OR of the Classifs for each segment
-- Both inputs are consumed (returned to the pool); the result is a fresh pooled
-- classification. classifs[k] is the in-out state of the interval beyond params[k-1],
-- so a classification with count crossings carries count+1 classifs.
lPtr, rPtr: NAT;
combinedClass ← GetClassFromPool[];
lPtr ← rPtr ← 1;
combinedClass.count ← leftClass.count + rightClass.count;
IF combinedClass.count > SVRayTypes.maxSceneDepth THEN SIGNAL SceneExceedsMaximumDepth;
-- Standard two-pointer merge of the sorted parameter lists; each merged interval is
-- classified by the OR of the two sides' current in-out states.
FOR i:
NAT
IN[1..combinedClass.count]
DO
IF rPtr > rightClass.count THEN GOTO RPtrWentOver;
IF lPtr > leftClass.count THEN GOTO LPtrWentOver;
IF leftClass.params[lPtr] < rightClass.params[rPtr]
THEN {
combinedClass.normals[i] ← leftClass.normals[lPtr];
combinedClass.params[i] ← leftClass.params[lPtr];
combinedClass.surfaces[i] ← leftClass.surfaces[lPtr];
combinedClass.primitives[i] ← leftClass.primitives[lPtr];
combinedClass.classifs[i] ← leftClass.classifs[lPtr] OR rightClass.classifs[rPtr];
lPtr ← lPtr + 1;
}
ELSE {
combinedClass.normals[i] ← rightClass.normals[rPtr];
combinedClass.params[i] ← rightClass.params[rPtr];
combinedClass.surfaces[i] ← rightClass.surfaces[rPtr];
combinedClass.primitives[i] ← rightClass.primitives[rPtr];
combinedClass.classifs[i] ← leftClass.classifs[lPtr] OR rightClass.classifs[rPtr];
rPtr ← rPtr + 1;
};
REPEAT
RPtrWentOver => { -- finish up with lPtr data
-- rPtr now indexes the exhausted side's final (count+1st) classif, its state at infinity.
FOR k:
NAT ← i, k+1
UNTIL k > combinedClass.count
DO
combinedClass.normals[k] ← leftClass.normals[lPtr];
combinedClass.params[k] ← leftClass.params[lPtr];
combinedClass.surfaces[k] ← leftClass.surfaces[lPtr];
combinedClass.primitives[k] ← leftClass.primitives[lPtr];
combinedClass.classifs[k] ← leftClass.classifs[lPtr] OR rightClass.classifs[rPtr];
lPtr ← lPtr + 1;
ENDLOOP};
LPtrWentOver => { -- finish up with rPtr data
FOR k:
NAT ← i, k+1
UNTIL k > combinedClass.count
DO
combinedClass.normals[k] ← rightClass.normals[rPtr];
combinedClass.params[k] ← rightClass.params[rPtr];
combinedClass.surfaces[k] ← rightClass.surfaces[rPtr];
combinedClass.primitives[k] ← rightClass.primitives[rPtr];
combinedClass.classifs[k] ← leftClass.classifs[lPtr] OR rightClass.classifs[rPtr];
rPtr ← rPtr + 1;
ENDLOOP};
ENDLOOP;
combinedClass.classifs[combinedClass.count+1] ← leftClass.classifs[lPtr] OR rightClass.classifs[rPtr];
IF consolidate THEN ConsolidateClassification[combinedClass];
ReturnClassToPool[leftClass];
ReturnClassToPool[rightClass];
}; -- end of UnionCombine
IntersectionCombine:
PUBLIC PROC [leftClass, rightClass: Classification, consolidate:
BOOL]
RETURNS [combinedClass: Classification] = {
Merge the two sorted lists together classifying the segments by the AND of the Classifs for each segment
-- Identical merge structure to UnionCombine, but each merged interval is classified by
-- the AND of the two sides' in-out states. Both inputs are consumed (returned to the
-- pool); the result is a fresh pooled classification.
lPtr, rPtr: NAT;
combinedClass ← GetClassFromPool[];
lPtr ← rPtr ← 1;
combinedClass.count ← leftClass.count + rightClass.count;
IF combinedClass.count > SVRayTypes.maxSceneDepth THEN SIGNAL SceneExceedsMaximumDepth;
FOR i:
NAT
IN[1..combinedClass.count]
DO
IF rPtr > rightClass.count THEN GOTO RPtrWentOver;
IF lPtr > leftClass.count THEN GOTO LPtrWentOver;
IF leftClass.params[lPtr] < rightClass.params[rPtr]
THEN {
combinedClass.normals[i] ← leftClass.normals[lPtr];
combinedClass.params[i] ← leftClass.params[lPtr];
combinedClass.surfaces[i] ← leftClass.surfaces[lPtr];
combinedClass.primitives[i] ← leftClass.primitives[lPtr];
combinedClass.classifs[i] ← leftClass.classifs[lPtr] AND rightClass.classifs[rPtr];
lPtr ← lPtr + 1;
}
ELSE {
combinedClass.normals[i] ← rightClass.normals[rPtr];
combinedClass.params[i] ← rightClass.params[rPtr];
combinedClass.surfaces[i] ← rightClass.surfaces[rPtr];
combinedClass.primitives[i] ← rightClass.primitives[rPtr];
combinedClass.classifs[i] ← leftClass.classifs[lPtr] AND rightClass.classifs[rPtr];
rPtr ← rPtr + 1;
};
REPEAT
RPtrWentOver => { -- finish up with lPtr data
FOR k:
NAT ← i, k+1
UNTIL k > combinedClass.count
DO
combinedClass.normals[k] ← leftClass.normals[lPtr];
combinedClass.params[k] ← leftClass.params[lPtr];
combinedClass.surfaces[k] ← leftClass.surfaces[lPtr];
combinedClass.primitives[k] ← leftClass.primitives[lPtr];
combinedClass.classifs[k] ← leftClass.classifs[lPtr] AND rightClass.classifs[rPtr];
lPtr ← lPtr + 1;
ENDLOOP};
LPtrWentOver => { -- finish up with rPtr data
FOR k:
NAT ← i, k+1
UNTIL k > combinedClass.count
DO
combinedClass.normals[k] ← rightClass.normals[rPtr];
combinedClass.params[k] ← rightClass.params[rPtr];
combinedClass.surfaces[k] ← rightClass.surfaces[rPtr];
combinedClass.primitives[k] ← rightClass.primitives[rPtr];
combinedClass.classifs[k] ← leftClass.classifs[lPtr] AND rightClass.classifs[rPtr];
rPtr ← rPtr + 1;
ENDLOOP};
ENDLOOP;
combinedClass.classifs[combinedClass.count+1] ← leftClass.classifs[lPtr] AND rightClass.classifs[rPtr];
IF consolidate THEN ConsolidateClassification[combinedClass];
ReturnClassToPool[leftClass];
ReturnClassToPool[rightClass];
}; -- end of IntersectionCombine
DifferenceCombine:
PUBLIC
PROC [leftClass, rightClass: Classification, consolidate:
BOOL]
RETURNS [combinedClass: Classification] = {
Merge the two sorted lists together classifying the segments by the (left AND NOT right) of the Classifs for each segment
-- Both inputs are consumed (returned to the pool); the result is a fresh pooled
-- classification. Right-hand surface normals are negated, since a surface of the
-- subtracted solid faces the opposite way on the resulting solid.
lPtr, rPtr: NAT;
combinedClass ← GetClassFromPool[];
lPtr ← rPtr ← 1;
combinedClass.count ← leftClass.count + rightClass.count;
-- BUG FIX: this depth test formerly preceded the assignment of combinedClass.count just
-- above, so it examined the stale count left in the pooled record. Moved after the
-- assignment, matching UnionCombine and IntersectionCombine.
IF combinedClass.count > SVRayTypes.maxSceneDepth THEN SIGNAL SceneExceedsMaximumDepth;
FOR i:
NAT
IN[1..combinedClass.count]
DO
IF rPtr > rightClass.count THEN GOTO RPtrWentOver;
IF lPtr > leftClass.count THEN GOTO LPtrWentOver;
IF leftClass.params[lPtr] < rightClass.params[rPtr]
THEN {
combinedClass.normals[i] ← leftClass.normals[lPtr];
combinedClass.params[i] ← leftClass.params[lPtr];
combinedClass.surfaces[i] ← leftClass.surfaces[lPtr];
combinedClass.primitives[i] ← leftClass.primitives[lPtr];
combinedClass.classifs[i] ← leftClass.classifs[lPtr] AND NOT rightClass.classifs[rPtr];
lPtr ← lPtr + 1;
}
ELSE {
combinedClass.normals[i] ← SVVector3d.Negate[rightClass.normals[rPtr]];
combinedClass.params[i] ← rightClass.params[rPtr];
combinedClass.surfaces[i] ← rightClass.surfaces[rPtr];
combinedClass.primitives[i] ← rightClass.primitives[rPtr];
combinedClass.classifs[i] ← leftClass.classifs[lPtr] AND NOT rightClass.classifs[rPtr];
rPtr ← rPtr + 1;
};
REPEAT
RPtrWentOver => { -- finish up with lPtr data
FOR k:
NAT ← i, k+1
UNTIL k > combinedClass.count
DO
combinedClass.normals[k] ← leftClass.normals[lPtr];
combinedClass.params[k] ← leftClass.params[lPtr];
combinedClass.surfaces[k] ← leftClass.surfaces[lPtr];
combinedClass.primitives[k] ← leftClass.primitives[lPtr];
combinedClass.classifs[k] ← leftClass.classifs[lPtr] AND NOT rightClass.classifs[rPtr];
lPtr ← lPtr + 1;
ENDLOOP};
LPtrWentOver => { -- finish up with rPtr data
FOR k:
NAT ← i, k+1
UNTIL k > combinedClass.count
DO
combinedClass.normals[k] ← SVVector3d.Negate[rightClass.normals[rPtr]];
combinedClass.params[k] ← rightClass.params[rPtr];
combinedClass.surfaces[k] ← rightClass.surfaces[rPtr];
combinedClass.primitives[k] ← rightClass.primitives[rPtr];
combinedClass.classifs[k] ← leftClass.classifs[lPtr] AND NOT rightClass.classifs[rPtr];
rPtr ← rPtr + 1;
ENDLOOP};
ENDLOOP;
combinedClass.classifs[combinedClass.count+1] ← leftClass.classifs[lPtr] AND NOT rightClass.classifs[rPtr];
IF consolidate THEN ConsolidateClassification[combinedClass];
ReturnClassToPool[leftClass];
ReturnClassToPool[rightClass];
}; -- end of DifferenceCombine
ConsolidateClassification:
PROC [class: Classification] = {
Combine adjacent regions which have the same classif and throw out the surface and parameter information at those points
recall ClassificationObj is RECORD [count, params, surfaces, classifs, topNormal];
-- Marks entry i-1 for retention only when classifs[i] differs from the running in-out
-- state, i.e. the surface at params[i-1] is a genuine in/out transition; then compacts
-- the classification in place. The scratch CompactArray is pooled.
currentlyWorkingOn: BOOL;
compact: CompactArray ← GetCompactFromPool[];
currentlyWorkingOn ← class.classifs[1];
FOR i:
NAT
IN[2..class.count+1]
DO
IF class.classifs[i] = currentlyWorkingOn
THEN
-- this is not a transition so throw it out
compact[i-1] ← FALSE -- don't keep it
ELSE {compact[i-1] ← TRUE; currentlyWorkingOn ← class.classifs[i];};
ENDLOOP;
CompactClassification[class, compact];
ReturnCompactToPool[compact];
}; -- end of ConsolidateClassification
CompactClassification:
PROC [class: Classification, compact: CompactArray] = {
Compact[i] is TRUE if we should keep class.*[i], FALSE otherwise. Order is preserved among the items we keep.
-- In-place stable filter of the parallel arrays (params, classifs, normals, surfaces,
-- primitives); safe because newCount never exceeds i. class.count is updated last.
newCount: NAT;
newCount ← 0;
FOR i:
NAT
IN[1..class.count]
DO
IF compact[i] THEN {
newCount ← newCount + 1;
class.params[newCount] ← class.params[i];
class.classifs[newCount] ← class.classifs[i];
class.normals[newCount] ← class.normals[i];
class.surfaces[newCount] ← class.surfaces[i];
class.primitives[newCount] ← class.primitives[i];};
ENDLOOP;
class.classifs[newCount+1] ← class.classifs[class.count+1];
The in-out value on the far side of the last param that changed in-out will always be the last value given in the class.
class.count ← newCount;
};
SingleRay:
PUBLIC
PROC [cameraPoint: Point2d, tree: CSGTree, lightSources: LightSourceList, camera: Camera, feedback: FeedbackData, makeStream:
BOOL ←
FALSE]
RETURNS [color: Color] = {
Cast a single ray at the scene. Report the results to the output stream and find the color at that point.
-- Unlike SingleRay2, this preprocesses the tree itself and does full shading, so it is
-- self-contained: build a camera ray through cameraPoint, transform it to WORLD
-- coordinates, shade via TopColorCast, and return the normalized color.
cameraRay, worldRay: Ray;
cameraWRTWorld: Matrix3d.Matrix4by4;
boundBox: BoundBox;
boundSphere: BoundSphere;
r,g,b: REAL;
cameraRay ← CSG.CreateRay[];
[boundBox, boundSphere] ← Preprocess3d.PreprocessForImage[tree, camera]; -- must call this before casting rays
CSG.StuffCameraRay[cameraRay, cameraPoint, camera];
ray with respect to Camera (perspective)
Find WORLD ray.
cameraWRTWorld ← CoordSys.FindInTermsOfWorld[camera.coordSys];
worldRay ← CSG.TransformRayToWorld[cameraRay, cameraWRTWorld]; -- allocates ray from pool
IF makeStream THEN Feedback.PutFTypescript[feedback, oneLiner, "\n"];
[r,g,b] ← TopColorCast[cameraPoint, worldRay, tree, lightSources, camera, boundBox, boundSphere, feedback, makeStream, 0];
color ← Shading.NormalizeRGB[r,g,b];
IF makeStream THEN Feedback.PutFTypescript[feedback, oneLiner, "\n"];
CSG.ReturnRayToPool[worldRay];
}; -- end of SingleRay
SingleRay2:
PUBLIC
PROC [cameraPoint: Point2d, tree: CSGTree, camera: Camera, consolidate:
BOOL ←
TRUE, feedback: FeedbackData, makeStream:
BOOL ←
FALSE]
RETURNS [class: Classification, ray
World: Ray] = {
Cast a single ray at the scene from the given cameraPoint (point in CAMERA coords on the z = 0 plane). The client must be sure that the tree has been preprocessed so RayCast will have bounding boxes to work with. (See Preprocess3d).
The client must be sure to call ReturnClassToPool[class] when he is done with it.
-- NOTE(review): the second result is named rayWorld (its name is split across two lines
-- above, apparently by document extraction). The caller owns rayWorld too and should
-- return it to the ray pool when done.
topNode: REF ANY ← tree.son;
rayCamera: Ray;
cameraWRTWorld: Matrix4by4 ← CoordSys.FindInTermsOfWorld[camera.coordSys];
rayCamera ← CSG.GetRayFromPool[];
CSG.StuffCameraRay[rayCamera, cameraPoint, camera];
rayWorld ← CSG.TransformRayToWorld[rayCamera, cameraWRTWorld]; -- allocates ray from pool
CSG.ReturnRayToPool[rayCamera];
class ← RayCast[cameraPoint, rayWorld, topNode, consolidate, feedback, makeStream, 0];
}; -- end of SingleRay2
NodeToRope:
PROC [node:
REF
ANY, depth:
NAT]
RETURNS [r: Rope.
ROPE] = {
-- Produces a readable name for a CSG node. A primitive yields its own name; a composite
-- yields its name and, when depth >= 2, recursively appends its sons' names in the form
-- "name: left/right". NIL yields NIL.
IF node = NIL THEN RETURN[NIL];
WITH node
SELECT
FROM
prim: Primitive => {r ← prim.name; RETURN};
comp: Composite => {r ← comp.name;
IF depth < 2 THEN RETURN
ELSE {r1: Rope.ROPE;
r2: Rope.ROPE;
leftSon: REF ANY ← comp.leftSolid;
rightSon: REF ANY ← comp.rightSolid;
r1 ← NodeToRope[leftSon, depth - 1];
r2 ← NodeToRope[rightSon, depth - 1];
r ← Rope.Cat[r,": ",r1,"/",r2];
RETURN};
};
ENDCASE => ERROR;
}; -- end of NodeToRope
OutputTreeInfo:
PROC [node:
REF
ANY,
I: Image, feedback: FeedbackData] = {
-- Announces (to feedback, with an attention blink) which tree is about to be drawn and
-- the raster dimensions of the destination image.
debugName: Rope.ROPE;
debugName ← NodeToRope[node, 2];
Feedback.PutF[feedback, oneLiner, "About to Draw Tree: %g (%g by %g)...", [rope[debugName]], [integer[I.bwWindow.fref.raster.scanCount]], [integer[I.bwWindow.fref.raster.scanLength]]];
Feedback.Blink[feedback];
}; -- end of OutputTreeInfo
GetXStepRayInWorld:
PROC [stepSize:
REAL, cameraWRTWorld: Matrix4by4, camera: Camera]
RETURNS [ray: Ray] = {
-- Computes, in WORLD coordinates, the incremental ray corresponding to stepping stepSize
-- in camera x. For perspective, this is the difference of the transformed rays through
-- [0,0] and [stepSize,0]; for orthographic, a literal x-step vector is transformed.
-- NOTE(review): the CreateRay temporaries are not pooled and are never freed here —
-- presumably reclaimed by the garbage collector; confirm against CSG.CreateRay.
cameraXStepRay1, cameraXStepRay2: Ray;
worldXStepRay1, worldXStepRay2: Ray;
cameraXStepRay1 ← CSG.CreateRay[];
IF camera.projection = perspective
THEN {
CSG.StuffCameraRay[cameraXStepRay1, [0,0], camera];
cameraXStepRay2 ← CSG.CreateRay[];
CSG.StuffCameraRay[cameraXStepRay2, [stepSize,0], camera];
worldXStepRay1 ← CSG.TransformRayToWorld[cameraXStepRay1, cameraWRTWorld];
worldXStepRay2 ← CSG.TransformRayToWorld[cameraXStepRay2, cameraWRTWorld];
ray ← CSG.SubtractRays[worldXStepRay2, worldXStepRay1];
Ray not allocated from the pool.
CSG.ReturnRayToPool[worldXStepRay1];
CSG.ReturnRayToPool[worldXStepRay2];
}
ELSE {
-- orthographic projection
CSG.StuffCameraRayLiterally[cameraXStepRay1, [stepSize,0,0], [0,0,0]];
ray ← CSG.TransformNewRay[cameraXStepRay1, cameraWRTWorld];
Ray not allocated from the pool.
}
}; -- end of GetXStepRayInWorld
MasterObjectColorFromPrimitive:
PROC [primitive: Primitive, t:
REAL, worldRay: Ray, primitiveNormal: Vector3d]
RETURNS [color: Color] = {
-- Finds the intrinsic surface color of primitive at the hit with ray parameter t.
-- justColor uses the artwork's flat color; the other two classes transform the ray into
-- the artwork's coordinate system, evaluate the hit point there, and sample the artwork
-- as a surface map (simpleSurface) or a solid texture (spaceFunction).
localRay: Ray;
point3d: Point3d;
artwork: Artwork ← primitive.artwork;
SELECT artwork.class FROM
justColor => color ← artwork.color;
simpleSurface => {
localRay ← CSG.TransformRay[worldRay, artwork.coordSys.worldWRTlocal];
point3d ← CSG.EvaluateLocalRay[localRay, t];
CSG.ReturnRayToPool[localRay];
color ← SVArtwork.FindColorAtSurfacePoint[artwork, point3d, primitiveNormal];
};
spaceFunction => {
localRay ← CSG.TransformRay[worldRay, artwork.coordSys.worldWRTlocal];
point3d ← CSG.EvaluateLocalRay[localRay, t];
CSG.ReturnRayToPool[localRay];
color ← SVArtwork.FindColorAtSpacePoint[artwork, point3d, primitiveNormal];
};
ENDCASE => ERROR;
};
ColorFromClass:
PROC [class: Classification, x, y:
REAL, lightSources: LightSourceList, camera: Camera, worldRay: Ray, tree: CSGTree, feedback: FeedbackData, makeStream:
BOOL ←
FALSE, indent:
NAT ← 0]
RETURNS [r,g,b: REAL] = {
We are given a classification, a list of lightsources, a camera, the screen point from which the ray was shot, and the ray in WORLD coordinates from which we can derive the eyepoint. To produce an image with shadows, we proceed as follows:
Make a new list of lightsources which includes only those lightsources visible from the surface point then proceed in the usual way.
surf: Surface;
surfColor: Color;
eyePoint, surfacePt, p: Point3d;
d: Vector3d;
primitive: Primitive;
visibleLights: LightSourceList;
t: REAL;
worldNormal, primitiveNormal: Vector3d;
-- No crossings: background color if the ray starts outside the solid, black if it
-- starts inside (the eye is embedded in an object).
IF class.count = 0
THEN {
IF NOT class.classifs[1] THEN [r,g,b] ← Shading.ExtractRGB[tree.backgroundColor]
ELSE r ← g ← b ← 0.0;
RETURN};
-- Shade the nearest hit (index 1). The stored normal is in the primitive's coordinate
-- system and must be mapped back to WORLD space for lighting.
surf ← class.surfaces[1];
t ← class.params[1];-- the parameter of the ray intersection
primitive ← class.primitives[1];
primitiveNormal ← class.normals[1];
surfColor ← MasterObjectColorFromPrimitive[primitive, t, worldRay, primitiveNormal];
worldNormal ← Matrix3d.UpdateVectorWithInverse[primitive.worldWRTPrim, primitiveNormal];
surfacePt ← CSG.EvaluateLocalRay[worldRay, t];
[p, d] ← CSG.GetLocalRay[worldRay];
eyePoint ← SVVector3d.Sub[p, d];
visibleLights ← IF tree.shadows THEN SVFancyRays.VisibleLights[lightSources, surfacePt, tree, camera.useBoundSpheresForShadows, feedback, makeStream, indent] ELSE lightSources;
Since worldRay is in WORLD coordinates, this finds eyePoint in WORLD coordinates
SELECT primitive.artwork.material
FROM
chalk => [r,g,b] ← Shading.DiffuseReflectance[worldNormal, surfacePt, surfColor, visibleLights];
plastic => [r,g,b] ← Shading.DiffuseAndSpecularReflectance[eyePoint, worldNormal, surfacePt, surfColor, visibleLights];
ENDCASE => ERROR;
}; -- end of ColorFromClass
ScanLine: TYPE = REF ScanLineObj;
-- One row of pixel colors, indexed [0..lineLen).
ScanLineObj: TYPE = RECORD [
seq: SEQUENCE lineLen: NAT OF Color];
CreateScanLine:
PROC [len:
NAT]
RETURNS [scanLine: ScanLine] = {
-- Allocates a scan line with room for len colors.
scanLine ← NEW[ScanLineObj[len]];
};
CopyScanLine:
PROC [from: ScanLine, to: ScanLine] = {
-- Copies the first to.lineLen colors of "from" into "to".
-- NOTE(review): assumes from.lineLen >= to.lineLen — confirm at call sites.
FOR i:
NAT
IN [0..to.lineLen)
DO
to[i] ← from[i];
ENDLOOP;
};
PutColorInScanLine:
PROC [scanLine: ScanLine, index:
NAT, color: Color] = {
-- Stores color at position index of the scan line.
scanLine[index] ← color;
};
TopColorCast:
PROC [cameraPoint: Point2d, worldRay: Ray, tree: CSGTree, lightSources: LightSourceList, camera: Camera, sceneBox: BoundBox, boundSphere: BoundSphere, feedback: FeedbackData, makeStream:
BOOL ←
FALSE, indent:
NAT ← 0]
RETURNS [r,g,b: REAL] = {
-- Top-level per-pixel shading: prune against the scene's bound box (or bound sphere,
-- per camera.useBoundBoxes), cast the ray, shade the nearest hit, and recycle the
-- classification. Pool levels are compared before and after as a leak check.
node: REF ANY ← tree.son;
class: Classification;
IF tree.son =
NIL
THEN
{
[r,g,b] ← Shading.ExtractRGB[tree.backgroundColor];
RETURN;
};
IF camera.useBoundBoxes
THEN {
IF SVBoundBox.PointInBoundBox[cameraPoint, sceneBox]
THEN {
finalClassCount, firstClassCount: NAT;
firstClassCount ← NumberOfClassesInPool[]; -- for debugging purposes.
class ← RayCast[cameraPoint, worldRay, node, TRUE, feedback, makeStream, indent];
[r,g,b] ← ColorFromClass[class, cameraPoint[1], cameraPoint[2], lightSources, camera, worldRay, tree, feedback, makeStream, indent];
ReturnClassToPool[class];
finalClassCount ← NumberOfClassesInPool[]; -- for debugging purposes.
IF finalClassCount < firstClassCount
THEN {
Feedback.PutF[feedback, oneLiner, "WARNING: A Classification was lost while casting a ray at [%g, %g]", [real[cameraPoint[1]]], [real[cameraPoint[2]]]];
};
}
ELSE [r,g,b] ← Shading.ExtractRGB[tree.backgroundColor];
}
ELSE {
-- Use Bounding Spheres
IF CSG.RayHitsBoundSphere[worldRay, boundSphere]
THEN {
finalClassCount, firstClassCount: NAT;
firstClassCount ← NumberOfClassesInPool[]; -- for debugging purposes.
class ← CastRays.RayCastBoundingSpheres[worldRay, node, TRUE, feedback, makeStream, indent];
[r,g,b] ← ColorFromClass[class, cameraPoint[1], cameraPoint[2], lightSources, camera, worldRay, tree, feedback, makeStream, indent];
ReturnClassToPool[class];
finalClassCount ← NumberOfClassesInPool[]; -- for debugging purposes.
IF finalClassCount < firstClassCount
THEN {
Feedback.PutF[feedback, oneLiner, "WARNING: A Classification was lost while casting a ray at [%g, %g]", [real[cameraPoint[1]]], [real[cameraPoint[2]]]];
};
}
ELSE [r,g,b] ← Shading.ExtractRGB[tree.backgroundColor];
};
};
ColorCast: PROC [cameraPoint: Point2d, worldRay: Ray, tree: CSGTree, lightSources: LightSourceList, camera: Camera, makeStream: BOOL ← FALSE, f: IO.STREAM ← NIL, indent: NAT ← 0] RETURNS [color: Color] = {
-- Casts one WORLD-coordinate ray at the tree and shades the nearest visible surface,
-- returning a normalized Color. Uses all bound boxes via RayCast.
-- BUG FIX: the former body called RayCast[cameraPoint, worldRay, tree.son, makeStream, f, indent],
-- passing makeStream for consolidate, an IO.STREAM for the FeedbackData parameter, and indent for
-- makeStream; it also assigned ColorFromClass's [r,g,b: REAL] results directly to a Color and
-- omitted ColorFromClass's required feedback argument. Arguments are now passed positionally in
-- the correct slots and the RGB triple is normalized as in SingleRay.
r, g, b: REAL;
class: Classification;
class ← RayCast[cameraPoint, worldRay, tree.son, TRUE, NIL, makeStream, indent];
[r,g,b] ← ColorFromClass[class, cameraPoint[1], cameraPoint[2], lightSources, camera, worldRay, tree, NIL, makeStream, indent];
color ← Shading.NormalizeRGB[r,g,b];
ReturnClassToPool[class];
};
-- Opens the output AIS image and computes the sampling grid: the number of samples in x and y,
-- the spacing between samples (stepSize, in screen dots), and the lower left corner of the
-- first pixel (xStart, yStart).
SetUpRayTrace:
PROC [boundBox: BoundBox, camera: Camera, aisRope: Rope.
ROPE, bAndWOnly:
BOOL, resolution:
REAL, feedback: FeedbackData]
RETURNS [
I: Image, xSamples, ySamples:
NAT, stepSize, xStart, yStart:
REAL] = {
Look at the frame of the camera. If frame.fullscreen is TRUE then use the bounding box of the scene. If it is FALSE, then use the frame parameters to determine the bounding box of our ray tracing. In this case, we should check before casting each ray to see if it is in the scene's bounding box before casting it.
extentX, extentY, projectionX, projectionY, trueExtentX, trueExtentY: REAL;
stepSize ← 72.0/resolution; -- in screen dots per sample
We know the size of the box which we wish to raycast and the resolution of the casting in samples per inch. Our box size is in screen dots (at 72 per inch). We wish to know screen dots per sample. (Extent/72)*resolution = inches*(samples per inch) = samples. Extent/samples = screen dots/sample as required. Compactly, then, we need 72/resolution screen dots per sample and Extent/(screen dots per sample) for total number of samples.
IF camera.frame.fullScreen
THEN {
-- Full screen: trace the whole scene bound box.
[I, xSamples, ySamples] ← SVImage.OpenImage[aisRope, bAndWOnly, boundBox.minVert[1], boundBox.minVert[2], boundBox.maxVert[1], boundBox.maxVert[2], resolution, feedback];
extentX ← boundBox.maxVert[1] - boundBox.minVert[1];
extentY ← boundBox.maxVert[2] - boundBox.minVert[2];
}
ELSE {
-- Framed: trace only the user-specified camera frame.
[I, xSamples, ySamples] ← SVImage.OpenImage[aisRope, bAndWOnly, camera.frame.downLeft[1], camera.frame.downLeft[2], camera.frame.upRight[1], camera.frame.upRight[2], resolution, feedback];
extentX ← camera.frame.upRight[1] - camera.frame.downLeft[1];
extentY ← camera.frame.upRight[2] - camera.frame.downLeft[2];
};
Now for the hard part. boundBox tells us the outline of the initial box. trueExtentX represents the actual extent from the left of the first pixel to the right of the last pixel. Likewise for trueExtentY. We subtract the initial extent from the true extent and split the difference. Subtracting the result from the original bounding box origin gives the ray tracing grid outline.
trueExtentX ← Real.Float[xSamples-1]*stepSize;
trueExtentY ← Real.Float[ySamples-1]*stepSize;
-- Center the (slightly larger) sample grid over the requested box.
projectionX ← (trueExtentX - extentX)/2.0;
projectionY ← (trueExtentY - extentY)/2.0;
IF camera.frame.fullScreen
THEN {
xStart ← boundBox.minVert[1] - projectionX;
yStart ← boundBox.minVert[2] - projectionY;
}
ELSE {
xStart ← camera.frame.downLeft[1] - projectionX;
yStart ← camera.frame.downLeft[2] - projectionY;
};
Now (xStart, yStart) is the center of the origin pixel. Subtracting another half a pixel will give us the lower left hand corner of the pixel.
xStart ← xStart - stepSize/2.0;
yStart ← yStart - stepSize/2.0;
}; -- end of SetUpRayTrace
-- Stamps the finished image with its resolution and elapsed tracing time, then closes it.
ShutDownRayTrace: PROC [aisRope: Rope.ROPE, I: Image, camera: Camera, startTime: BasicTime.GMT, feedback: FeedbackData] = {
finish: BasicTime.GMT ← BasicTime.Now[];
elapsedSeconds: INT ← BasicTime.Period[from: startTime, to: finish];
annotation: Rope.ROPE ← IO.PutFR["res: %g dpi, rayTraceTime: (%r)", [real[camera.resolution]], [integer[elapsedSeconds]]];
SVImage.CloseImage[I, aisRope, annotation, feedback];
};
-- Like ShutDownRayTrace, but also tells the user that the trace was stopped before completion.
-- Called from the scan-line loops when camera.abort becomes TRUE.
AbortDrawTree: PROC [I: Image, aisRope: Rope.ROPE, camera: Camera, startTime: BasicTime.GMT, feedback: FeedbackData] = {
finish: BasicTime.GMT ← BasicTime.Now[];
elapsedSeconds: INT ← BasicTime.Period[from: startTime, to: finish];
annotation: Rope.ROPE ← IO.PutFR["res: %g dpi, rayTraceTime: (%r)", [real[camera.resolution]], [integer[elapsedSeconds]]];
SVImage.CloseImage[I, aisRope, annotation, feedback];
Feedback.Append[feedback, "CastRays aborted. Partial files saved.", oneLiner];
};
-- Records the largest red, green, and blue sample values seen so far during a trace
-- (SVImage.RGBTo8Bits consumes these maxima when DrawTree finishes).
-- BUG FIX: the green and blue maxima were previously computed against maxSamples.maxRed
-- (a copy-paste error), so maxGreen and maxBlue tracked the red channel instead of their own.
UpdateMaxSamples:
PROC [maxSamples: MaxSamples, r,g,b:
REAL] = {
maxSamples.maxRed ← MAX[maxSamples.maxRed, r];
maxSamples.maxGreen ← MAX[maxSamples.maxGreen, g];
maxSamples.maxBlue ← MAX[maxSamples.maxBlue, b];
};
-- Casts one horizontal line of rays at camera height y, filling scanLine entries 0..xSamples
-- (xSamples+1 samples, matching CreateScanLine[xSamples+1] at the call sites). worldRay is
-- stepped across the line IN PLACE by CSG.AddRay, so the statement order here is significant.
-- maxSamples is updated with the largest raw r,g,b values encountered.
FillScanLine:
PROC [startX, stepSize:
REAL, xSamples:
NAT, y:
REAL, cameraXStepRayInWorld: Ray, worldRay: Ray, tree: CSGTree, lightSources: LightSourceList, camera: Camera, boundBox: BoundBox, boundSphere: BoundSphere, scanLine: ScanLine, feedback: FeedbackData, maxSamples: MaxSamples] = {
color: Color;
r,g,b: REAL;
thisX: REAL;
Cast the first ray of the y scan line
[r,g,b] ← TopColorCast[[startX, y], worldRay, tree, lightSources, camera, boundBox, boundSphere, feedback];
UpdateMaxSamples[maxSamples, r, g, b];
color ← Shading.NormalizeRGB[r,g,b];
PutColorInScanLine[scanLine, 0, color];
CSG.AddRay[cameraXStepRayInWorld, worldRay]; -- updates worldRay
FOR j:
INTEGER
IN[1..xSamples]
DO
-- left to right
thisX ← startX+Real.Float[j]*stepSize;
[r,g,b] ← TopColorCast[[thisX, y], worldRay, tree, lightSources, camera, boundBox, boundSphere, feedback];
UpdateMaxSamples[maxSamples, r, g, b];
color ← Shading.NormalizeRGB[r,g,b];
PutColorInScanLine[scanLine, j, color];
CSG.AddRay[cameraXStepRayInWorld, worldRay]; -- updates worldRay
ENDLOOP;
};
-- Reports to the feedback window which ray-culling acceleration structures this camera uses.
OutputCameraInfo:
PROC [camera: Camera, feedback: FeedbackData] = {
IF NOT camera.useBoundBoxes
THEN Feedback.PutF[feedback, oneLiner, "Use Bounding Spheres.\n"]
ELSE Feedback.PutF[feedback, oneLiner, "Use Bounding Boxes.\n"];
IF NOT camera.useBoundSpheresForShadows
THEN Feedback.PutF[feedback, oneLiner, "Use nothing for Shadows.\n"]
ELSE Feedback.PutF[feedback, oneLiner, "Use Bound Spheres for Shadows.\n"];
};
-- Restartable version of DrawTree: begins ray tracing at camera y = startLine rather than at
-- the bottom of the image. Returns FALSE in success only if the scene is unbounded; the
-- max* results are the 8-bit-scaled channel maxima for later normalization.
DrawTreeWithStartLine:
PUBLIC
PROC [startLine:
REAL, tree: CSGTree, lightSources: LightSourceList, camera: Camera, aisRope: Rope.
ROPE, bAndWOnly:
BOOL, notify: NotifyOfProgressProc ← NoOpNotifyOfProgress, clientData:
REF
ANY ←
NIL, feedback: FeedbackData]
RETURNS [success:
BOOL, maxRed, maxGreen, maxBlue, maxBlack:
NAT] = {
Like DrawTree in CastRaysImplA, but we ignore all values of "i" until yStart+i*stepSize >= startCameraPoint[2]. So we start with i = (startCameraPoint[2]-yStart)/stepSize.
topNode: REF ANY; -- tree.son. The top active node of the CSG Tree
I: Image;
boundBox: BoundBox;
boundSphere: BoundSphere;
cameraWRTWorld: Matrix4by4;
cameraXStepRayInWorld, cameraRay, worldRay: Ray;
stepSize, xStart, yStart, yMiddleStart, thisY: REAL;
maxSamples: MaxSamples ← NEW[MaxSamplesObj];
xSamples, ySamples, iStart: NAT;
color: Color; scanLine1, scanLine2: ScanLine;
startTime: BasicTime.GMT;
startTime ← BasicTime.Now[];
success ← TRUE;
topNode ← tree.son;
camera.abort ←
FALSE;
-- if camera.abort becomes TRUE, close files and return.
Must preprocess before casting rays.
[boundBox, boundSphere] ← Preprocess3d.PreprocessForImage[tree, camera];
IF camera.frame.fullScreen
AND boundBox =
NIL
THEN {
ComplainInfiniteScene[feedback]; success ← FALSE; RETURN};
Calculates current transform matrices and bounding boxes.
[I, xSamples, ySamples, stepSize, xStart, yStart] ← SetUpRayTrace [boundBox, camera, aisRope, bAndWOnly, camera.resolution, feedback];
OutputTreeInfo[topNode,
I, feedback]; OutputCameraInfo[camera, feedback];
Allocate the scan line and ray storage.
cameraRay ← CSG.CreateRay[]; -- DrawTree recycles its own ray
scanLine1 ← CreateScanLine[xSamples+1]; scanLine2 ← CreateScanLine[xSamples+1];
Compute the ray step.
cameraWRTWorld ← CoordSys.FindInTermsOfWorld[camera.coordSys];
cameraXStepRayInWorld ← GetXStepRayInWorld[stepSize, cameraWRTWorld, camera];
Cast the first scan line.
-- Skip directly to the scan line containing startLine.
iStart ← Real.Fix[(startLine-yStart)/stepSize];
yMiddleStart ← yStart+iStart*stepSize;
CSG.StuffCameraRay[cameraRay, [xStart, yMiddleStart], camera];
worldRay ← CSG.TransformRayToWorld[cameraRay, cameraWRTWorld]; -- allocates ray from pool
FillScanLine [xStart, stepSize, xSamples, yMiddleStart, cameraXStepRayInWorld, worldRay, tree, lightSources, camera, boundBox, boundSphere, scanLine1, feedback, maxSamples];
CSG.ReturnRayToPool[worldRay];
FOR i:
INTEGER
IN[iStart+1..ySamples]
DO
-- For each ray bottom to top.
IF camera.abort = TRUE THEN {AbortDrawTree[I, aisRope, camera, startTime, feedback]; RETURN};
notify[yStart+i*stepSize, xStart, yStart, xStart+xSamples*stepSize, yStart+ySamples*stepSize, clientData];
-- tell the user interface that we have just cast line i - 1.
Cast the next ray.
thisY ← yStart+i*stepSize;
CSG.StuffCameraRay[cameraRay, [xStart, thisY], camera];
worldRay ← CSG.TransformRayToWorld[cameraRay, cameraWRTWorld]; -- from pool
FillScanLine [xStart, stepSize, xSamples, thisY, cameraXStepRayInWorld, worldRay, tree, lightSources, camera, boundBox, boundSphere, scanLine2, feedback, maxSamples];
CSG.ReturnRayToPool[worldRay];
We have two complete scan lines. Average values in fours and write to ais.
FOR k:
NAT
IN[0..xSamples)
DO
-- NOTE(review): there is no ELSE arm here; when the four samples differ by more than the
-- threshold, color keeps its value from the previous pixel. The former ELSE arm (adaptive
-- supersampling via CastMoreRays) survives only as an orphaned fragment later in this file.
IF MoreOrLessTheSame[scanLine1[k], scanLine1[k+1],
scanLine2[k], scanLine2[k+1]] THEN
color ← ColorAverage[scanLine1[k], scanLine1[k+1], scanLine2[k], scanLine2[k+1]];
SVImage.PutImage[I, i, k, color, xSamples, ySamples];
ENDLOOP;
CopyScanLine [scanLine2, scanLine1];
ENDLOOP;
ShutDownRayTrace[aisRope, I, camera, startTime, feedback];
[maxRed, maxGreen, maxBlue, maxBlack] ← SVImage.RGBTo8Bits[maxSamples.maxRed, maxSamples.maxGreen, maxSamples.maxBlue];
}; -- end of DrawTreeWithStartLine
-- Tells the user that a full-screen trace was requested but the scene has no finite bound box,
-- so no image frame can be derived. Blink draws attention to the message.
ComplainInfiniteScene:
PROC [feedback: FeedbackData] = {
Feedback.Append[feedback, "Infinite Scene. Please define a bounding frame.", oneLiner];
Feedback.Blink[feedback];
};
-- Running maxima of the raw r,g,b samples produced during a trace. UpdateMaxSamples maintains
-- these; they are passed to SVImage.RGBTo8Bits when a trace completes.
MaxSamples: TYPE = REF MaxSamplesObj;
MaxSamplesObj: TYPE = RECORD [maxRed, maxGreen, maxBlue: REAL ← 0];
-- Top-level ray tracer: preprocesses the tree, opens an AIS image, casts one scan line per
-- sample row (bottom to top), box-filters adjacent 2x2 samples into pixels, and closes the
-- image. Returns FALSE in success only if the scene is unbounded; the max* results are the
-- 8-bit-scaled channel maxima for later normalization.
DrawTree:
PUBLIC
PROC [tree: CSGTree, lightSources: LightSourceList, camera: Camera, aisRope: Rope.
ROPE, bAndWOnly:
BOOL, notify: NotifyOfProgressProc ← NoOpNotifyOfProgress, clientData:
REF
ANY ←
NIL, feedback: FeedbackData]
RETURNS [success:
BOOL, maxRed, maxGreen, maxBlue, maxBlack:
NAT] = {
topNode: REF ANY; -- tree.son. The top active node of the CSG Tree
I: Image;
boundBox: BoundBox;
boundSphere: BoundSphere;
cameraWRTWorld: Matrix4by4;
cameraXStepRayInWorld, cameraRay, worldRay: Ray;
stepSize, xStart, yStart, thisY: REAL;
xSamples, ySamples: NAT;
maxSamples: MaxSamples ← NEW[MaxSamplesObj];
color: Color; scanLine1, scanLine2: ScanLine;
startTime: BasicTime.GMT;
startTime ← BasicTime.Now[];
success ← TRUE;
topNode ← tree.son;
camera.abort ←
FALSE;
-- if camera.abort becomes TRUE, close files and return.
Must preprocess before casting rays.
[boundBox, boundSphere] ← Preprocess3d.PreprocessForImage[tree, camera];
IF camera.frame.fullScreen
AND boundBox =
NIL
THEN {
ComplainInfiniteScene[feedback]; success ← FALSE; RETURN};
Calculate current transform matrices and bounding boxes.
[I, xSamples, ySamples, stepSize, xStart, yStart] ← SetUpRayTrace [boundBox, camera, aisRope, bAndWOnly, camera.resolution, feedback];
OutputTreeInfo[topNode,
I, feedback]; OutputCameraInfo[camera, feedback];
Allocate Storage for Scan Lines and Ray.
scanLine1 ← CreateScanLine[xSamples+1]; scanLine2 ← CreateScanLine[xSamples+1];
cameraRay ← CSG.CreateRay[];
-- DrawTree recycles its own ray
Compute the Ray increment.
cameraWRTWorld ← CoordSys.FindInTermsOfWorld[camera.coordSys];
cameraXStepRayInWorld ← GetXStepRayInWorld[stepSize, cameraWRTWorld, camera];
Cast the first scan line.
CSG.StuffCameraRay[cameraRay, [xStart, yStart], camera];
worldRay ← CSG.TransformRayToWorld[cameraRay, cameraWRTWorld]; -- allocates ray from pool
FillScanLine [xStart, stepSize, xSamples, yStart, cameraXStepRayInWorld, worldRay, tree, lightSources, camera, boundBox, boundSphere, scanLine1, feedback, maxSamples];
CSG.ReturnRayToPool[worldRay];
FOR i:
INTEGER
IN[1..ySamples]
DO
-- For each ray bottom to top.
IF camera.abort = TRUE THEN {AbortDrawTree[I, aisRope, camera, startTime, feedback]; RETURN};
notify[yStart+i*stepSize, xStart, yStart, xStart+xSamples*stepSize, yStart+ySamples*stepSize, clientData];
-- tell the user interface that we have just cast line i - 1.
Cast the next scan line.
thisY ← yStart+i*stepSize;
CSG.StuffCameraRay[cameraRay, [xStart, thisY], camera];
worldRay ← CSG.TransformRayToWorld[cameraRay, cameraWRTWorld]; -- allocates ray from pool
FillScanLine [xStart, stepSize, xSamples, thisY, cameraXStepRayInWorld, worldRay, tree, lightSources, camera, boundBox, boundSphere, scanLine2, feedback, maxSamples];
CSG.ReturnRayToPool[worldRay];
We now have two complete scan lines. Average values in fours and write to ais.
FOR k:
NAT
IN[0..xSamples)
DO
-- NOTE(review): there is no ELSE arm here; when the four samples differ by more than the
-- threshold, color keeps its value from the previous pixel. The former ELSE arm (adaptive
-- supersampling via CastMoreRays) survives only as an orphaned fragment later in this file.
IF MoreOrLessTheSame[scanLine1[k], scanLine1[k+1],
scanLine2[k], scanLine2[k+1]] THEN
color ← ColorAverage[scanLine1[k], scanLine1[k+1], scanLine2[k], scanLine2[k+1]];
SVImage.PutImage[I, i, k, color, xSamples, ySamples];
ENDLOOP;
CopyScanLine [scanLine2, scanLine1];
ENDLOOP;
ShutDownRayTrace[aisRope, I, camera, startTime, feedback];
[maxRed, maxGreen, maxBlue, maxBlack] ← SVImage.RGBTo8Bits[maxSamples.maxRed, maxSamples.maxGreen, maxSamples.maxBlue];
}; -- end of DrawTree
-- TRUE iff the four sample values span a range of at most 10 units. Used by the scan-line
-- loops to decide whether four neighboring samples may simply be averaged into one pixel.
MoreOrLessTheSame:
PROC [a, b, c, d:
REAL]
RETURNS [
BOOL] = {
spread: REAL ← MAX[a, b, c, d] - MIN[a, b, c, d];
RETURN[spread <= 10];
}; -- end of MoreOrLessTheSame
-- Returns the color whose red, green, and blue components are the arithmetic means of the
-- corresponding components of the four input colors.
ColorAverage:
PROC [a, b, c, d: Color]
RETURNS [avgColor: Color] = {
r1, g1, b1, r2, g2, b2, r3, g3, b3, r4, g4, b4: REAL;
[r1, g1, b1] ← Shading.ExtractRGB[a];
[r2, g2, b2] ← Shading.ExtractRGB[b];
[r3, g3, b3] ← Shading.ExtractRGB[c];
[r4, g4, b4] ← Shading.ExtractRGB[d];
avgColor ← ImagerColor.ColorFromRGB[[
(r1 + r2 + r3 + r4)/4.0,
(g1 + g2 + g3 + g4)/4.0,
(b1 + b2 + b3 + b4)/4.0]];
}; -- end of ColorAverage
-- NOTE(review): the following ELSE arm is an orphaned fragment of DrawTree's averaging loop
-- (the IF there now has no ELSE); it is preserved as a comment to record how CastMoreRays was
-- once invoked:
-- ELSE color ← CastMoreRays[ul: scanLine1[k], ur: scanLine1[k+1], dl: scanLine2[k], dr: scanLine2[k+1], left: k, right: k+1, top: i, bottom: i-1, topNode: topNode, focalLength: focalLength, lightSources: lightSources, cameraWRTWorld: cameraWRTWorld];
-- Adaptive supersampling: given the colors at the four corners of a sample square, casts five
-- more rays (the four edge midpoints and the center), averages each quadrant, and averages the
-- four quadrant averages. This further subdivides each square for a more accurate intensity value.
-- NOTE(review): this proc appears to be legacy code; it calls GetRayFromPool, TransformRay, and
-- ColorCast with argument lists that differ from the CSG.-qualified interfaces used elsewhere in
-- this module, and the orphaned call above does not match this signature; confirm before reviving.
CastMoreRays: PROC [ul, ur, dl, dr: Color, left, right, top, bottom: REAL, tree: CSGTree, focalLength: REAL, lightSources: LightSourceList, camera: Camera] RETURNS [color: Color] = {
cameraRay: Ray ← GetRayFromPool[];
worldRay: Ray;
cameraWRTWorld: Matrix4by4 ← camera.coordSys.mat;
leftColor, rightColor, topColor, bottomColor, middleColor: Color;
midLeftY, midTopX: REAL;
-- BUG FIX: the midpoints were previously computed as half the edge LENGTH ((top-bottom)/2.0 and
-- (right-left)/2.0) instead of the midpoint COORDINATES, so the five extra rays were cast near
-- the camera origin rather than through the square being subdivided.
midLeftY ← (top+bottom)/2.0;
midTopX ← (right+left)/2.0;
-- Middle of the left edge.
cameraRay.basePt ← [left, midLeftY, 0];cameraRay.direction ← [left, midLeftY, focalLength];
worldRay ← TransformRay[cameraRay, cameraWRTWorld]; -- allocates ray from pool
leftColor ← ColorCast[[left, midLeftY], worldRay, tree, lightSources, camera];
ReturnRayToPool[worldRay];
-- Middle of the right edge.
cameraRay.basePt ← [right, midLeftY, 0];cameraRay.direction ← [right, midLeftY, focalLength];
worldRay ← TransformRay[cameraRay, cameraWRTWorld]; -- allocates ray from pool
rightColor ← ColorCast[[right, midLeftY], worldRay, tree, lightSources, camera];
ReturnRayToPool[worldRay];
-- Middle of the top edge.
cameraRay.basePt ← [midTopX, top, 0];cameraRay.direction ← [midTopX, top, focalLength];
worldRay ← TransformRay[cameraRay, cameraWRTWorld]; -- allocates ray from pool
topColor ← ColorCast[[midTopX, top], worldRay, tree, lightSources, camera];
ReturnRayToPool[worldRay];
-- Middle of the bottom edge.
cameraRay.basePt ← [midTopX, bottom, 0];cameraRay.direction ← [midTopX, bottom, focalLength];
worldRay ← TransformRay[cameraRay, cameraWRTWorld]; -- allocates ray from pool
bottomColor ← ColorCast[[midTopX, bottom], worldRay, tree, lightSources, camera];
ReturnRayToPool[worldRay];
-- Center of the square.
cameraRay.basePt ← [midTopX, midLeftY, 0];cameraRay.direction ← [midTopX, midLeftY, focalLength];
worldRay ← TransformRay[cameraRay, cameraWRTWorld]; -- allocates ray from pool
middleColor ← ColorCast[[midTopX, midLeftY], worldRay, tree, lightSources, camera];
ReturnRayToPool[worldRay];
-- Average the four quadrants, each built from one corner, two edge midpoints, and the center.
color ← ColorAverage[ ColorAverage[ul, topColor, leftColor, middleColor],
ColorAverage[topColor, ur, middleColor, rightColor],
ColorAverage[leftColor, middleColor, dl, bottomColor],
ColorAverage[middleColor, rightColor, bottomColor, dr] ];
ReturnRayToPool[cameraRay];
}; -- end of CastMoreRays
NoOpNotifyOfProgress: PUBLIC NotifyOfProgressProc = {};
-- Pops a Classification from the global pool, growing the pool by one (AddAClass) if it is
-- empty, so this proc always succeeds.
GetClassFromPool:
PUBLIC
PROC
RETURNS [class: Classification] = {
IF globalPoolPointer = 0 THEN AddAClass[]; -- guarantees globalPoolPointer > 0 below
globalPoolPointer ← globalPoolPointer - 1;
class ← globalPool[globalPoolPointer];
};
ClassPoolEmpty: SIGNAL = CODE;
-- Pushes a Classification back onto the global pool (the pool is used as a stack of free entries).
ReturnClassToPool:
PUBLIC
PROC [class: Classification] = {
IF globalPoolPointer = globalPool.maxClasses THEN SIGNAL ClassPoolFull;
-- NOTE(review): if ClassPoolFull were resumed, the store below would write one slot beyond
-- capacity; presumably the signal is never resumed, but confirm.
globalPoolPointer ← globalPoolPointer + 1;
globalPool[globalPoolPointer - 1] ← class;
};
ClassPoolFull: SIGNAL = CODE;
-- The number of free Classifications currently in the pool. Callers compare this before and
-- after a ray cast to detect leaked Classifications.
NumberOfClassesInPool:
PUBLIC
PROC
RETURNS [count:
NAT] = {
RETURN[globalPoolPointer];
};
-- Grows the Classification pool by exactly one slot, copying the free entries into a new
-- PoolObj and filling the new slot with a fresh Classification and SurfaceArray.
-- In the visible code this is called only from GetClassFromPool when the pool is empty
-- (globalPoolPointer = 0), so the copy loop below copies nothing in practice; it is written
-- generally anyway.
AddAClass:
PROC = {
This scene contains sections complicated enough that the original allocation of classifications does not cover the most complicated rays. Add another classification to the pool.
newPool: Pool ← NEW[PoolObj[globalPool.maxClasses+1]];
IF globalPool.maxClasses > 50
THEN
{
-- there must be a leak in the classification system
Feedback.AppendTypescriptRaw[$Solidviews, "CastRaysImplA Warning: More than 50 Classifications!!", oneLiner];
};
FOR i:
NAT
IN [0..globalPoolPointer)
DO
newPool[i] ← globalPool[i];
ENDLOOP;
globalPoolPointer ← globalPoolPointer + 1;
globalPool ← newPool;
-- The new top slot gets a fresh Classification with its own SurfaceArray.
globalPool[globalPoolPointer - 1] ← NEW[ClassificationObj];
globalPool[globalPoolPointer - 1].surfaces ← NEW[SurfaceArrayObj];
};
-- Pops a CompactArray from its pool. Unlike the Classification pool, this pool does not grow:
-- an empty pool raises CompactPoolEmpty. The compact pool is 1-indexed (see Init).
GetCompactFromPool:
PROC
RETURNS [compact: CompactArray] = {
IF globalCompactPoolPointer = 0 THEN SIGNAL CompactPoolEmpty;
-- NOTE(review): if CompactPoolEmpty were resumed, the next line would index slot 0 of a
-- 1-indexed pool; presumably the signal is never resumed, but confirm.
compact ← globalCompactPool[globalCompactPoolPointer];
globalCompactPoolPointer ← globalCompactPoolPointer -1;
};
CompactPoolEmpty: SIGNAL = CODE;
-- Pushes a CompactArray back onto its (1-indexed) pool.
ReturnCompactToPool:
PROC [compact: CompactArray] = {
IF globalCompactPoolPointer = globalCompactPoolCount THEN SIGNAL CompactPoolFull;
-- NOTE(review): if CompactPoolFull were resumed, the store below would write past the top
-- slot; presumably the signal is never resumed, but confirm.
globalCompactPoolPointer ← globalCompactPoolPointer + 1;
globalCompactPool[globalCompactPoolPointer] ← compact;
};
CompactPoolFull: SIGNAL = CODE;
-- Overwrites class to be the classification of a ray which hits nothing: zero surface
-- crossings, and classifs[1] = FALSE (presumably "ray begins outside the solid"; confirm
-- against ClassificationObj in SVRayTypes).
MakeClassAMiss:
PUBLIC
PROC [class: Classification] = {
class.count ← 0;
class.classifs[1] ← FALSE;
};
-- Module initialization: preallocates both free pools and marks them full. Note the differing
-- conventions: the Classification pool is 0-indexed [0..globalPoolCount), while the compact
-- pool is 1-indexed [1..globalCompactPoolCount].
Init:
PROC = {
Create a Classification Pool
globalPool ← NEW[PoolObj[globalPoolCount]];
FOR i:
NAT
IN[0..globalPoolCount)
DO
globalPool[i] ← NEW[ClassificationObj];
globalPool[i].surfaces ← NEW[SurfaceArrayObj];
ENDLOOP;
globalPoolPointer ← globalPoolCount;
Create a Compact Pool
globalCompactPool ← NEW[CompactPoolObj];
FOR i:
NAT
IN[1..globalCompactPoolCount]
DO
globalCompactPool[i] ← NEW[CompactArrayObj];
ENDLOOP;
globalCompactPoolPointer ← globalCompactPoolCount;
};
END.