NewPointSet: PUBLIC PROC [dim: NAT] RETURNS [ps: LeastSq.PointSet] = {
  ps ← NEW[LeastSq.PointSetRec[dim]];
  ps.count ← 0;
  FOR row: NAT IN [0..dim) DO
    ps[row] ← NEW[LeastSq.RowRec[dim]];
    FOR col: NAT IN [0..dim) DO
      ps[row][col] ← 0.0;
      ENDLOOP;
    ENDLOOP;
  };
AddPoint: PUBLIC PROC [ps: LeastSq.PointSet, pt: LeastSq.Row, weight: REAL ← 1.0] = {
  -- accumulate the weighted products pt[row]*pt[col] into the lower triangle of ps
  IF pt.colDim # ps.rowDim THEN ERROR;
  ps.count ← ps.count+1;
  FOR row: NAT IN [0..ps.rowDim) DO
    FOR col: NAT IN [0..row] DO
      ps[row][col] ← ps[row][col]+weight*pt[row]*pt[col];
      ENDLOOP;
    ENDLOOP;
  };
LeastSqFit: PUBLIC PROC [ps: LeastSq.PointSet] RETURNS [eq: LeastSq.Row] = {
  -- At this point, ps[row][col] contains the sum over all input points of pt[row]*pt[col], where
  --   pt[k] = 1 if k = 0,
  --   pt[k] = an independent variable if 0 < k < dim-1,
  --   pt[k] = the dependent variable if k = dim-1.
  -- We begin with a basis set of polynomials: 1, ind1, ind2, .... We use the Gram-Schmidt algorithm
  -- (see Numerical Analysis, second edition, by Johnson and Riess, Addison-Wesley, 1982, pages 255
  -- and following, for details) to construct a new basis set of polynomials that is orthogonal with
  -- respect to the given set of independent variable pairs, and normal in magnitude (the Euclidean
  -- norm of each polynomial is one).
  -- The best least-squares approximation (with respect to the original basis set) is then a linear
  -- combination of the orthonormal polynomials, where the coefficient of polynomial i is the inner
  -- product of the dependent values with the vector of values of that polynomial evaluated at the
  -- independent variables. (An illustrative sketch of this computation follows the procedure.)
  basis: LeastSq.PointSet = NEW[LeastSq.PointSetRec[ps.rowDim]];
  FOR row: NAT IN [0..ps.rowDim) DO
    -- fill ps symmetrically
    FOR col: NAT IN (row..ps.rowDim) DO
      ps[row][col] ← ps[col][row];
      ENDLOOP;
    ENDLOOP;
  FOR row: NAT IN [0..ps.rowDim) DO
    innerProd: REAL;
    col: NAT;
    basis[row] ← NEW[LeastSq.RowRec[ps.rowDim]];
    FOR col IN [0..ps.rowDim) DO basis[row][col] ← 0.0 ENDLOOP;
    -- subtract the projections onto the previously constructed orthonormal polynomials
    FOR prevrow: NAT IN [0..row) DO
      innerProd ← 0.0;
      FOR col IN [0..ps.rowDim) DO
        innerProd ← innerProd+basis[prevrow][col]*ps[row][col];
        ENDLOOP; -- col
      FOR col IN [0..ps.rowDim) DO
        basis[row][col] ← basis[row][col]-innerProd*basis[prevrow][col];
        ENDLOOP; -- col
      ENDLOOP; -- prevrow
    basis[row][row] ← 1.0;
    IF row < ps.rowDim-1 THEN
      -- normalize this new orthogonal polynomial
      BEGIN
      innerProd ← 0.0;
      FOR col IN [0..ps.rowDim) DO
        FOR col1: NAT IN [0..ps.rowDim) DO
          innerProd ← innerProd+basis[row][col]*basis[row][col1]*ps[col][col1];
          ENDLOOP; -- col1
        ENDLOOP; -- col
      IF innerProd > 0.0 THEN {
        normFact: REAL = 1.0/Real.SqRt[innerProd];
        FOR col IN [0..ps.rowDim) DO
          basis[row][col] ← normFact*basis[row][col];
          ENDLOOP; -- col
        };
      END;
    ENDLOOP; -- row
  eq ← basis[ps.rowDim-1]; -- the last basis row is the best-fit equation
  };
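The same computation, sketched in Python for concreteness. This is a minimal sketch, not part of the LeastSq module, and the names least_sq_fit, points, and weights are illustrative: each input point is a row [1, x1, ..., xk, y], the weighted moment matrix is accumulated as in AddPoint, and Gram-Schmidt is carried out in coefficient space as in LeastSqFit.

def least_sq_fit(points, weights=None):
    # points: list of rows [1.0, x1, ..., xk, y]; returns eq with eq[-1] = 1.0
    # that minimizes the weighted sum of squares of eq[0] + eq[1]*x1 + ... + y
    # over the input points.
    dim = len(points[0])
    if weights is None:
        weights = [1.0] * len(points)
    # Accumulate the moment matrix m[r][c] = sum of w*pt[r]*pt[c] (cf. AddPoint).
    m = [[0.0] * dim for _ in range(dim)]
    for pt, w in zip(points, weights):
        for r in range(dim):
            for c in range(dim):
                m[r][c] += w * pt[r] * pt[c]
    # Gram-Schmidt in coefficient space (cf. LeastSqFit): basis[row] holds the
    # coefficients of an orthonormal polynomial; <a, b> = a^T m b is the inner
    # product induced by the data.
    basis = [[0.0] * dim for _ in range(dim)]
    for row in range(dim):
        for prev in range(row):
            ip = sum(basis[prev][c] * m[row][c] for c in range(dim))
            for c in range(dim):
                basis[row][c] -= ip * basis[prev][c]
        basis[row][row] = 1.0   # harmless: earlier basis rows are zero in this column
        if row < dim - 1:       # the dependent-variable row is left unnormalized
            ip = sum(basis[row][c] * basis[row][c1] * m[c][c1]
                     for c in range(dim) for c1 in range(dim))
            if ip > 0.0:
                nf = 1.0 / ip ** 0.5
                basis[row] = [nf * b for b in basis[row]]
    return basis[dim - 1]

Fed points generated the way Test below generates them (each point lies on the hyperplane eqIn[0] + eqIn[1]*x1 + ... + y = 0, perturbed by the optional noise term), the sketch returns coefficients close to [eqIn[0], ..., eqIn[colDim-2], 1.0].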
Test: PROC [eqIn: LeastSq.Row, n: INT, noise: REAL ← 0] RETURNS [eqOut: LeastSq.Row] =
  BEGIN
  ps: LeastSq.PointSet = NewPointSet[eqIn.colDim];
  pt: LeastSq.Row = NEW[LeastSq.RowRec[eqIn.colDim]];
  pt[0] ← 1.0;
  FOR i: INT IN [0..n) DO
    sum: REAL ← eqIn[0]+noise*Random.Choose[-10000, 10000];
    FOR j: NAT IN [1..eqIn.colDim-1) DO
      pt[j] ← Random.Choose[-10000, 10000];
      sum ← sum+eqIn[j]*pt[j];
      ENDLOOP;
    pt[eqIn.colDim-1] ← -sum;
    AddPoint[ps, pt];
    ENDLOOP;
  eqOut ← LeastSqFit[ps];
  END;