Solving systems of linear equations numericallyStart by reading the following Maple library. If this does not work, open the collapsed section marked by a triangle and read all the commands by running them using Enter.with(LinearAlgebra):
with(NumericalMethods);
# if not present run the next groupwith(LinearAlgebra):
MatrixEliminate:=proc(inA)
description "MatrixEliminate(A), also MatrixEliminate(<A|b>) or MatrixEliminate(<A|B>).",
"Applies Gaussian elimination to the given matrix.",
"Optional options: output= and pivoting=.",
"Option: output=gem (default), =gaussjordan, =lu, =lup.",
"Option: pivoting=partial (default), =minimal, =smallpivot, =euclidean.",
"Option =minimal is Maple's default for integer matrices.",
"Option =smallpivot is recommended for integer matrices.",
"Option =euclidean applies Euclidean algorithm, pivots are gcd's of columns.",
"With pivoting=euclidean, the output= option is ignored and =gem is used.";
local nloc,mloc,iseuclid,islu,islup,pivo,pswitch,pelim,
# controls: dimensions, Euclidean?, LU?, LUP?; dispatch slots for pivot/switch/elimination
pivoPP,pivoM,pivoS,pswitchG,pswitchLU,pswitchLUP,pelimGEM,pelimGJ,pelimLU,pelimE,
# strategy procedures: pivot selection, row switching, elimination variants
Uloc,Lloc,Ploc,rowloc,colloc,lmaxind,isskip,iloc,jloc,tmploc,isdone;
# work registers: working matrices, indices, temporaries
### matrix input & check
if not(type(inA,'Matrix'(complex))) then
error("A numerical matrix expected as argument.") end if;
Uloc:=LinearAlgebra:-Copy(inA);
nloc:=LinearAlgebra:-RowDimension(Uloc);
mloc:=LinearAlgebra:-ColumnDimension(Uloc);
### pivo: identifies candidate for pivot, row stored in lmaxind, sets isskip to false;
### if no non-zero candidate found, isskip is left set to true
### uses Uloc, nloc, mloc
pivoPP:=proc(rowproc,colproc) # partial pivoting: entry largest in absolute value
local iproc,maxvalproc,tmproc;
maxvalproc:=0;
for iproc from rowproc to nloc do
tmproc:=evalf(abs(Uloc[iproc,colproc]));
if tmproc>maxvalproc then
maxvalproc:=tmproc;lmaxind:=iproc
end if
end do;
if maxvalproc>0 then isskip:=false end if
end;
###
pivoM:=proc(rowproc,colproc) # minimal pivoting: first non-zero entry from the top
lmaxind:=rowproc;
while Uloc[min(lmaxind,nloc),colproc]=0 and lmaxind<=nloc do
lmaxind:=lmaxind+1
end do;
if lmaxind<=nloc then isskip:=false end if
end;
###
pivoS:=proc(rowproc,colproc) # pivoting with smallest possible (non-zero) pivot
local iproc,maxvalproc,tmproc;
maxvalproc:=0;
for iproc from rowproc to nloc do
tmproc:=abs(Uloc[iproc,colproc]);
if tmproc>0 and 1/tmproc>maxvalproc then
maxvalproc:=1/tmproc;lmaxind:=iproc
end if
end do;
if maxvalproc>0 then isskip:=false end if
end;
### pswitch: switches rows, input: row,column of pivot place, row of pivot candidate
### changes Uloc
pswitchG:=proc(rowproc,colproc,maxproc) # switching of rows (GEM,GJ,Euclid)
local iproc,tmproc;
for iproc from colproc to mloc do
tmproc:=Uloc[rowproc,iproc];
Uloc[rowproc,iproc]:=Uloc[maxproc,iproc];
Uloc[maxproc,iproc]:=tmproc
end do
end;
pswitchLU:=proc(rowproc,colproc,maxproc) # switching for LU: no switching allowed
if Uloc[rowproc,colproc]=0 then
error("LU decomposition not possible, try LUP.")
end if
end;
pswitchLUP:=proc(rowproc,colproc,maxproc) # switching for LUP: swap rows of U, L and P consistently
local iproc,tmproc;
for iproc from colproc to mloc do
tmproc:=Uloc[rowproc,iproc];
Uloc[rowproc,iproc]:=Uloc[maxproc,iproc];
Uloc[maxproc,iproc]:=tmproc
end do;
for iproc from 1 to rowproc-1 do
tmproc:=Lloc[rowproc,iproc];
Lloc[rowproc,iproc]:=Lloc[maxproc,iproc];
Lloc[maxproc,iproc]:=tmproc
end do;
for iproc from 1 to nloc do
tmproc:=Ploc[rowproc,iproc];
Ploc[rowproc,iproc]:=Ploc[maxproc,iproc];
Ploc[maxproc,iproc]:=tmproc
end do
end;
### pelim: eliminates matrix using predetermined pivot
### changes Uloc, uses nloc, mloc
pelimGEM:=proc(rowproc,colproc) # elimination for GEM
local iproc,jproc,tmproc;
if rowproc<nloc then
for iproc from rowproc+1 to nloc do
if Uloc[iproc,colproc]<>0 then
tmproc:=Uloc[iproc,colproc]/Uloc[rowproc,colproc]; # bugfix: was Uloc[rowloc,...], the outer loop variable
Uloc[iproc,colproc]:=0;
if colproc<mloc then
for jproc from colproc+1 to mloc do
Uloc[iproc,jproc]:=Uloc[iproc,jproc]-tmproc*Uloc[rowproc,jproc]
end do
end if
end if
end do
end if
end;
###
pelimGJ:=proc(rowproc,colproc) # elimination for Gauss-Jordan: normalize pivot row, clear below and above
local iproc,jproc,tmproc;
if colproc<mloc then
for jproc from colproc+1 to mloc do
Uloc[rowproc,jproc]:=Uloc[rowproc,jproc]/Uloc[rowproc,colproc]
end do
end if;
Uloc[rowproc,colproc]:=1;
if rowproc<nloc then
for iproc from rowproc+1 to nloc do
if Uloc[iproc,colproc]<>0 then
if colproc<mloc then
for jproc from colproc+1 to mloc do
Uloc[iproc,jproc]:=Uloc[iproc,jproc]-Uloc[iproc,colproc]*Uloc[rowproc,jproc]
end do
end if;
Uloc[iproc,colproc]:=0
end if
end do
end if;
if rowproc>1 then # also eliminate above the pivot
for iproc from 1 to rowproc-1 do
if Uloc[iproc,colproc]<>0 then
if colproc<mloc then
for jproc from colproc+1 to mloc do
Uloc[iproc,jproc]:=Uloc[iproc,jproc]-Uloc[iproc,colproc]*Uloc[rowproc,jproc]
end do
end if;
Uloc[iproc,colproc]:=0
end if
end do
end if
end;
###
pelimLU:=proc(rowproc,colproc) # elimination for LU(P); multipliers are stored in Lloc
local iproc,jproc,tmproc;
if rowproc<nloc then
for iproc from rowproc+1 to nloc do
if Uloc[iproc,colproc]<>0 then
Lloc[iproc,rowproc]:=Uloc[iproc,colproc]/Uloc[rowproc,colproc]; # bugfix: was Uloc[rowloc,...]
Uloc[iproc,colproc]:=0;
if colproc<mloc then
for jproc from colproc+1 to mloc do
Uloc[iproc,jproc]:=Uloc[iproc,jproc]-Lloc[iproc,rowproc]*Uloc[rowproc,jproc]
end do
end if
end if
end do
end if
end;
###
pelimE:=proc(rowproc,colproc) # elimination for Euclidean algorithm
local iproc,jproc,tmproc,isnotOKproc;
if rowproc<nloc then
isnotOKproc:=true;
while isnotOKproc do
for iproc from rowproc+1 to nloc do
if Uloc[iproc,colproc]<>0 then
tmproc:=round(Uloc[iproc,colproc]/Uloc[rowproc,colproc]); # bugfix: was Uloc[rowloc,...]
for jproc from colproc to mloc do
Uloc[iproc,jproc]:=Uloc[iproc,jproc]-tmproc*Uloc[rowproc,jproc]
end do
end if
end do;
pivo(rowproc,colproc); # repeat until the smallest remainder ends up in the pivot row
if lmaxind>rowproc then
pswitch(rowproc,colproc,lmaxind)
else
isnotOKproc:=false
end if
end do
end if
end;
#######################
### setting up defaults
pivo:=pivoPP;
pelim:=pelimGEM;
pswitch:=pswitchG;
iseuclid:=false;
islu:=false;
islup:=false;
isdone:=false; ## was pivoting option used? for setting default pivoting of LU(P)
########################################
### optional argument(s) processed here
if nargs>1 then
for iloc from 2 to nargs do
if not(type(args[iloc],equation)) then
error("The option %1 not recognized.
Optional options are output= and pivoting=.",args[iloc]) end if;
tmploc:=convert(lhs(args[iloc]),string);
if tmploc="output" then ######### output
tmploc:=convert(rhs(args[iloc]),string);
if tmploc="gem" then # Gaussian elimination
islu:=false;
islup:=false;
pswitch:=pswitchG;
pelim:=pelimGEM
elif tmploc="gaussjordan" then # Gauss-Jordan elimination
islu:=false;
islup:=false;
pswitch:=pswitchG;
pelim:=pelimGJ
elif tmploc="lu" then # LU decomposition
islu:=true;
islup:=true;
pswitch:=pswitchLU;
pelim:=pelimLU;
if not(isdone) then pivo:=pivoM end if
elif tmploc="lup" then # LUP decomposition
islu:=false;
islup:=true;
pswitch:=pswitchLUP;
pelim:=pelimLU;
if not(isdone) then pivo:=pivoM end if;
else
error("Values for output are =gem, =gaussjordan, =lu, =lup.")
end if
elif tmploc="pivoting" then ######### pivoting
tmploc:=convert(rhs(args[iloc]),string);
isdone:=true;
if tmploc="minimal" then
pivo:=pivoM
elif tmploc="smallpivot" then
pivo:=pivoS
elif tmploc="euclidean" then
if not(type(inA,'Matrix'(integer))) then
error("An integer matrix expected for pivoting=euclidean.") end if;
iseuclid:=true;
pivo:=pivoS
elif tmploc="partial" then
pivo:=pivoPP
else
error("Values for pivoting are =partial, =minimal, =smallpivot, =euclidean.")
end if
else
error("The option %1 not recognized.
Optional options are output= and pivoting=.",args[iloc]) # bugfix: was args[kloc], kloc is undeclared here
end if
end do
end if;
if iseuclid then # Euclidean mode overrides the output choice (forces GEM-style pass)
if islup then
error("For Euclidean pivoting, LUP decomposition is not allowed.") end if;
pivo:=pivoS;
pswitch:=pswitchG;
pelim:=pelimE
end if;
if islup then # prepare L (and, for LUP, also P)
if nloc<>mloc then
error("For LU(P) decomposition, the matrix must be square.") end if;
Lloc:=LinearAlgebra:-IdentityMatrix(nloc,compact=false);
if not(islu) then
Ploc:=LinearAlgebra:-IdentityMatrix(nloc,compact=false)
end if
end if;
##############################
### main loop: for each row find a pivot column, switch rows if needed, eliminate
colloc:=1;
for rowloc from 1 by 1 while colloc<=mloc and rowloc<=nloc do
isskip:=true;
for jloc from colloc by 1 while isskip and jloc<=mloc do
pivo(rowloc,jloc)
end do;
colloc:=jloc-1;
if not(isskip) then
if lmaxind>rowloc then
pswitch(rowloc,colloc,lmaxind)
end if;
pelim(rowloc,colloc);
end if;
colloc:=colloc+1
end do;
if islu then
return(Lloc,Uloc)
elif islup then
return(Lloc,Uloc,Ploc)
else
return(Uloc)
end if
end:
BackSubstitute:=proc(inA)
description "BackSubstitute(A) solves an upper triangular system by back substitution.",
"The argument must be an upper triangular matrix (n,n+1).";
local nloc,mloc,iloc,jloc,xloc;
### input checks: numerical (n,n+1) matrix, upper triangular, regular diagonal
if not(type(inA,'Matrix'(complex))) then
error("A numerical matrix expected as argument.") end if;
nloc:=LinearAlgebra:-RowDimension(inA);
mloc:=LinearAlgebra:-ColumnDimension(inA);
if mloc<>nloc+1 then error("The matrix must have dimensions (n,n+1).") end if;
if not(LinearAlgebra:-IsMatrixShape(inA[1..nloc,1..nloc],triangular[upper]))
then error("The matrix must be upper triangular.") end if; # fully qualified, consistent with the rest of the file
if mul(inA[iloc,iloc],iloc=1..nloc)=0 then
error("The matrix must be regular.") end if;
if nargs>1 then printf("Unused options present.") end if;
###############################
### back substitution from the last row up
xloc:=Vector(nloc);
xloc[nloc]:=inA[nloc,mloc]/inA[nloc,nloc];
if nloc>1 then
for iloc from nloc-1 by -1 to 1 do
xloc[iloc]:=inA[iloc,mloc]; # start from the right-hand side entry
for jloc from iloc+1 to nloc do
xloc[iloc]:=xloc[iloc]-inA[iloc,jloc]*xloc[jloc]
end do;
xloc[iloc]:=xloc[iloc]/inA[iloc,iloc]
end do
end if;
return(xloc)
end:
ForwSubstitute:=proc(inA)
description "ForwSubstitute(A) solves a lower triangular system by forward substitution.",
"The argument must be a lower triangular matrix (n,n+1).";
local nloc,mloc,iloc,jloc,xloc;
### input checks: numerical (n,n+1) matrix, lower triangular, regular diagonal
if not(type(inA,'Matrix'(complex))) then
error("A numerical matrix expected as argument.") end if;
nloc:=LinearAlgebra:-RowDimension(inA);
mloc:=LinearAlgebra:-ColumnDimension(inA);
if mloc<>nloc+1 then error("The matrix must have dimensions (n,n+1).") end if;
if not(LinearAlgebra:-IsMatrixShape(inA[1..nloc,1..nloc],triangular[lower]))
then error("The matrix must be lower triangular.") end if; # fully qualified, consistent with the rest of the file
if mul(inA[iloc,iloc],iloc=1..nloc)=0 then
error("The matrix must be regular.") end if;
if nargs>1 then printf("Unused options present.") end if;
###############################
### forward substitution from the first row down
xloc:=Vector(nloc);
xloc[1]:=inA[1,mloc]/inA[1,1];
if nloc>1 then
for iloc from 2 to nloc do
xloc[iloc]:=inA[iloc,mloc]; # start from the right-hand side entry
for jloc from 1 to iloc-1 do
xloc[iloc]:=xloc[iloc]-inA[iloc,jloc]*xloc[jloc]
end do;
xloc[iloc]:=xloc[iloc]/inA[iloc,iloc];
end do
end if;
return(xloc)
end:
MatrixIterate:=proc(inA,inb,inx)
description "Solves Ax=b using iterations or shows iteration matrix (and vector).",
"MatrixIterate(A,b,xinit=<vector>,method=,tolerance=).",
"Alternative: MatrixIterate(A,method=,output=matrix).",
"Option: method=jacobi, method=gaussseidel, method=[sor,<omega>].",
"Optional options: stoppingcriterion=,output=,maxiterations=,maxnumber=,digits=.",
"Option: stoppingcriterion=relative(default), =absolute, =residual.",
"Option: output=information, =iterates, =value, =matrix, =matrices.",
"Default is =information for small n, otherwise =value.",
"Default maximal number of iterations is 50.",
"Default maximal allowed number in iterations is 1000000 .",
"Default number of decimal digits shown is Digits.";
# note: a separating comma was missing after the method= description string (syntax error)
local leps,maxnum,maxiter,isiter,isinf,ldig,
# controls: tolerance, maxnumber, cutoff, output (iterates? info?), digits
nloc,lomeg,isreal,lnext,ltest,lmatr,lprintiter,lprintinf,
# data: dimension, SOR factor, real/complex flag, iterative step procedure,
# stopping-test procedure, matrix procedure, printing procedures for iterates and info
xloc,kloc,iloc,tmploc,tmplc,isfar,maxdis;
# work registers
isreal:=true; ### real numbers only, for printing format
### matrix input & check
if not(type(inA,'Matrix'(complex))) then
error("A numerical matrix expected as the first argument.") end if;
nloc:=LinearAlgebra:-RowDimension(inA);
if nloc=1 then error("Matrix must have at least two rows and columns.") end if;
if nloc<>LinearAlgebra:-ColumnDimension(inA) then error("The matrix must be square.") end if;
if mul(inA[kloc,kloc],kloc=1..nloc)=0 then
error("Diagonal entries in the matrix must be non-zero.") end if;
if not(type(inA,'Matrix'(realcons))) then isreal:=false end if;
### shortened form handling: MatrixIterate(A,method=...,output=matrix)
if not(type(inb,'Vector'(complex))) then
if type(inb,equation) and convert(lhs(inb),string)="method"
and type(inx,equation) and convert(lhs(inx),string)="output"
and convert(rhs(inx),string)="matrix" then
if nargs>3 then printf("Unused options present.") end if;
tmploc:=rhs(inb);
if convert(tmploc,string)="jacobi" then # iteration matrix -D^(-1)(L+U)
tmplc:=Matrix(nloc);
for kloc from 1 to nloc do
for iloc from 1 to nloc do
tmplc[kloc,iloc]:=-inA[kloc,iloc]/inA[kloc,kloc]
end do;
tmplc[kloc,kloc]:=0
end do;
return(tmplc)
elif convert(tmploc,string)="gaussseidel" then # iteration matrix -(D+L)^(-1)U
tmplc:=Matrix(nloc);
for kloc from 1 to nloc do
for iloc from 1 to kloc do
tmplc[kloc,iloc]:=inA[kloc,iloc]
end do
end do;
for kloc from 1 to nloc-1 do
for iloc from kloc+1 to nloc do
tmplc[kloc,iloc]:=0
end do
end do;
return(-LinearAlgebra:-MatrixInverse(tmplc).(inA-tmplc))
elif type(tmploc,list) and convert(tmploc[1],string)="sor" then # SOR iteration matrix
lomeg:=tmploc[2];
if not(type(lomeg,realcons)) then
error("SOR expects a real number as second part of argument: method=[sor,<omega>].")
end if;
tmplc:=Matrix(nloc);
xloc:=Matrix(nloc);
tmplc[1,1]:=inA[1,1];
xloc[1,1]:=(lomeg-1)*inA[1,1];
for kloc from 2 to nloc do
for iloc from 1 to kloc-1 do
tmplc[kloc,iloc]:=lomeg*inA[kloc,iloc];
xloc[kloc,iloc]:=0
end do;
tmplc[kloc,kloc]:=inA[kloc,kloc];
xloc[kloc,kloc]:=(lomeg-1)*inA[kloc,kloc]
end do;
for kloc from 1 to nloc-1 do
for iloc from kloc+1 to nloc do
tmplc[kloc,iloc]:=0;
xloc[kloc,iloc]:=lomeg*inA[kloc,iloc]
end do
end do;
return(-LinearAlgebra:-MatrixInverse(tmplc).xloc)
else
error("Method not recognized, use method=jacobi, =gaussseidel, =[sor,<omega>].")
end if
else
error("The second and third arguments do not match expected patterns: A,B,xinit,method,tolerance or A,method,output=matrix.")
end if
end if;
### typical use handling
if nargs<5 then error("Some parameter(s) missing: A, b, xinit, method, tolerance.") end if;
### right hand side input & check
if nloc<>LinearAlgebra:-Dimension(inb) then error("Vector b has wrong size.") end if;
if not(type(inb,'Vector'(realcons))) then isreal:=false end if;
### initial vector input & check
if not(type(inx,equation)) or convert(lhs(inx),string)<>"xinit"
or not(type(rhs(inx),'Vector'(complex))) then
error("Initial vector xinit=... expected as the third argument.")
end if;
tmploc:=rhs(inx);
if nloc<>LinearAlgebra:-Dimension(tmploc) then
error("Vector xinit has a wrong dimension.") end if;
xloc:=Vector(nloc,iloc->tmploc[iloc]); # private copy of the initial vector
if not(type(xloc,'Vector'(realcons))) then isreal:=false end if;
### method specification input & check & preparation
if not(type(args[4],equation)) or (convert(lhs(args[4]),string)<>"method") then
error("The fourth argument must specify method=...") end if;
tmploc:=rhs(args[4]);
############# procedures for iteration: modify outer variable xloc
############# maxdis stores max_i|x(k+1)_i-x(k)_i|
### procedures for Jacobi
if convert(tmploc,string)="jacobi" then
lnext:=proc() # one Jacobi step: new vector built from the old one
local yloc,iproc,jproc;
yloc:=Vector(nloc);
for iproc from 1 to nloc do
yloc[iproc]:=inb[iproc];
for jproc from 1 by 1 while jproc<iproc do
yloc[iproc]:=yloc[iproc]-inA[iproc,jproc]*xloc[jproc]
end do;
for jproc from nloc by -1 while jproc>iproc do
yloc[iproc]:=yloc[iproc]-inA[iproc,jproc]*xloc[jproc]
end do;
yloc[iproc]:=evalf(yloc[iproc]/inA[iproc,iproc]);
end do;
maxdis:=max(seq(abs(yloc[iproc]-xloc[iproc]),iproc=1..nloc));
xloc:=yloc
end;
lmatr:=proc() # iteration matrix and vector for Jacobi
local Bproc,cproc,kproc,iproc;
Bproc:=Matrix(nloc);
cproc:=Vector(nloc);
for kproc from 1 to nloc do
for iproc from 1 to nloc do
Bproc[kproc,iproc]:=-inA[kproc,iproc]/inA[kproc,kproc]
end do;
Bproc[kproc,kproc]:=0;
cproc[kproc]:=inb[kproc]/inA[kproc,kproc]
end do;
return(Bproc,cproc)
end
### procedures for Gauss-Seidel
elif convert(tmploc,string)="gaussseidel" then
lnext:=proc() # one Gauss-Seidel step: updates xloc in place
local yloc,iproc,jproc;
maxdis:=0;
for iproc from 1 to nloc do
yloc:=inb[iproc];
for jproc from 1 by 1 while jproc<iproc do
yloc:=yloc-inA[iproc,jproc]*xloc[jproc]
end do;
for jproc from nloc by -1 while jproc>iproc do
yloc:=yloc-inA[iproc,jproc]*xloc[jproc]
end do;
yloc:=evalf(yloc/inA[iproc,iproc]);
maxdis:=max(maxdis,abs(yloc-xloc[iproc]));
xloc[iproc]:=yloc
end do
end;
lmatr:=proc() # iteration matrix and vector for Gauss-Seidel
local DLproc,kproc,iproc;
DLproc:=Matrix(nloc);
for kproc from 1 to nloc do
for iproc from 1 to kproc do
DLproc[kproc,iproc]:=inA[kproc,iproc]
end do
end do;
for kproc from 1 to nloc-1 do
for iproc from kproc+1 to nloc do
DLproc[kproc,iproc]:=0
end do
end do;
return(-LinearAlgebra:-MatrixInverse(DLproc).(inA-DLproc),
LinearAlgebra:-MatrixInverse(DLproc).inb)
end
### procedures for SOR
elif type(tmploc,list) and convert(tmploc[1],string)="sor" then
lomeg:=tmploc[2];
if not(type(lomeg,realcons)) then
error("SOR expects a real number as second part of argument: method=[sor,<omega>].")
end if;
lnext:=proc() # one SOR step: relaxed Gauss-Seidel update of xloc
local yloc,iproc,jproc;
maxdis:=0;
for iproc from 1 to nloc do
yloc:=inb[iproc];
for jproc from 1 by 1 while jproc<iproc do
yloc:=yloc-inA[iproc,jproc]*xloc[jproc]
end do;
for jproc from nloc by -1 while jproc>iproc do
yloc:=yloc-inA[iproc,jproc]*xloc[jproc]
end do;
yloc:=evalf((1-lomeg)*xloc[iproc]+lomeg*yloc/inA[iproc,iproc]);
maxdis:=max(maxdis,abs(yloc-xloc[iproc]));
xloc[iproc]:=yloc
end do
end;
lmatr:=proc() # iteration matrix and vector for SOR
local DLproc,Uproc,kproc,iproc;
DLproc:=Matrix(nloc);
Uproc:=Matrix(nloc);
DLproc[1,1]:=inA[1,1];
Uproc[1,1]:=(lomeg-1)*inA[1,1];
for kproc from 2 to nloc do
for iproc from 1 to kproc-1 do
DLproc[kproc,iproc]:=lomeg*inA[kproc,iproc];
Uproc[kproc,iproc]:=0
end do;
DLproc[kproc,kproc]:=inA[kproc,kproc];
Uproc[kproc,kproc]:=(lomeg-1)*inA[kproc,kproc]
end do;
for kproc from 1 to nloc-1 do
for iproc from kproc+1 to nloc do
DLproc[kproc,iproc]:=0;
Uproc[kproc,iproc]:=lomeg*inA[kproc,iproc]
end do
end do;
return(-LinearAlgebra:-MatrixInverse(DLproc).Uproc,
lomeg*LinearAlgebra:-MatrixInverse(DLproc).inb)
end
else
error("Method not recognized, use method=jacobi, =gaussseidel, =[sor,<omega>].")
end if;
### tolerance input & check
if not(type(args[5],equation)) or (convert(lhs(args[5]),string)<>"tolerance")
or not(type(evalf(rhs(args[5])),positive)) then
error("The fifth argument must specify tolerance=... (a positive number)")
end if;
leps:=evalf(rhs(args[5]));
### setting up defaults
maxiter:=50;
maxnum:=1000000;
ltest:=proc(Aproc,bproc,distproc,sizeproc,xproc) ### testing distance, relative (default)
if sizeproc=0 then return(1)
else return(distproc/sizeproc) end if
end;
if (isreal and nloc<7) or (not(isreal) and nloc<4) then isiter:=true else isiter:=false end if;
if (isreal and nloc<5) or (not(isreal) and nloc<3) then isinf:=true else isinf:=false end if;
ldig:=Digits;
#######################################
### optional argument(s) processed here
if nargs>5 then
for kloc from 6 to nargs do
if not(type(args[kloc],equation)) then
error("The option %1 not recognized.
Optional options are stoppingcriterion=, output=, maxiterations=, maxnumber=, digits.",args[kloc])
end if;
tmploc:=convert(lhs(args[kloc]),string);
if not(member(tmploc,["stoppingcriterion","output","maxiterations","maxnumber",
"digits"],'iloc')) then
error("The option %1 not recognized.
Optional options are stoppingcriterion=, output=, maxiterations=, maxnumber=, digits.",args[kloc])
end if;
if iloc=5 then ######### digits
ldig:=rhs(args[kloc]);
if not(type(ldig,posint)) then
error("Number of decimal digits to show must be a positive integer.") end if
elif iloc=4 then ######### maxnumber
maxnum:=rhs(args[kloc]);
if not(type(maxnum,positive)) then
error("Maximal allowed number must be a positive number.") end if
elif iloc=3 then ######### maxiterations
maxiter:=rhs(args[kloc]);
if not(type(maxiter,posint)) then
error("Maximal number of iterations must be a positive integer.") end if
elif iloc=1 then ######### stoppingcriterion
tmplc:=convert(rhs(args[kloc]),string);
if tmplc="absolute" then
ltest:=proc(Aproc,bproc,distproc,sizeproc,xproc)
return(distproc)
end
elif tmplc="relative" then
ltest:=proc(Aproc,bproc,distproc,sizeproc,xproc)
if sizeproc=0 then return(1)
else return(distproc/sizeproc) end if
end;
elif tmplc="residual" then
ltest:=proc(Aproc,bproc,distproc,sizeproc,xproc)
local yproc;
yproc:=bproc-Aproc.xproc;
return(max(seq(abs(yproc[iproc]),iproc=1..nloc)))
end;
else error("Values for stoppingcriterion are =absolute, =relative, =residual.")
end if
else ######### output, creates matrices right away
tmplc:=convert(rhs(args[kloc]),string);
if tmplc="information" then
isiter:=true;isinf:=true
elif tmplc="iterates" then
isiter:=true;isinf:=false
elif tmplc="value" then
isiter:=false;isinf:=false
elif tmplc="matrix" then
return(lmatr()[1])
elif tmplc="matrices" then
return(lmatr())
else
error("Values for output are =information, =iterates, =value, =matrix, =matrices.")
end if
end if
end do
end if;
if ldig>Digits then Digits:=ldig end if;
############### printing routines
if isiter then
if isreal then
lprintiter:=proc()
local iproc;
printf(" x=[");
for iproc from 1 to nloc-1 do printf("% *.*f,",ldig+4,ldig,xloc[iproc]) end do;
printf("% *.*f]",ldig+4,ldig,xloc[nloc])
end:
else
lprintiter:=proc()
local iproc;
printf(" x=[");
for iproc from 1 to nloc-1 do printf("% *.*Zf,",ldig+4,ldig,xloc[iproc]) end do;
printf("% *.*Zf]",ldig+4,ldig,xloc[nloc])
end:
end if;
if isinf then
lprintinf:=proc()
printf(", res=% *.*f test=% *.*f,\n",ldig+2,ldig,tmploc,ldig+2,ldig,tmplc)
end:
else
lprintinf:=proc()
printf("\n")
end:
end if;
else # silent mode: printing procedures do nothing
lprintiter:=proc()
end:
lprintinf:=proc()
end:
end if;
###################################
isfar:=true;
printf("k=00 ");
lprintiter();
if isiter then printf("\n") end if;
for kloc from 1 to maxiter by 1 while isfar do
printf("k=%02d ",kloc);
lnext();
tmploc:=max(seq(abs(xloc[iproc]),iproc=1..nloc));
if tmploc>maxnum then # divergence cutoff
printf("Numbers in iteration too large.");
return(xloc)
end if;
tmplc:=ltest(inA,inb,maxdis,tmploc,xloc);
if isinf then # residual shown in the info line
tmploc:=inb-inA.xloc;
tmploc:=max(seq(abs(tmploc[iloc]),iloc=1..nloc))
end if;
lprintiter();
lprintinf();
tmploc:=max(seq(abs(xloc[iproc]),iproc=1..nloc));
if tmplc<leps then isfar:=false end if;
end do;
if isfar then printf("Maximal number of iterations reached.") end if;
return(xloc)
end:First, let us see how Maple handles the first problem from this week's homework.A:=<<3,1,-4>|<-2,-1,2>|<1,0,0>>;
b:=<7,2,-6>;The Gauss-Jordan elimination is not used in applications, but the command allows it, so we try it to see the solution in the right column:MatrixEliminate(<A|b>,output=gaussjordan);When we solved this system by hand, we most likely chose the smallest candidate for the pivot. Our command can do it as well, so check whether the following example does it the same way as you did. Then erase the pivoting option and see the partial pivoting being used by default.MatrixEliminate(<A|b>,pivoting=smallpivot);The new system in part c) differs only in the right-hand side. We try to solve it using the proper numerical math approach here: first we apply the Gaussian elimination and then the backward substitution. Did you get the same solution?b:=<4,1,-4>;
Ael:=MatrixEliminate(<A|b>,pivoting=smallpivot);
BackSubstitute(Ael);Theory says that GEM+BS should be significantly faster compared to GJM (reduction to an identity matrix). You can compare the times for random matrices now and you will see what your machine thinks about this. If it is really fast, you can carefully increase n, but keep in mind that doubling n will prolong the runtime about eight times, while increasing n by half will typically make the run longer more than three times.As a bonus we show the error of the solution found by the floating-point elimination.n:=150;
A:=RandomMatrix(n,generator=-9.9..9.9):
while Rank(A)<n do A:=RandomMatrix(n,generator=-9.9..9.9) end do;
x0:=RandomVector(n,generator=-9.9..9.9):
b:=A.x0:
start:=time():Ael:=MatrixEliminate(<A|b>,output=gaussjordan):tmp:=time()-start:printf("GJM: %f\134n",tmp);
start:=time():Ael:=MatrixEliminate(<A|b>):tmp:=time()-start:printf("GEM: %f\134n",tmp);
start:=time():sol:=BackSubstitute(Ael):tmpp:=time()-start:printf("BS: %f\134n",tmpp);
printf("GEM+BS: %f",tmp+tmpp);
Norm(x0-sol,infinity);The partial pivoting is said to reduce the influence of numerical errors. However, it is hard to measure this directly, because as we change the pivoting strategy, also the resulting matrix changes and thus it makes no sense to compare matrices created by two distinct pivoting strategies. However, it is possible to compare information derived from such matrices assuming that it does not change during elimination. We would prefer something simple that would not be another source of serious error, which brings us to a determinant. For a reduced matrix we simply multiply terms on the diagonal, and if we take it in absolute value, then the usual row operations do not influence it. We can therefore trace the influence of numerical errors on elimination using determinant rather well.The following code will generate an integer-valued matrix and then finds the exact value of determinant by fraction-free (with integers, i.e. precise) elimination. We should keep in mind that integer-valued elimination is significantly more time demanding than its floating point version, in the best case it is said to require asymptotically n^4 operations, which is one order more compared to the usual Gaussian elimination. I therefore used the official Maple procedure for determinant which is implemented very efficiently, but it still runs significantly longer compared to my homemade procedure for elimination.Then we reduce the matrix twice using floating-point elimination, once with minimal pivoting (we pivot only when forced to, and grab the first non-zero candidate then) and once with partial pivoting, we time the runs just out of curiosity. Then the main calculation comes. We determine the determinant in absolute value for each of the reduced matrices and find how they differ from the correct value, and finally we divide the two errors. This last number therefore tells us how much larger the error is when we do not use partial pivoting. 
We can expect (especially for larger matrices) that this number will be larger than one, as partial pivoting rarely makes the error worse. The right question is how large this ratio can get. It is advisable to run the code repeatedly to see typical outcomes. We should recall that when one error is ten times larger, then it means that we lost one digit of precision, with ratio being 100 we lost two digits of precision and so on.If the code runs reasonably fast on your computer, you can try to increase n (but carefully).An appendix for curious students: My procedure knows how to do fraction-free elimination as well, so we can see a direct comparison of computational complexity with floating-point elimination. On my computer, these were the typical times:
n: 25 | 50 | 100 | 150 | 200 — time for partial pivoting: 0.02s | 0.1s | 0.85s | 3s | 7s — time for fraction-free elimination: 0.03s | 0.3s | 4.5s | 35s | 280s
It is interesting to compare columns where n doubles (50 versus 25, 100 versus 50, 200 versus 100). Cubic growth means that the time should increase approximately eight times, which seems to fit floating point elimination quite well. The times for fraction-free elimination obviously grow faster than a cubic power.n:=150:
A:=RandomMatrix(n,generator=-9..9):
while Rank(A)<n do A:=RandomMatrix(n,generator=-9..9) end do:
tstart:=time():det:=abs(Determinant(A,method=fracfree)):Tn:=time()-tstart;
A:=evalf(A):
tstart:=time():Ael:=MatrixEliminate(A,pivoting=minimal):Tn:=time()-tstart;
err:=abs((abs(mul(Ael[k,k],k=1..n))-abs(det))/det):
tstart:=time():Ael:=MatrixEliminate(A,pivoting=partial):Tn:=time()-tstart;
err/abs((abs(mul(Ael[k,k],k=1..n))-det)/det);It is natural to inquire how partial pivoting affects reliability of solutions for systems of linear equations. Fortunately, we can work with a precise solution without the need for fraction-free elimination, we simply first generate an integer-valued solution and then use it to create the corresponding right-hand side. The following code again compares the error without partial pivoting and with it, this time for solving a random system of equations. Just like before, it is a good idea to run this code repeatedly to get some feeling for a typical impact of partial pivoting. If you have a fast computer, increase n.n:=200:
A:=RandomMatrix(n,generator=-9..9):
while Rank(A)<n do A:=RandomMatrix(n,generator=-9..9) end do;
x0:=RandomVector(n,generator=-9..9):
b:=A.x0:A:=evalf(A):b:=evalf(b):
Ael:=MatrixEliminate(<A|b>,pivoting=minimal):
solNP:=BackSubstitute(Ael):
Ael:=MatrixEliminate(<A|b>,pivoting=partial):
solP:=BackSubstitute(Ael):
Norm(x0-solNP,infinity)/Norm(x0-solP,infinity);