WARNING: Changes are currently being made (as of Mar 11/10).
The file runtrs.m can be tried; it uses:
[xopt,fopt,lopt,lambdamin,flag] = newtrust4b(a,s,dgaptol)
A tarfile with the MATLAB files is also available.
%
%Call: [xopt,fopt,lopt,lambdamin,muup,mulow,iter,flag] = ...
% newtrust(a,s,dgaptol);
%% files needed: Afun.m,Dfun.m, and this file newtrust.m
%%% optional file: conj_grad.m, see the commented-out part below
%%
% BEFORE USING NEWTRUST, THE FOLLOWING INSTRUCTION MUST BE ENTERED
% > global A D counter
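% (For reference: Afun.m and Dfun.m are not reproduced here. Presumably they
%  are the matrix-vector product callbacks used by eigs, which also update
%  the global multiplication counter, roughly as in this sketch:
%      function y = Afun(x)
%      global A counter
%      counter = counter + 1;
%      y = A*x;
%  and similarly for Dfun.m with D in place of A. This is an assumption,
%  not the distributed code.)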
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Trust Region Subroutine Algorithm, with Links to Documentation
% Charles Fortin and Henry Wolkowicz
% Dept. Comb. & Opt., Univ. of Waterloo
%
%
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% solves: min q(x) = x'Ax - 2a'x
% such that s^2-x'x >= 0 (Lagr. mult. lopt <=0)
%
%% which is the trust region subproblem. This can be used within
%% a trust region algorithm for min f(x).
%
% The algorithm returns an approx. optimum xopt and corresp. value qopt so that:
%
% norm(xopt) <= (1+dgaptol)s
%
% and two constants so that
%
% mulow <= qopt <= muup, mulow <= fopt <= muup, AND
% |qopt - fopt| <= |muup-mulow| <= |2*dgaptol*muup| <= |2*dgaptol*fopt|
% So that qopt >= (1-2*dgaptol)*fopt
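%
% A minimal usage sketch (the test data below is illustrative only, not part
% of this package):
%     global A D counter
%     n = 500; A = sprandsym(n,0.01) - speye(n);   % sparse symmetric test matrix
%     a = randn(n,1); s = 10; dgaptol = 1e-8;
%     [xopt,fopt,lopt,lambdamin,muup,mulow,iter,flag] = newtrust(a,s,dgaptol);
%     % on return, norm(xopt) <= (1+dgaptol)*s and mulow <= fopt <= muup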
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%GLOBAL VARIABLES%%%%%%%%%%%%%%%%%%%%%%%
%
global A D counter
%
%
%%%%%%%%%%%%%%%%%%%%%% TOLERANCES%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Tolerances - these can and should be changed by the user.
%
%dgaptol % duality gap for stopping criteria (input)
intol= dgaptol;
normtol =intol; % allowable error in feasibility for stopping
n = size( A,1); % problem size
zerotol=intol/log(n^10); % tolerance for zero for y(1) for hard case
% since y is norm 1, tolerance decreases with dim.
kzerotol=intol; % tolerance for zero for k(t)
hzerotol= 1e-4; % hard case detection ???
iterbnd=30; % upper bound on iterations
%%%%%%%%%%%%%%%%%%%%%%%%%%%END TOLERANCES%%%%%%%%%%%%
%
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%INITIALIZATION%%%%%%%%%%%%%%
t0=cputime; % saved to get total cputime usage
counter = 0; % counts matrix mults
a=full(a); % set to full to avoid conflicts
na=norm(a); % vector norm
if na < zerotol,
disp('WARNING: linear part -a- is approximately 0');
end
%%%First eigenvalue calculation
OPTIONS.tol= 1e-13; % convergence tolerance
OPTIONS.issym = 1; % because we know the matrices A and D are symmetric
OPTIONS.disp = 0; % no display of the output for eigs.m
v=full(sum(A))';v=-v/norm(v); % good/cheap normalized estimate ???
%i.e. largest eig of -A using v=-Ae, e=ones
OPTIONS.v0 =v ; % initial eigenvector estimate for sparse A
tarn10=cputime; % keep for the first eig calc time
[va,lambda,flageigs]=eigs('Afun',n,1,'SR',OPTIONS); % computes smallest
tarn1f=cputime-tarn10; % save initial eig calc cputime for statistics
disp(['elapsed time for first eig calc: ',num2str(tarn1f)]);
if (flageigs>0),
disp('WARNING INITIAL EIGENVALUE CALCULATION BAD');
disp(['condition est. of eigs: ',num2str(condest(A))]);
end
%eigenvalue (lambda) and corresponding eigenvector (va) for A
avnorm=norm(A*va-lambda*va);
if avnorm > zerotol*(abs(lambda)+1), % ???zerotol???check???
disp(['USING eig, output from eigs norm error ',num2str(avnorm)]);
[V,Lam]=eig(full(A));
lambda=min(diag(Lam));
indx=find(lambda==diag(Lam) );va=V(:,indx);va=va/norm(va);
avnorm=norm(A*va-lambda*va);
disp(['USING eig new norm error is ',num2str(avnorm)]);
end
lambdamin = lambda; % save the min eigenvalue of A
vaa=va'*a;
% ensure va'*a is not negative
if (vaa < 0)
va = -va;vaa=-vaa;
end
y=[0;va];y=y/norm(y); % initial normalized estimate eigenv. D(t^*)
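% (If a'*va = 0, the hard case, then [0;va] is an exact eigenvector of D(t)
%  for the corresponding t; otherwise it only serves as a starting guess.)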
flag = 0; %flag will change value if the almost hard case (case 2) occurs
alow=lambda;aup=lambda;
% lower estimate on mu*
if lambda < 0
mulow = alow*s^2-2*na*s; % lower estimate for optimal value
else
% mulow = -2*na*s; % replaced by min on boundary or interior
% since interior opt value is a'invAa-2a'invAa=-a'invAa
mulow = min(alow*s^2-2*na*s,-na*s);
end
% upper estimate on mu*
if lambda > 0
if (( (vaa)/(lambda ) <= s)), % optimum may be on boundary or interior
testmu = s^2*lambda - 2*s*na;
muup = -(vaa)^2/lambda;
if muup < testmu, % apply conj grad - opt interior
a=sparse(a);
%xopt = conj_grad((vaa/lambda)*va,a);
% apply conjugate gradient method to find the optimum.
xopt=A\a; % use matlab function for now or uncomment conj_grad
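% (A hedged alternative sketch: MATLAB's built-in pcg could replace the
%  backslash solve above, since in this branch lambda, the smallest
%  eigenvalue of A, is positive, so A is positive definite. The tolerance
%  and iteration limit below are illustrative only:
%      [xopt,pcgflag] = pcg(A, a, 1e-10, n);
%      if pcgflag ~= 0, xopt = A\a; end    % fall back to the direct solve
%  )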
fopt = -a'*xopt; % optimal value since A xopt = a
lopt = 0;
topt = 99999;
muup=99999; mulow=99999;iter=0;
gap=[topt topt
lopt lopt
topt topt
];
t0f=cputime-t0;
flag = 2; % To know answer is the unconstrained minimum
disp(['elapsed time: ', num2str(t0f)]);
disp(['Problem solved using conjugate gradients ']);
return; %OPTIMALITY - PROBLEM IS SOLVED
end
bestxf = ((vaa)/lambda)*va; % vaa is >=0 now
else
muup = s^2*lambda - 2*s*(vaa); % <=0 by above if
bestxf = s*va;
end
else
[muup,indice] = min([0,lambda*s^2-2*s*(vaa)]);
if indice == 1
bestxf = zeros(size(a,1),1);
else
bestxf = s*va;
end
end
bestfxf=muup; % current best opt value corresp to bestxf
% bestxf is feasible (possibly interior)
mubnds=[mulow muup]; % keep history of bounds - latest at top
% This is only for debugging help.
gap=[mulow muup]; % duality gap - gets updated when a bound changes
if (lambda > 0)
low = min(alow - na/s,-na*s); %lower bound on t
else
low = alow - na/s;
end
high = aup + na*s; % upper bound on t
newlow=low;newhigh=high; %new points found by vertical cuts
vertflag=0; % to prevent 2 vertical cuts in a row
ss1=s^2+1;
D = [ 0 -a'
-a A]; % augmented matrix
ystar = 1 / sqrt( s*s +1); % this is value we are after for y0(t)
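% (Note: D(1,1) is overwritten with the current t at each iteration, so the
%  matrix actually used is D(t) = [t -a'; -a A]. The target value ystar
%  follows from: at a boundary optimum x = y(2:n+1)/y(1) with ||x|| = s and
%  ||y|| = 1, so y(1)^2*(s^2+1) = 1, i.e. y(1) = 1/sqrt(s^2+1).)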
iter=0; % iteration count
b6 = ' '; % define 6 blanks
% ptgood, ptbad variables: used for saving interpolation data on the
% good (easy) and bad (hard) side of maximum of k(t).
% gy,by saves last y vector on good and bad side
% The latest data is put into row 1.
% Only two rows are needed. The rest is saved for debugging help.
% t, y(1), lambda, valid-data-indicator ; is data saved.
ptbad=[];
ptgood=[];
gy=[];by=[]; % The latest vectors y from the good and bad side resp.
side=0; % indicator for consecutive t found
% -i for i bad sides and +i for i good sides
tslopes=[]; % Save slopes of kt on the [bad good] sides - for
% debugging and for case where kt is almost piecewise linear.
kts=[]; % saving kt bounds and t value - for debugging
t=99999;indexg=0;indexb=0; % initialize in case of 0 iterations
tind=[]; % follow trail of t changes - for debugging
%
disp('iter ----- t ---- rel dual gap -- rel. primal opt cond');
%txt=[int2str(iter),b6,num2str(t),b6,num2str(dgap),b6,num2str(dbestgap)];
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%NOTES on ALGORITHM%%%%%%%%%%%%%%
% Following are the functions of interest:
% h(lambda)   = s^2*lambda - a'(A-lambda I)^{-1}a;   maximize  - concave
% hp(lambda)  = s^2 - a'(A-lambda I)^{-2}a;          solve = 0 - concave, nonincreasing
% phi(lambda) = 1/s - 1/||(A-lambda I)^{-1}a||;      solve = 0
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% k(t)   = (s^2+1)*lambda(t) - t;    maximize  - concave
% k'(t)  = (s^2+1)*y0(t)^2 - 1;      solve = 0 - same sign as psi(t)
% psi(t) = -1/y0(t) + sqrt(s^2+1);   solve = 0 - concave, nonincreasing
% y0(t)  = first component of the normalized eigenvector y(t); nonincreasing from 1 to 0
% lambda(t) = smallest eigenvalue of D(t); a concave function of t
%
% The algorithm is based on maximizing k(t).
% We interpolate on psi(t) whenever possible.
%
% The general iteration interpolates psi(t) whenever
% possible. Duality is used to calculate bounds, i.e. the function k(t) is
% used when problems with the hard case occur.
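% A commented sketch (for illustration only) of how these quantities come
% from the smallest eigenpair of D(t) = [t -a'; -a A]:
%     Dt = [t -a'; -a A];
%     [y,lam] = eigs(Dt,1,'SR');        % smallest eigenvalue and eigenvector
%     y = y/norm(y); y0 = abs(y(1));    % y0(t)
%     kt = (s^2+1)*lam - t;             % k(t); lower bound on mu^* when lam <= 0
%     kp = (s^2+1)*y0^2 - 1;            % k'(t); vanishes at t = t^*
%     ps = -1/y0 + sqrt(s^2+1);         % psi(t); same root t^* as k'(t)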
% Initialize the stopping criteria
t1=abs( 1-y(1)^2*(1+s^2))>( (2*normtol+normtol^2) *y(1)^2*s^2 );
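% (t1 tests feasibility: since ||x(t)||^2 = (1-y(1)^2)/y(1)^2, the test is
%  | ||x(t)||^2 - s^2 | > ((1+normtol)^2 - 1)*s^2, i.e. ||x(t)|| is not yet
%  within a relative tolerance of about normtol of s.)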
dbestgap= (bestfxf-mulow)/(abs(bestfxf)+1);
t2= dbestgap > 2*dgaptol;
dgap= abs(muup-mulow)/(abs(muup)+1) ;
t3= (muup-mulow) >= -2*dgaptol*muup;
%t3= dgap > dgaptol; % Duality gap criteria - old way
t4=iter <=iterbnd; % iteration bound
t5=abs(high-low)/(abs(high)+1) > zerotol; %interval of uncertainty for t
%%%%%%%%%%%%%%%%%%%END INITIALIZATION%%%%%%%%%%%%%
%%
%
%%%%%%%%%%%%%%%%%%%%%%%WHILE LOOP%%%%%%%%%%%
% Outline of the while loop:
%   PART 1. Find New Point t:
%           Vertical Cut,
%           Triangle Interpolation,
%           Good Side Interpolation,
%           Bad Side Interpolation.
%   PART 2. Updating Begins:
%           Lambda has wrong sign,
%           Bad Side Update,
%           Good Side Update.
%   PART 3. Tolerance Updates, End of While Loop, Start of Output.
%
%% check feasibility and iteration count and duality gap
while ( (t1&t2) | t3) & (t4 & t5), %
% start of while
iter=iter+1; % one more iteration is done
indexg=size(ptgood,1); % number of points on the good side
indexb=size(ptbad,1); % number of points on the bad side
%
%%%%%%%%% FIND a new point t.
newlow=max(newlow,low);newhigh=min(newhigh,high);
if newlow>low,
t=newlow;
tind=[tind -2]; % indicator for t value - for debugging
elseif newhigh < high,
t=newhigh;
tind=[tind -1];
else
t=(max(newlow,low)+min(newhigh,high))/2; % midpoint default
tind=[tind 1];
end
if indexb > 0 & indexg > 0, %
% The last point on right and left that has eigenvalue data
% always do triangle interpol and vertical cut if possible
ktl=ss1*ptbad(1,3)-ptbad(1,1);
kth=ss1*ptgood(1,3)-ptgood(1,1);
tlslope=ss1*ptbad(1,2)^2-1;
thslope=ss1*ptgood(1,2)^2-1;
%
%
%%%%%%%%%%%%%%%%%%%%%%%TRIANGLE INTERPOLATION%%%%%%%%%%%
% Always find interpolant using triangle-with associated upper bound.
tslopes=[tlslope thslope
tslopes]; % for debugging purposes - possible use
% when both sides are almost linear
mrhs=[ (tlslope*(-ptbad(1,1))+ktl)
(thslope*(-ptgood(1,1))+kth) ];
% rhs of interpolation
ktt=[ 1 (-tlslope)
1 (-thslope)];
kttans=ktt\mrhs; % find intersection of the two tangent lines at
% the last bad and good side points. The point of
% intersection is (kttans(2),kttans(1)). Hence, kttans(1)
% is an upper bound on mu^* and kttans(2) is an approximation
% for t^*
ktup=kttans(1);
tt=kttans(2); % This is alternate interpol point using triangle
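% (Since k(t) is concave, it lies on or below each of its tangent lines, so
%  the tangent-intersection value kttans(1) upper-bounds the maximum of k(t);
%  it is used below, when consistent with the current bounds, to tighten muup.)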
%%%if ptgood(1,2) < hzerotol, % modify if hard case - test this????
%%% tt=(.2/abs(side))*tt+(1-.2/abs(side) )*newhigh; % favour good side
%%%end
if (ktup <= muup) & (ktup >= mulow),
muup=ktup; % new upper bound on optimal value
end
if max(newlow,low)<=tt & tt <= min(newhigh,high);
t=tt;
tind=[tind 2];
end
% %%%%%%%%%%%%%%%%%%%%End of triangle for new interpolant
%
%
%%%%%%%%%%%%%%%%%%%%%%%VERTICAL CUT%%%%%%%%%%%
%
if ptgood(1,2) > hzerotol, % avoided hard case here - try
%%% smaller values for hzerotol!!!
if vertflag ~= 1, % skip if a vertical cut was just made
vertflag= 1; % prevents 2 vertical cuts in a row
%% errors can occur if eigenvalue not calculated properly
%% or in hard case - here and below!!!
if kth > (1+kzerotol)*ktl; % opt cond for k(t) - check???
tempnewlow=low + (kth-ktl)/tlslope;
if newlow <= tempnewlow & tempnewlow <= newhigh, %
newlow=tempnewlow;
t=newlow;
tind=[tind -2];
else
disp('WARNING newlow not in interval ?????')
%%????this is not explained satisf. why??? roundoff
%%%that important???
newlow=low; %
end %
elseif ktl > (1+kzerotol)*kth; %opt cond for k(t)-check?
tempnewhigh=high + (ktl-kth)/thslope;
if newlow <= tempnewhigh & tempnewhigh <= newhigh, %
newhigh=tempnewhigh;
t=newhigh; %
tind=[tind -1];
else %
disp('WARNING newhigh not in interval ?????')
end %
else %
disp('two values of k up to tolerance found');
% arrange proper output still????RETRN!!!
end %
end %end vertical flag check
end % end of hard case check above vert flag - temp
%%%%%%%%%%%%%%%%%%%%%%%end vertical cut%%%%%%%%%%%
end % end of if indexb > 0 and indexg > 0
%%%%%%%%%%%%%%%%%%%%%%%%%
%
if side > 0, % last iterate was on the GOOD SIDE
if sum(ptgood(:,4)) >= 2, %two t-iterates with data good side
tint=ptgood(1:2,1);
yint=ptgood(1:2,2);
f1=-1/yint(1) + 1/ystar;
f2=-1/yint(2) + 1/ystar;
slope=(tint(1)-tint(2))/(f1-f2);
tt=tint(1) - f1* slope; % linear inverse interpolation
if newlow <=tt&tt<=newhigh, % roundoff error problem check
%% We now decide whether to choose the new point
t=tt;
tind=[tind 3]; % indicator for t value - for debugging
end
if side > 4&(min(ptgood(1:2,2)) < (10^(side-1)*zerotol) ), %
%too many iterates on good side and it is near bad case
t=.95*newlow+.05*newhigh;
%????use weights from k' if indexb>0?????
tind=[tind 4]; % indicator for t value - for debugging
end %
elseif sum(ptgood(:,4)) >= 1 & indexb >= 1,
if ptgood(1,2)>zerotol, % not near hard case
% interpolate with one point from each side if not hard case
tintg=ptgood(1,1);
tintb=ptbad(1,1);
psig=-1/ptgood(1,2)+sqrt(ss1);
psib=-1/ptbad(1,2)+sqrt(ss1);
tt=(tintg*psib-tintb*psig)/(psib-psig);
if newlow <= tt & tt <= newhigh,
t=tt;
tind=[tind 31];
end
else
%use spline on k(t) - to do still
tind=[tind 32];
end
end % end of if sum(ptgood) >= 2 else >= 1
%%%%%%%%%%%%%%%%%%%%%%%%%
%
elseif side < 0, % last iterate was on the BAD SIDE
if indexg > 0 & ptgood(1,2) > hzerotol, % not near hard case
% this interpolation makes no sense if near hard case
% interpolate with one point from each side if not hard case
tintg=ptgood(1,1);
tintb=ptbad(1,1);
psig=-1/ptgood(1,2)+sqrt(ss1);
psib=-1/ptbad(1,2)+sqrt(ss1);
tt=(tintg*psib-tintb*psig)/(psib-psig);
if newlow <= tt & tt <= newhigh,
t=tt;
tind=[tind 31];
end
%In case near hard case above !!!!
% %use spline on k(t) - to do still
% tind=[tind 32];
elseif sum(ptbad(:,4)) >= 2,
%INVERSE INTERPOLATION
% t(psi) inverse function of psi(t), then we linearly interpolate
% value t(0), which is t^*, using two points from the bad side.
tint=ptbad(1:2,1);
yint=ptbad(1:2,2);
f1=-1/yint(1) + 1/ystar;
f2=-1/yint(2) + 1/ystar;
% If f1=f2, then t value repeated by e.g. t=low in triangle.
% This indicates that the optimal t is very close to t=low.
if f2>f1, % correct monotonicity occurred
slope=(tint(1)-tint(2))/(f1-f2);
tt=tint(1) - f1* slope; % linear inverse interpolation
if newlow <= tt&tt<(newhigh-zerotol),
% Use the new point - it is not too large
t=tt;
tind=[tind 5]; % indicator for t value - for debugging
elseif side < -4, % need a point on good side desperately
% was following two lines - need to speed this up using a
% CONCAVE spline for k(t) in hard case!!!!
t=.95*high + .05*low;
tind=[tind 7]; % indicator for t value - for debugging
end
end %
end % end for sum(ptbad) >= 2 and >= 1
end % end for side check
disp(['side: ', num2str(side)]);
disp(['trail for t: ', num2str(tind)]);
%
%%%%%%%%%%%UPDATES%%%%%%%%%%%
% A new point t has been found. Start updates.
%
% Recall t is the most recent update for t^*
D( 1, 1) = t;
tarn20=cputime;
OPTIONS.tol= 1e-13; % convergence tolerance
if indexb>0 & indexg>0,
if tind(length(tind))==3, % finished interpol on good side
OPTIONS.v0 = gy;
elseif (t-low) <= (high-t)
OPTIONS.v0 = by;
else
OPTIONS.v0 = gy;
end
elseif indexb>0,
OPTIONS.v0 = by;
elseif indexg>0,
OPTIONS.v0 = gy;
else
OPTIONS.v0 = y;
end
[y,lambda,flageigs] = eigs('Dfun',n+1,1,'SR',OPTIONS);
tarn2f=cputime-tarn20; % save initial Lanczos call cputime for statistics
disp(['elapsed time for second eig calc: ',num2str(tarn2f)]);
if (flageigs>0),
disp('flag WARNING from eigs EIGENVALUE CALCULATION BAD');
disp(['condition est. of eigs: ',num2str(condest(D))]);
end
% y and lambda are the new estimates
% for the eigenvector of D(t^*) and for lambda^*
dynorm=norm(D*y-lambda*y);
if dynorm > zerotol*(abs(lambda)+1), % ???zerotol???check???
disp(['USING eig, output from eigs norm error ',num2str(dynorm)]);
[V,Lam]=eig(full(D));
lambda=min(diag(Lam));
indx=find(lambda==diag(Lam) );y=V(:,indx);y=y/norm(y);
dynorm=norm(D*y-lambda*y);
disp(['USING eig new norm error is ',num2str(dynorm)]);
end
if y(1)<0, % ensure the sign of the first component is positive
y=-y;
end
%
% multiplier sign check - to update lower bounds
if lambda > 0, % A is known to be pos def - multiplier has wrong sign
if y(1) >= ystar,
% The point y(2:n+1)/y(1) has squared norm (1-y(1)^2)/y(1)^2, which is at
% most s^2 exactly when y(1) >= ystar, so it lies inside the trust region
% (or on its boundary). Moreover, since lambda > 0 here, A is positive
% definite by eigenvalue interlacing. The optimum we are looking for is
% therefore the unconstrained minimizer, with multiplier lambda = 0, and
% can be found by a conjugate gradient method (or a direct solve).
a=sparse(a);
%xopt = conj_grad(y(2:n+1)/y(1),a);
% % apply conjugate gradient method to find the optimum.
xopt=A\a; % use the matlab function for now or uncomment conj_grad
gap = -a'*xopt; % optimal value since A xopt = a
fopt=gap;
lopt = 0;
topt = 99999;
gap=[topt topt
lopt lopt
topt topt
];
t0f=cputime-t0;
flag = 2; % To know answer is the unconstrained minimum
disp(['elapsed time: ', num2str(t0f)]);
disp(['Problem solved using conjugate gradients ']);
return; %OPTIMALITY - PROBLEM IS SOLVED
end
else % the sign of the multiplier is correct
% so apply duality theory
kt=ss1*lambda-t; % value of k(t), dual functional. This gives
% a lower bound on mu^*
kts=[t lambda kt
kts];
if (kt > mulow)&(kt <= muup),
mulow=max(mulow,kt); % new lower bound on optimal value
end
%
end % end of if lambda > 0 loop
%% end multiplier sign check
%
%
%%%%%%%%%%%%%Bad side update
if y(1) >= ystar, % bad side
%%%%???? when y(1) very small and ystar very small, then error can
%%% occur about knowing which side - roundoff problem can occur
%%% here???? analysis needed.
if t==newhigh; %% roundoff error problem here ????why???
%%%newhigh was on wrong side
newhigh=high;
end
low = t; % new lower bound on t^*
ptbad=[t y(1) lambda 1
ptbad]; % Keep points for first interpolation.
side=min(-1,side-1); % consecutive bad sides
by=y;
%%
%%
%%%%%%%%%%%%%
%% Begin bound for PSDP from bad side - approx. negative curvature
% direction and move to the boundary.
if indexg>0,
if min(ptgood(:,3)) <= 0, % multiplier has correct sign on good side
normx2=(1-y(1)^2)/(y(1)^2); % square of norm of current soln.
xf=y(2:(n+1))/y(1); % current soln. inside trust region
% In the hard case (case 2), we use the More-Sorensen (MoSo) formula
% (page 558), but with the negative eigenvalue and eigenvector, to take a
% step to the boundary. In the easy case and in the hard case (case 1), we
% step to the boundary along the line joining xf and the point
% gy(2:n+1)/gy(1,1), outside the trust region, associated with the last
% good-side t-point.
if (gy(1) < zerotol)
% This is the almost hard case (case 2)
z = gy(2:n+1);
normz=norm(z); % normz should be almost 1
else
% This is the easy case or almost hard case (case 1)
z = gy(2:n+1)/gy(1,1)-by(2:n+1)/by(1,1);
normz=norm(z);
end
xyprod=(z'*xf)/normz; % normalized inner product
ndiff=s^2-normx2;
sxy=sign(xyprod);
if sxy==0,
sxy= 1;
end
%%%??????check whether this is really the correct direction that
%%% keeps the optimality conditions - seems perhaps not???
%% or perhaps because of roundoff??? z should be in null space
%%% of gradient of Lagrangian ????? or because it is on good side???
tau=ndiff/(xyprod+sxy*sqrt(xyprod^2+ndiff) );
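% (tau solves ||xf + (tau/normz)*z||^2 = s^2, i.e. tau^2 + 2*xyprod*tau - ndiff = 0;
%  the expression above is the numerically stable form of the relevant root.)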
xnew=xf+(tau/normz)*z; % xnew is the point obtained by moving to the
% boundary
fxnew=xnew'*A*xnew-2*xnew'*a; %should improve at point xf
% get new upper bound using the feasible xnew - need to allow almost
% best xnew as the bestxf
if fxnew < bestfxf,
bestxf=xnew;
bestfxf=fxnew;
end
if (fxnew < muup) & (fxnew >=mulow),
muup=fxnew;
end
end % end if <= 0 line mult. check
end %end of if indexg line
%%%%%%%%%% end of PSDP bound
%
%
%%%%%%%%%%%%%Good side update
%%%%%%%%%%%%%%%%%%%
else % current point is on good side
if t==newlow; % roundoff error problem correction - but
newlow=low; % why did it occur????
end
high = t; % we have new upper bound on t
ptgood=[t y(1) lambda 1
ptgood ] ;
side=max(1,side+1); % consecutive good sides
gy=y;
if ptgood(1,3) <= 0, % current multiplier has correct sign
if indexb > 0,
%% begin type 1 bound for PSDP from the good side
normx2=(1-by(1)^2)/(by(1)^2); % use last bad side point
xf=by(2:(n+1))/by(1);
% In the hard case (case 2), we use the More-Sorensen (MoSo) formula
% (page 558), but with the negative eigenvalue and eigenvector, to take a
% step to the boundary. In the easy case and in the hard case (case 1), we
% step to the boundary along the line joining xf and the point
% gy(2:n+1)/gy(1,1), outside the trust region, associated with the last
% good-side t-point.
if (gy(1) < zerotol)
% This is the almost hard case (case 2)
z = gy(2:n+1);
normz=norm(z); % normz should be almost 1
else
% This is the easy case or the almost hard case (case1)
z = gy(2:n+1)/gy(1,1)-by(2:n+1)/by(1,1);
normz=norm(z);
end
xyprod=(z'*xf)/normz; % normalized inner product
ndiff=s^2-normx2;
sxy=sign(xyprod);
if sxy==0,
sxy= 1;
end
tau=ndiff/(xyprod+sxy*sqrt(xyprod^2+ndiff) );
xnew=xf+(tau/normz)*z; %
fxnew=xnew'*A*xnew-2*xnew'*a;
% get new upper bound using the feasible xnew - need to allow almost
% best xnew as the bestxf
if fxnew < bestfxf,
bestxf=xnew;
bestfxf=fxnew;
end
if (fxnew < muup) & (fxnew >=mulow),
muup=fxnew;
end
else % else of indexb>0 line
% Need upper bound as well in case triangle was not used.
%
%% begin type 2 bound for PSDP
xf=y(2:(n+1)); % norm is sqrt of 1-y_0^2
xnew=(s/sqrt(1-y(1)^2))*xf;
% xnew is the point obtained by moving to the boundary
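% (Since ||y|| = 1, ||xf|| = sqrt(1-y(1)^2), so this scaling puts xnew
%  exactly on the boundary: ||xnew|| = s.)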
fxnew=xnew'*A*xnew-2*xnew'*a;
% get new upper bound using the feasible xnew
if (fxnew < muup) & (fxnew >=mulow),
bestxf=xnew;
bestfxf=fxnew;
muup=fxnew;
end
%% reached end type 2 bound for PSDP
end % end of if indexb > 0 line
end % end if <= 0 line - multiplier sign check
end % end of if bad side
%%%%%%%%%%%%
%% save new bounds and gap
if mulow > mubnds(1,1) | muup < mubnds(1,2),
mubnds= [ mulow muup
mubnds ];
gap=[mulow muup]; % duality gap - gets updated when a bound changes
end
%
% update tolerances
t1=abs( 1-y(1)^2*(1+s^2))>( (2*normtol+normtol^2) *y(1)^2*s^2 );
% use y(1)=ystar at optimum
%t1=abs( 1-y(1)^2*(1+s^2))>(normtol*y(1)^2*s^2 ); % old Feas criteria
dbestgap= (bestfxf-mulow)/(abs(bestfxf)+1);
t2= dbestgap > 2*dgaptol; %??????why 2???change: rel dual opt cond
% if t1 feasibility fails, but bestfxf works, then feasibility is fine
if muup==0,
muupt=muup+1;
else
muupt=muup;
end
reldgap= abs(muup-mulow)/(abs(muupt)) ;
t3= (muup-mulow) >= -2*dgaptol*muup;
%t3= dgap > dgaptol; % Duality gap criteria old way
% note: abs(fopt-qopt)/abs(fopt) <= gap/abs(muup) is approx dgap
t4=iter <=iterbnd; % iteration bound criteria
t5=abs(high-low)/(abs(high)+1) > zerotol; % gap for t^* criteria
txt=[int2str(iter),b6,num2str(t),b6,num2str(reldgap),b6,num2str(dbestgap)];
disp( txt);
%
end % end of while loop for gap and iteration check
%
%%%%%%%%%%%%%%%%%%%%%%%%%END GENERAL ITERATION%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%OUTPUT DATA%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
fopt=(muup+mulow)/2;
if ~t2, % bestxf is almost feasible
xopt=bestxf;
fopt=bestfxf;
else
xopt = y(2:n+1)/y(1);
end
topt=t;lopt=lambda;
if indexg ==0,
ptgood=[99999 99999 99999 0];
end
if indexb ==0,
ptbad=[-99999 -99999 -99999 0];
end
t0f=cputime-t0;
gap=[gap % lower and upper bound for mu^*
ptbad(1,3) ptgood(1,3) % lower and upper bound for lambda^*
low high % lower and upper bound for t^*
tarn1f t0f ]; % tarn1f is elapsed time for first eigs call
% t0f is total computation time
disp(['elapsed time: ', num2str(t0f)]); % t0f is total computation time
disp(['number of matrix-vector multiplications: ', num2str(counter)])
disp(['number of iterations: ', num2str(iter)])
relopt=-norm( (A-lopt*eye(size(A,1)))*xopt-a)/fopt;
%rel opt cond. norm of grad of Lagrangian / value of Lagrang. at opt.
format long e
disp(['relative opt. condition: ', num2str(relopt)])
disp(['interval of optimal value: [ ',num2str(mulow),' to ',...
num2str(muup),' ]']);
disp('final results for ');
disp('iter ----- t ---- rel dual gap -- rel. dual opt cond');
txt=[int2str(iter),b6,num2str(t),b6,num2str(reldgap),b6,num2str(dbestgap)];
disp( txt);
if (abs(lambdamin - lopt)/(abs(lambdamin)+1) < 1e-2)
flag = 1;
end