\documentstyle[11pt]{article}
\newtheorem{defi}{Definition}[section]
\newtheorem{example}{Example}[section]
\newtheorem{prop}{Proposition}[section]
\newtheorem{lem}{Lemma}[section]
\newtheorem{thm}{Theorem}[section]
\newtheorem{cor}{Corollary}[section]
\newtheorem{rem}{Remark}[section] 
\newtheorem{conj}{Conjecture}[section]
\newtheorem{alg}{Algorithm}[section]
\newtheorem{ex}{Exercise}[section]
\newcounter{count}
\newcommand{\sn}{{\cal S}_n }
\newcommand{\hn}{{\cal H}_n }
\newcommand{\p}{{\cal P} }
\newcommand{\g}{{\cal G} }
\newcommand{\kvec}{{\rm vec\,}}
\newcommand{\adj}{{\rm adj\,}}
\newcommand{\trace}{{\rm trace\,}}
\newcommand{\tr}{{\rm trace\,}}
\newcommand{\diag}{{\rm diag\,}}
\newcommand{\Diag}{{\rm Diag\,}}
\renewcommand{\theequation}{\thesection.\thecount}
\newcommand{\beq}{\addtocounter{count}{1} \begin{equation}}
\newcommand{\beqr}{\addtocounter{count}{1} \begin{eqnarray}}
\newcommand{\addc}{\addtocounter{count}{1} }
\newcommand{\bs}{\setcounter{count}{0} \section}
\newcommand{\bpr}{{\bf Proof.} \hspace{1 em}}
\newcommand{\epr}{\\ \hspace*{4.5in}  $\Box$}
\begin{document}
\bibliographystyle{plain}
\title{AN INTERIOR-POINT METHOD FOR
APPROXIMATE POSITIVE SEMIDEFINITE COMPLETIONS 
\footnote{This report (related software) is available by anonymous 
ftp at orion.uwaterloo.ca in the directory pub/henry/reports
(henry/software/completions.d); or over WWW with URL
 ftp://orion.uwaterloo.ca/pub/henry/reports/ABSTRACTS.html 
(henry/software/readme.html).}}
\author{
Charles R. Johnson and Brenda Kroschel
                \thanks{
College of William \& Mary,
Department of Mathematics,
Williamsburg, Virginia 23187-8795.
E-mail: crjohnso@cs.wm.edu and kroschel@cs.wm.edu.
}
        \and
        Henry Wolkowicz
                \thanks{
          University of Waterloo,
          Department of Combinatorics and Optimization,
          Waterloo, Ontario N2L 3G1, Canada.
E-mail: henry@orion.uwaterloo.ca, URL: http://orion.uwaterloo.ca/\~{}hwolkowi.
Research supported by the Natural Sciences and Engineering Research Council
of Canada.
                        }
        }
%\today %\date{May 1993}
\date{}
\begin{titlepage}
\maketitle
\thispagestyle{empty}
\begin{center}
\today 
~\\
University of Waterloo\\
CORR Report 95-11 \\
\end{center}
\begin{abstract}
Given a nonnegative, symmetric, matrix of weights, $H$, we study 
the problem of finding an Hermitian, positive semidefinite 
matrix which is closest to a given Hermitian matrix, $A,$ with respect to
the weighting $H.$ This extends the
notion of exact matrix completion problems in that $H_{ij}=0$
corresponds to the element $A_{ij}$ being {\em free}
(unspecified), while a large value of $H_{ij}$
corresponds to the element $A_{ij}$ being {\em approximately fixed}
(specified).

We present optimality
conditions, duality theory, and two primal-dual interior-point algorithms.
Because of sparsity considerations,
the dual-step-first
algorithm is more efficient for a large number of free elements,
while the primal-step-first
algorithm is more efficient for a large number of
fixed elements.

Included are numerical tests that illustrate the efficiency 
and robustness of the algorithms.

        \vspace{2ex}
        \noindent{\em Key words: positive definite completions,
  best nonnegative approximation, semidefinite programming, primal-dual interior
point methods, complementarity problems. 
}
        \vspace{2ex}

        \noindent{\em AMS 1991 Subject Classification: 
Primary: 65K10; Secondary: 90C25,  15A45.
        }
\end{abstract}
\tableofcontents
\end{titlepage}
\pagebreak
\bs{INTRODUCTION} \label{sect:intro}
Suppose we are given a partial Hermitian matrix $A$ 
with certain elements fixed, for
which all fully specified (fixed)  principal submatrices are positive
semidefinite. The positive semidefinite completion problem consists in 
finding numbers for the unspecified (free) 
elements in order to make $A$ positive
semidefinite. This problem has many applications and
has been extensively studied; see
\cite{GrJoSaWo:84,BaJoLo:93}
and the references therein. Much is known about the existence of
completions. In this paper, we generalize the completion problem in
that we allow approximate completions. Using this approach, we introduce
and test a primal-dual interior-point algorithm. This algorithm solves
both approximate completion problems and exact completion problems
and avoids the numerical difficulties that can arise from an exact
completion approach.
\subsection{Background}
Let $H=H^t \geq 0$ be a nonnegative (elementwise) symmetric
matrix of weights with positive diagonal elements
\[ H_{ii} > 0,~ \forall i,  \]
and let $A=A^*$ be
another given Hermitian matrix. Let
\[ ||A||_F = \sqrt{ \tr A^*A}, \]
denote the {\em Frobenius norm} of $A$, and
\[ f(P):=||H \circ (A-P) ||_F^2, \]
where $\circ$ denotes {\em Hadamard product}.
We consider the weighted,
best approximate completion problem
\[
(AC)
\begin{array}{ccc}
       \mu^*:=&\min &f(P) \\
 &  \mbox{~subject to~} & KP=b\\
  &  &  P \succeq 0,
    \end{array}
\]
where $K: \hn \rightarrow {\cal C}^m$ is a linear operator 
from the Hermitian matrices to
${\cal C}^m,$ and $P \succeq 0$ denotes
{\em positive semidefiniteness}.
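
For illustration, the objective of AC is straightforward to evaluate in
code. The following is a minimal sketch in Python/NumPy (the experiments
reported in Section \ref{sect:numerics} used MATLAB; this translation and
the function name are ours):
\begin{verbatim}
import numpy as np

def f(P, A, H):
    """Objective of (AC): f(P) = ||H o (A-P)||_F^2,
    where o denotes the Hadamard product."""
    R = H * (A - P)            # Hadamard (elementwise) product
    return np.linalg.norm(R, 'fro')**2
\end{verbatim}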


Note that $H_{ij}=0$ means that $P_{ij}$ is {\em free}, while $H_{ij} >0$ places
a weight on forcing $P_{ij} \approx A_{ij},$ i.e. $P_{ij}$ is approximately {\em fixed}. 
If the optimal value at an
optimal $\bar{P}$ is $f(\bar{P}) = 0,$ then we have an exact completion
of $A$, where the positions corresponding to $H_{ij}=0$ are free.
The linear constraint $KP=b$ can force components of $P$ to exactly
equal the corresponding components of $A$; e.g. 
the constraint $\tr E_{ij}P=A_{ij}$,
the constraint $\tr E_{ij}P=A_{ij}$,
where $E_{ij}$ is the zero matrix with a 1 in the $i,j$ position, is 
equivalent to fixing $P_{ij} = A_{ij}.$ 

Suppose that the diagonal elements $a_{ii} >0, ~\forall i,$
i.e. $\diag(A) > 0.$ Consider
\[
\max \left\{ \det (P) : P \succeq 0, ~\diag(P)=\diag(A) \right\}.
\]
Then, by Hadamard's inequality, the maximum occurs when $P^{-1},$ and
hence $P,$ is diagonal. Thus, the maximum determinant positive definite
completion of a partial diagonal matrix is characterized by $P^{-1}$
having zeros in the positions corresponding to the free
(off-diagonal) components. 

Now, suppose that $2k+1$ bands are specified in $A$, i.e. $a_{ij}$ is fixed
for $|i-j| \leq k,$ and suppose that every principal $(k+1) \times (k+1)$
submatrix contained within the $2k+1$ bands is positive definite. Then:
\begin{thm} (\cite{DymGoh:81})
\begin{enumerate}
\item
If
\[{\cal D}_k = \left\{ B : B \succeq 0, ~b_{ij}=a_{ij}, \forall |i-j|
                \leq k \right\},
\]
 then ${\cal D}_k \neq \emptyset.$
\item
$\max \left\{ \det (B) :B \in {\cal D}_k \right\}$ occurs at the unique
matrix $B \in {\cal D}_k$ having the property that for all $|i-j| \geq
k+1$, the $(i,j)$-entry of $B^{-1}$ equals 0.
\epr
\end{enumerate}
\end{thm}
Thus, we again have an optimum positive definite completion that is
characterized by having an inverse with zeros in the positions
corresponding to the free components.

Suppose that we are given the {\em partial Hermitian} matrix $A=A^*,$
i.e.  the components $A_{ij}, ij \in {\cal E}$ are specified (fixed),
while the others are unspecified (free).
This is denoted $A=A({\cal G}),$
for the undirected graph ${\cal G} = ({\cal N,E}).$
The {\em positive semidefinite completion problem} consists in finding
components $A_{ij}, ij \notin {\cal E},$ such that the resulting matrix
$A$ is positive semidefinite, $A \succeq 0.$
The graph $\g$ is {\em chordal} if there are no minimal cycles of 
length $\geq 4.$
A graph $\cal G$ is said to be {\em completable} if any
${\cal G}$-partial positive definite matrix has a positive definite
completion.
\begin{thm} (\cite{GrJoSaWo:84})
$\g$ is completable if and only if $\g$ is chordal.
\epr
\end{thm}
\begin{thm} (\cite{GrJoSaWo:84})
Suppose that $\diag(A)$ is fixed and
$A(\g)$ has a positive definite completion.
Then there is a unique such completion, $B$, with maximum
determinant; and it is characterized by $B^{-1}$ having 0 components in
positions corresponding to all $kl \notin {\cal E}.$
\epr
\end{thm}
The unique completion with maximum determinant is again characterized by
its inverse having zeros in the positions corresponding to free
components. In this paper we see that this characterization carries
over to AC, with the inverse replaced by the ``dual'' matrix.

We assume that the diagonal
of $H$ is positive, for if $H_{kk}=0,$ then we can make $P_{kk}$ as
large as we like while maintaining $P \succeq 0.$ 
This allows us to set $P_{kj}=A_{kj},~\forall j,$ but
may cause problems with attainment.

There are several special cases of AC:
\begin{enumerate}
\item
Suppose that $H$ has 0 valued elements corresponding to exactly the
free elements of the partial matrix $A,$ 
and, also, both $K$ and $b$ are 0. Then we have found an
exact completion, of the partial matrix $A,$ 
if and only if the optimal value $\mu^* = 0.$
\item
Suppose that $H$ has elements, sufficiently large in absolute value, in
positions corresponding to exactly the
fixed elements of the partial matrix $A,$ 
and, again, both $K$ and $b$ are 0. Then AC finds an exact semidefinite
completion of the partial matrix $A$, if one exists.
\item
In the case that $A=0$ and $H=E,$ the matrix of ones, 
the problem AC is an exact completion problem or a
best approximation problem with a semidefinite
constraint. Exact completion problems with $P \succ 0$ and which
maximize the determinant are studied in \cite{GrJoSaWo:84} and more recently in
\cite{BaJoLo:93,JohKro:95,JohTar:95}. Existence and 
optimality conditions are presented.
Numerical techniques are presented in \cite{GHJT:95}.
Best nonnegative elementwise interpolation problems are studied in 
e.g. \cite{MiSmSwWa:85,BoWo:86,BoLe1:92}. This includes optimality 
conditions as well as algorithms based on  Newton's method. 

\item
Without the linear constraint, i.e. with $K=0$ and $b=0$, 
the problem AC is a quadratic program over a cone and can be rephrased
as a complementarity problem over ${\cal P}.$ Such problems have been
studied in e.g. \cite{KoShHa:94,KoShHa:95}.
Algorithms for general semidefinite programs with linear constraints
have been recently studied in several papers, see e.g. 
\cite{int:Nesterov5,BoGhFeBa:94,HeReVaWo:93}. These authors also have
software packages, some of which are public domain, see e.g. the URL for
semidefinite programming on the WWW
ftp://orion.uwaterloo.ca/pub/henry/software/readme.html.
\end{enumerate}

\subsection{Main Results}
In this paper we present the optimality
conditions, a duality theory, and two primal-dual interior-point 
algorithms specialized for AC. One algorithm, the dual-step-first
algorithm, is efficient for problems where $H$ is sparse, since the
dual variable $\Lambda$ has zeros in the positions where $H$ is zero,
i.e. corresponding to the free elements of $A$. The
other algorithm, the primal-step-first algorithm, is efficient when $H$ has
many relatively large (in absolute value) elements, i.e. when $A,$
and so also the primal variable $P,$ has
few free elements.

We include
numerical tests on randomly generated problems with various
properties, e.g. whether $A \succeq 0$ and $H \succ 0$, the condition
numbers of $A$ and $H$, the sparsity of $H$, and the number of
sufficiently large elements of $H$. 
These tests show that
the algorithms can efficiently solve such problems. 
The
algorithms were tested on thousands of problems and never failed to find
the optimal approximate completion. Thus, in the case that the matrix
$H$ contained zeros and an exact completion exists, the algorithm never
failed to find an exact completion. In addition, since interior-point
methods converge to the analytic center of the optimal face, the
algorithm finds the completion of maximal rank.
Degeneracy and ill-conditioning did
not affect the algorithm, other than changing the duality gap of the
initial starting solution.

\subsection{Outline}
The paper is organized as follows. We complete this section with
preliminary notation and results. In Section \ref{sect:duality}, we
present the optimality characterizations and a dual program for AC. The
primal-dual interior-point methods are presented in Section
\ref{sect:pdalg}. The numerical tests are summarized in two tables in
Section \ref{sect:numerics}. We discuss the results in Section
\ref{sect:concl}.
\subsection{Preliminaries}
We now present some of the matrix theoretic background. More details can
be found in e.g. \cite{HoJo:85}.
We work in the space of Hermitian matrices, $\hn$, equipped with the
trace inner product, $\left<X,Y\right>=\tr X^*Y,$ where $X^*$ denotes
the conjugate transpose of $X$. 
The cone of positive semidefinite Hermitian matrices is denoted
$\cal P$. Each Hermitian matrix $A \in \hn$ can be decomposed (the Moreau
decomposition \cite{Mor:62}) into the difference of two mutually orthogonal
positive semidefinite matrices
\[
A = A_{\succ} -A_{\prec},~\mbox{with}~A_{\prec}\succeq 0,A_{\succ}  \succeq 0, ~ A_{\prec}A_{\succ} =0.
\]
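
Both parts of this decomposition can be computed from a single
eigendecomposition of $A$. A minimal sketch in Python/NumPy (one standard
route; the text does not prescribe a particular computation):
\begin{verbatim}
import numpy as np

def positive_part(A):
    """Moreau decomposition A = A_+ - A_-, with A_+, A_- psd
    and A_+ A_- = 0, via one Hermitian eigendecomposition."""
    w, V = np.linalg.eigh(A)   # eigenvalues w, eigenvectors V
    Aplus = (V * np.maximum(w, 0.0)) @ V.conj().T
    Aminus = (V * np.maximum(-w, 0.0)) @ V.conj().T
    return Aplus, Aminus
\end{verbatim}
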
We use $A \otimes B$ to denote the {\em Kronecker product} of
$A$ and $B$; $\kvec (A)$ denotes
the vector formed from the columns of the matrix $A$; while $\Diag (v)$ denotes
the diagonal matrix formed from the vector $v$, and $\diag (A)$ denotes
the vector formed from the diagonal of the matrix $A$.
\subsection{Notation}
\begin{description}
\item[$\hn$]  
~~~~~~~~the space of Hermitian $n \times n$ matrices
\item[${\cal P}_n$~or~${\cal P}$]  
~~~~~~~~the cone of positive semidefinite matrices in $\hn$
\item[$Q \preceq R$]  
~~~~~~~~$R-Q$ is positive semidefinite
\item[$A_{\succ}$]  
~~~~~~~~the positive part of $A$
\item[$A_{\prec}$]  
~~~~~~~~the negative part of $A$
\item[$A^*$] 
~~~~~~~~the adjoint of the linear operator $A$
\item[$A^{\dagger}$] 
~~~~~~~~the Moore-Penrose generalized inverse of the linear operator $A$
\item[$K^+$] 
~~~~~~~~the polar cone of $K$, $K^+=\{ \phi : \left<\phi,k\right> \geq 0,~
             \forall k \in K \}$
\item[$A \circ B$]
~~~~~~~~the Hadamard product of $A$ and $B$
\item[$A^{(2)}$]
~~~~~~~~the Hadamard product of $A$ and $A$
\item[$A \otimes B$]
~~~~~~~~the Kronecker product of $A$ and $B$
\item[$\kvec(A)$]
~~~~~~~~the vector formed from the columns of the matrix $A$
\item[$\diag(A)$]
~~~~~~~~the vector formed from the diagonal of the matrix $A$
\item[$\Diag(v)$]
~~~~~~~~the diagonal matrix formed from the vector $v$
\item[$e_i$]
~~~~~~~~the $i$-th unit vector
\item[$E_{ij}$]
~~~~~~~~the $ij$ unit matrix, $E_{ij}=e_ie_j^t$
\item[$E$]
~~~~~~~~the matrix of ones
\item[AC]
~~~~~~~~the approximate completion problem
\item[DAC]
~~~~~~~~the dual of the approximate completion problem
\item[SDP]
~~~~~~~~a semidefinite program
\item[$||A||_F$]
~~~~~~~~the Frobenius norm of the matrix $A$
\item[$f(P)$]
~~~~~~~~the objective function of AC:~ $|| H \circ (A-P) ||_F^2$
\item[$L(P,y,\Lambda)$]
~~~~~~~~the Lagrangian of AC:~  $f(P)+
       \left< y,(b-KP) \right> -\tr \Lambda P$
\item[$B_{\mu}(P)$]
~~~~~~~~the log-barrier function of AC:~  $ f(P) - \mu \log \det P$
\item[$\bar{P}$]
~~~~~~~~an optimum semidefinite matrix of AC
\item[$\bar{\Lambda}$]
~~~~~~~~an optimum semidefinite matrix of DAC
\end{description}
\bs{DUALITY and OPTIMALITY}
\label{sect:duality}
\subsection{Unconstrained Case}
We first consider the special case without the linear 
equality constraint, i.e.
with only the semidefinite constraint.
The statement that no feasible direction is a descent direction
translates into the following characterization of an optimal solution
$\bar{P}$ of AC:
\beq \label{eq:optcond}
\nabla f(\bar{P}) \in ({\cal P} - \bar{P})^+,
\end{equation}
where ${\cal P}$ denotes the {\em cone of positive semidefinite
matrices} and 
\[  S^+ = \{P: \tr QP \geq 0,~ \forall Q \in S\}
\]
is the {\em polar cone} of the set $S$.
This yields the following characterization of optimality.
\begin{thm}
The matrix $\bar{P} \succeq 0$ solves (AC) if and only if
\beq \label{eq:optcharac}
\tr (H^{(2)} \circ (\bar{P}-A))(P-\bar{P}) \geq 0, ~ \forall P \succeq 0,
\end{equation}
where $H^{(2)}=H \circ H$ is the Hadamard squared term.
\end{thm}
\bpr
Note that the gradient acting on the Hermitian matrix $h$ is
\beq \label{eq:gradf}
\begin{array}{rcl}
\left< \nabla f(P),h\right>& =& 2\tr ((H \circ P)(H \circ h)
         -(H \circ A)(H \circ h))\\
         & =& 2\tr (H \circ (P-A))(H \circ h)\\
     & = & 2\tr (H \circ h)(H \circ (P-A))\\
     & = & 2\tr (h \circ H)(H \circ (P-A))\\
     & = & 2\tr h (H^{(2)} \circ (P-A)).
\end{array}
\end{equation}
Therefore the gradient of the objective function is
\beq \label{eq:grad}
\nabla f(P) = 2H^{(2)} \circ (P-A).
\end{equation}
The result follows upon replacing $h$ by the direction $P-\bar{P}$, for $P
\succeq 0,$ and applying the so-called Pshenichnyi condition
(\ref{eq:optcond}).
\epr
\begin{cor}
If $H=E,$ the matrix of ones, then the (unique)
optimal solution of AC is $\bar{P} = A_{\succ} ,$ where $A_{\succ} $ is
the positive part of $A$.
\end{cor}
\bpr
We substitute $\bar{P} = A_{\succ} $ in (\ref{eq:optcharac}) and see that
\[ 
\tr (H^{(2)} \circ (A_{\succ} -A))(P-A_{\succ} )=
\tr (A_{\succ} -A) (P-A_{\succ} )=\tr A_{\prec} P \geq 0,
\]
where the {\em negative part} of $A$ is denoted $A_{\prec}$, and 
satisfies $A = A_{\succ}  - A_{\prec}$, $A_{\prec} \succeq
0,$ and $\tr A_{\prec} A_{\succ}  =0.$ The objective function is strictly convex in
this case, since the Hessian of $f$ 
acting on $h$ gives $\tr 2h (H^{(2)} \circ h) = 2||h||_F^2.$
Therefore the optimum is unique.
\epr
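
As a numerical check of this corollary (our own illustration, using the
positive-part sketch from the preliminaries):
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
B = rng.standard_normal((6, 6))
A = (B + B.T) / 2                 # random symmetric A
H = np.ones((6, 6))               # H = E, the matrix of ones
Aplus, Aminus = positive_part(A)
fbar = np.linalg.norm(H * (A - Aplus), 'fro')**2
assert abs(fbar - np.linalg.norm(Aminus, 'fro')**2) < 1e-10
for _ in range(100):              # f(P) >= f(A_+) on the psd cone
    C = rng.standard_normal((6, 6))
    P = C @ C.T                   # random psd P
    assert np.linalg.norm(H * (A - P), 'fro')**2 >= fbar - 1e-10
\end{verbatim}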

\begin{cor}
If $H \succ 0$, then the (unique) optimal 
solution $\bar{P}$ of AC is the solution of
\[
H \circ \bar{P} = (H \circ A)_{\succ}.
\]
\end{cor}
\bpr
Substituting $H \circ \bar{P} = (H \circ A)_{\succ}$ into (\ref{eq:optcharac}), we get
\beq 
\begin{array}{rcl}
 \tr (H^{(2)} \circ (\bar{P}-A))(P-\bar{P}) & = & 
            \tr (H \circ \bar{P}-H \circ A) (H \circ P-H \circ \bar{P})\\
     & = & \tr (H \circ A)_{\prec}(H \circ P)\\
     & \geq & 0,~~ \forall P \succeq 0.
\end{array}
\end{equation}
This is sufficient for optimality of $\bar{P}.$ As in the previous
proof, the objective function is strictly convex and the optimum is
unique.
\epr
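
When every entry of $H$ is positive, the Hadamard equation of the
corollary can be inverted elementwise. A sketch under that (elementwise
positivity) assumption, with positive_part() as sketched earlier:
\begin{verbatim}
def ac_solution_positive_H(A, H):
    """Solve H o Pbar = (H o A)_+ for Pbar, assuming
    every entry H[i,j] > 0."""
    HAplus, _ = positive_part(H * A)
    return HAplus / H             # elementwise division
\end{verbatim}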

\subsection{General Constrained Case}
Another approach involves the dual program of AC which we now 
derive. For $\Lambda \in \hn$ and $y \in {\cal C}^m$, let
\beq \label{eq:lagr}
L(P,y,\Lambda) = f(P)  + \left<y,b-KP\right> - \tr \Lambda P
\end{equation}
denote the {\em Lagrangian} of AC.
It is easy to see that the primal program AC is equivalent to 
\beq \label{minmax}
\mu^* = \min_P \max_{\stackrel{y}{\Lambda \succeq 0}} L(P,y,\Lambda).
\end{equation}
We assume that the generalized Slater's constraint qualification,
\exists ~  P \succ  0~ \mbox{with}~ KP=b,
\]
holds  for AC. 

Note that if the linear equality constraint does not
exist, then the assumption $\diag(H)>0$ implies that Slater's
condition holds. The linear constraint can be written as $(\left<P,A_i
\right> ) = ( b_i),$ for appropriate symmetric matrices $A_i.$ The
adjoint operator is then $K^*y= \sum_i y_i A_i.$

Slater's condition implies that strong duality holds, i.e.
\beq \label{maxmin}
\mu^* =  \max_{\stackrel{y}{\Lambda \succeq 0}} \min_P L(P,y,\Lambda)
\end{equation}
and $\mu^*$ is attained for some $\Lambda \succeq 0,$ see e.g.
\cite{Lu:69}.
The inner minimization of the Lagrangian, which is convex in $P,$
is unconstrained, and we can differentiate to get
the equivalent program
\beq \label{maxmin2}
\mu^* = \max_{\stackrel{\nabla f(P)-K^*y=\Lambda}{\Lambda \succeq 0}} 
         \min_P f(P) +\left<y,b-KP\right> - \tr \Lambda P.
\end{equation}
We can now state the dual problem.
\beq \label{eq:dualprob}
(DAC)
\begin{array}{ccc}
       \mu^*:=&\max &f(P) +\left<y,b-KP\right>- \tr \Lambda P \\
 &  \mbox{~subject to~} &  \nabla f(P)  -K^*y- \Lambda =  0\\
          &&       \Lambda \succeq 0.
    \end{array}
\end{equation}

The above pair of dual programs, AC and DAC,
provides an optimality criterion in terms
of feasibility and complementary slackness.
This provides the basis for many algorithms, including primal-dual
interior-point algorithms.
In particular, we see that the duality gap, in the case of
primal and dual feasibility, is given by the complementary
slackness condition:
\beq \label{eq:complslack}
 \tr P\left(2H^{(2)} \circ (P-A)-K^*y\right)  =  0,
\end{equation}
or equivalently
\[
 P\left(2H^{(2)} \circ (P-A)  -K^*y \right)=  0.
\]

\begin{thm} \label{thm:optcond}
The matrix $\bar{P}\succeq 0$ and vector-matrix
$\bar{y},\bar{\Lambda} \succeq 0$ solve AC and DAC if and only if
\[
\begin{array}{cc}
 K\bar{P}  = b & \mbox{primal feasibility}\\
 2H^{(2)} \circ (\bar{P}-A)-K^*\bar{y} 
  -\bar{\Lambda} =  0 & \mbox{dual feasibility}\\
\tr \bar{\Lambda} \bar{P} = 0 & \mbox{complementary slackness}\\
\end{array}
\]
\epr
\end{thm}

The above yields an equation for the solution of AC. This is a very
simple optimality characterization, comparable to the characterizations
of best nonnegative interpolants in \cite{MiSmSwWa:85}. This illustrates
the connection between complementarity and the positive and negative
parts. This also extends the results in \cite{smw2}.

\begin{cor}
The optimal solution of AC is the solution of the two equations
\beq \label{eq:optstat}
\bar{P} - (\bar{P} - 2H^{(2)} \circ (\bar{P}-A)+K^*y)_{\succ}=0
\end{equation}
and
\beq \label{eq:optstat2}
K\left(\bar{P}- 2H^{(2)} \circ (\bar{P}-A)+K^*y\right)_{\succ} = b.
\end{equation}
\epr
\end{cor}

\begin{cor}
Suppose that $A=0$ and $H=\frac 1{\sqrt{2}}E.$ Then
the optimal solution of AC is:
\beq \label{eq:optstat3}
\bar{P} = (K^*y)_{\succ},
\end{equation}
where
\beq \label{eq:optstat4}
K\left(K^*y\right)_{\succ} = b.
\end{equation}
\epr
\end{cor}

However, we do not apply Newton's method to (\ref{eq:optstat}) but
rather to a perturbed equation which allows us to stay interior to
$\p.$

\bs{INTERIOR POINT ALGORITHMS}
\label{sect:pdalg}
We now present the interior-point algorithms for AC. We present a
primal-step-first and a dual-step-first version.  The difference in efficiency
arises from the fact that the primal
variable $P$ does not change very much if few elements of $A$ are free,
while the dual 
variable $\Lambda$ does not change very much if many elements of $A$ are
free.
 
Since we can increase the weights in $H$ to try to force an exact
completion, we restrict
ourselves to the case without the linear equality constraint.
This avoids the need for a constraint qualification and simplifies the
algorithms.

\subsection{Dual Infeasible Algorithm}
We now derive a primal-dual interior point method using the log-barrier
approach, see e.g. \cite{HeReVaWo:93}. This is an alternative way of
deriving the optimality conditions in Theorem \ref{thm:optcond}.
The log-barrier problem for AC is
\[ \min_{P \succ 0} B_{\mu}(P) := f(P) - \mu \log \det P,
\]
where $\mu \downarrow 0$. For each $\mu > 0$ we take one
Newton step toward minimizing the log-barrier function. Therefore, we
take one Newton step for solving the stationarity condition
\beq  \label{eq:stat}
\nabla B_{\mu}(P) = 2H^{(2)} \circ (P-A) - \mu P^{-1} = 0.
\end{equation}
This equation yields the perturbed complementary slackness equation:
\beq  \label{eq:pertcomplslack}
 2P\left(H^{(2)} \circ (P-A)\right) = \mu I,
\end{equation}
and the estimate of the barrier parameter
\beq  \label{eq:muestimate}
 \mu = \frac 2n \tr P\left(H^{(2)} \circ (P-A)\right).
\end{equation}
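
In code, this estimate is a single line; a sketch (the function name is
ours):
\begin{verbatim}
import numpy as np

def mu_estimate(P, A, H):
    """mu = (2/n) trace( P (H^(2) o (P - A)) )."""
    n = P.shape[0]
    return (2.0 / n) * float(np.trace(P @ ((H * H) * (P - A))))
\end{verbatim}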

The Newton direction depends on which of the equations
(\ref{eq:stat}) and (\ref{eq:pertcomplslack}) we choose to solve.
Equation (\ref{eq:pertcomplslack}) is shown to perform better
in applications to max-cut problems in \cite{HeReVaWo:93}.
A discussion on various choices is given in \cite{AlHaOv:95}. A general
discussion of adaptive-step methods for linear programming is given in
\cite{MiToYe:94}.

The Hessian acting on the matrix $h$ is
\beq \label{eq:hessian}
\nabla^2 B_{\mu}(P)(h) = 2H^{(2)} \circ h - \mu P^{-1}hP^{-1}.
\end{equation}
The Newton equation for the Newton step $h$ is
\beq \label{eq:newtonstep}
\nabla^2 B_{\mu}(P)(h) = 2H^{(2)} \circ h - \mu P^{-1}hP^{-1} = -\nabla
f(P) + \mu P^{-1}.
\end{equation}

We use the Kronecker product to replace this equation by an ordinary
matrix-vector equation
\beq \label{eq:newtonalg}
\begin{array}{l}
\left[
 2\Diag (\kvec (H^{(2)}))   - \mu (P^{-1} \otimes P^{-1}) \right]
   \kvec(h) \\
  ~~~~~~~~~~~~~~~~~~~~~= \kvec\left( -2H^{(2)} \circ (P-A) + \mu P^{-1}\right).
\end{array}
\end{equation}
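
For moderate $n$, the system (\ref{eq:newtonalg}) can be assembled and
solved directly. A dense sketch for real symmetric data, with no
exploitation of sparsity and column-major vec ordering (names ours):
\begin{verbatim}
import numpy as np

def newton_step_barrier(P, A, H, mu):
    """Solve [2 Diag(vec(H^(2))) - mu (Pinv x Pinv)] vec(h)
    = vec(-2 H^(2) o (P-A) + mu Pinv) for the step h."""
    n = P.shape[0]
    Pinv = np.linalg.inv(P)
    H2 = H * H                    # Hadamard square H^(2)
    M = 2*np.diag(H2.flatten(order='F')) \
        - mu*np.kron(Pinv, Pinv)
    rhs = -2*H2*(P - A) + mu*Pinv
    h = np.linalg.solve(M, rhs.flatten(order='F'))
    return h.reshape(n, n, order='F')
\end{verbatim}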

We now have a choice of several interior point methods, e.g. a primal,
dual-infeasible method or a feasible primal-dual method. If we assume
that the diagonal of $H$ is positive, then we can always start with
strict dual feasibility and so we can have a standard feasible
primal-dual method. Otherwise, there cannot be a strictly feasible dual
solution. In this case we must use a different convergence criterion.


\subsection{Primal-Dual Feasible Algorithms}
We could modify the above procedure and guarantee dual feasibility under
the assumption that $\diag (H) > 0.$ For then, there exists an initial
$P \succ 0$, such that $ 2H^{(2)} \circ (P-A) \succ 0$ as well. 
We can fix $P_{ij}=A_{ij}$ throughout the algorithm, for elements
corresponding to $H_{ij}=\infty.$
We then
solve for the step $h$ and backtrack to ensure both primal and dual
strict feasibility. This yields the primal-step-first algorithm since we only
solve for the step $h$ for changes in the primal variable $P.$ We do
need to evaluate the dual variable in order to update the barrier
parameter $\mu$ using the perturbed complementarity condition.

Alternatively, we can work with dual feasibility and perturbed
complementary slackness. (We follow the approach in \cite{HeReVaWo:93}.
See also \cite{MiToYe:94}.)
We replace equations (\ref{eq:stat}) and (\ref{eq:pertcomplslack})
with the following two:
\beq \label{eq:twoeqs}
\begin{array}{cc}
 2H^{(2)} \circ (P-A) -\Lambda =  0 & \mbox{dual feasibility}\\
-P + \mu \Lambda^{-1}  = 0 & \mbox{perturbed complementary slackness}\\
\end{array}
\end{equation}

Note that the dual feasibility equation adds restrictions to the
diagonal of $P$ in an interior-point approach. Since we want $\Lambda
\succ 0,$ we cannot
assume that the diagonal of $P$ is fixed equal to the diagonal of $A$;
nor can we assume that the diagonal of $H$ is 0. This means that we
cannot fix the diagonal of $P$ nor set it completely free. However, we
can have arbitrarily large, or small, weights on the diagonal of $H.$

We apply Newton's method to solve (\ref{eq:twoeqs}).
We let $h$ denote the step for $P$ and $l$ denote the step for
$\Lambda.$ Then linearization of the second equation yields
\[
-(P+h)+ \mu \Lambda^{-1} - \mu \Lambda^{-1} l \Lambda^{-1}=0
\]
or
\beq \label{eq:substh}
P+h= \mu \Lambda^{-1} - \mu \Lambda^{-1} l \Lambda^{-1}.
\end{equation}
We get
\beq \label{eq:hexh}
h= \mu \Lambda^{-1} - \mu \Lambda^{-1} l \Lambda^{-1}-P,
\end{equation}
and
\beq \label{eq:hexl}
l= \frac 1{\mu}\left\{ 
     - \Lambda (P+h) \Lambda
 \right\}+\Lambda.
\end{equation}

The linearization of the dual feasibility equation yields
\beq \label{eq:dualfeaslin}
2 H^{(2)} \circ h -l = -( 2H^{(2)} \circ (P-A) -\Lambda).
\end{equation}
We assume that we start with an initial dual feasible solution. 
However,
we include the feasibility equation on the right-hand side
of (\ref{eq:dualfeaslin}),
because roundoff error can cause loss of feasibility.
(Since Newton directions maintain linear equations, we could 
theoretically substitute for $h$
in this  linearization with the right-hand side being 0.)

\subsubsection{Dual-Step-First}
We can eliminate the primal step $h$ and solve for the dual step $l$.
\beq \label{eq:substl}
\begin{array}{ccl}
l & = &  2 H^{(2)} \circ h + ( 2H^{(2)} \circ (P-A) -\Lambda)\\
  & = &  2 H^{(2)} \circ  (\mu \Lambda^{-1} - \mu \Lambda^{-1} 
              l \Lambda^{-1}  -P)+ ( 2H^{(2)} \circ (P-A) -\Lambda).
\end{array}
\end{equation}
Equivalently, we get the Newton equation
\beq \label{eq:newtoneq3}
 2 H^{(2)} \circ (\mu \Lambda^{-1} l \Lambda^{-1}  ) + l =
 2 H^{(2)} \circ (\mu \Lambda^{-1} -A  ) -\Lambda.
\end{equation}
We can now solve for $l$ and substitute to recover $h$. We then
backtrack and ensure both primal and dual positive definiteness.
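
The backtracking can use a Cholesky factorization as the test for
positive definiteness. A sketch of one standard safeguard (the step rule
and its parameters are our choices, not prescribed by the text):
\begin{verbatim}
import numpy as np

def backtrack_to_pd(X, dX, t0=1.0, shrink=0.8, tmin=1e-12):
    """Return a step length t <= t0 with X + t dX positive
    definite, using Cholesky success as the test."""
    t = t0
    while t > tmin:
        try:
            np.linalg.cholesky(X + t*dX)
            return t
        except np.linalg.LinAlgError:
            t *= shrink
    raise RuntimeError("no positive definite step found")
\end{verbatim}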

The equation (\ref{eq:newtoneq3}) is equivalent to
\beq \label{eq:newtonalgfeas}
\begin{array}{c}
\left[2\Diag (\kvec (H^{(2)}))\, \mu (\Lambda^{-1} \otimes \Lambda^{-1})
     + I \right] \kvec (l) \\
  ~~~~~~~~~~~~~~~~~~~~~~~~~= 
       \kvec\left( 2 H^{(2)} \circ (\mu \Lambda^{-1} -A) -\Lambda \right).
\end{array}
\end{equation}

We see that in this final form we can exploit the possible sparsity of $H$,
i.e. $H_{ij}=0 \Rightarrow l_{ij}=0.$ Therefore we can delete the
corresponding rows and columns of the Kronecker product when solving the
system of equations.  
Let $F$ denote the $k \times 2$ matrix whose row $s$ contains the index
pair of the $s$-th nonzero element of $H$, i.e. for $s = 1, \ldots, k,$
\[
 (F_{s1},F_{s2})_{s=1, \ldots k} = \left\{ ij : H_{ij} \neq 0
       \right\}.
\]
Note that  the $(kl,ij)$ component of the Hadamard-Kronecker
product $2 H^{(2)} \circ (\mu \Lambda^{-1} l \Lambda^{-1})  $ is found from 
\begin{eqnarray*}
   2\mu \tr E_{kl} \left(H^{(2)} 
          \circ \Lambda^{-1} E_{ij} \Lambda^{-1} \right)     
       &=& 2 \mu\tr e_ke_{l}^t \left(H^{(2)} \circ \Lambda^{-1}
                   e_ie_j^t \Lambda^{-1} \right)\\
       &=& 2 \mu\tr e_{l}^t \left(H^{(2)} \circ \Lambda^{-1}_{:,i}
                   \Lambda^{-1}_{j:} \right)e_k\\
    &=&  2 \mu H^{(2)}_{lk}\Lambda^{-1}_{li} \Lambda^{-1}_{jk}.
\end{eqnarray*}
Suppose that we represent the Newton system as
\beq \label{eq:newtLr1}
 L \kvec (l) = r,
\end{equation}
then for $s=kl, t=ij$, the $st$ component of the matrix $L$ is
\beq  \label{eq:operL1}
  L_{s,t}= 
  \left\{  \begin{array}{cc}
     2 \mu H^{(2)}_{F_{s2},F_{s1}}\Lambda^{-1}_{F_{s2},F_{t1}} 
       \Lambda^{-1}_{F_{t2},F_{s1}}  & \mbox{if}~s \neq t\\
     2 \mu H^{(2)}_{F_{s2},F_{s1}}\Lambda^{-1}_{F_{s2},F_{t1}} 
       \Lambda^{-1}_{F_{t2},F_{s1}}  +1
& \mbox{if}~s =t.
\end{array}  \right.
\end{equation}
The $s$-th component, $s=ij$, of the right-hand side of the system
(\ref{eq:newtLr1}) is
\[
r_s = 2H^{(2)}_{s}
  \left(\mu \Lambda^{-1}_s -A_{s} \right) -\Lambda_s.
\]
This simplifies the construction of the linear system,
especially in the large sparse case.
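
A sketch of this construction for real symmetric data, looping only over
the nonzero positions of $H$ (a direct transcription of the component
formulas above; names ours):
\begin{verbatim}
import numpy as np

def dual_step_system(A, H, Lam, mu):
    """Reduced Newton system L vec(l) = r over the nonzero
    positions of H; F holds the index pairs (k,l)."""
    H2 = H * H
    Li = np.linalg.inv(Lam)
    F = [(i, j) for j in range(H.shape[1])
                for i in range(H.shape[0]) if H[i, j] != 0]
    m = len(F)
    L = np.empty((m, m))
    r = np.empty(m)
    for s, (k, l) in enumerate(F):
        for t, (i, j) in enumerate(F):
            L[s, t] = 2*mu*H2[l, k]*Li[l, i]*Li[j, k]
        L[s, s] += 1.0
        r[s] = 2*H2[k, l]*(mu*Li[k, l] - A[k, l]) - Lam[k, l]
    return F, L, r
\end{verbatim}
Only the $k$ nonzero positions of $H$ enter, so the work scales with the
sparsity of $H$.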

\subsubsection{Primal-Step-First}
Alternatively, if many elements of $H$ are sufficiently large, i.e. if
we fix (or specify) elements of $A$, then it is more efficient to
eliminate $l$ and solve for $h$ first. 
By equating $l$ in (\ref{eq:hexl}) and (\ref{eq:dualfeaslin}), and
moving $h$ to one side, we get the system
\beq \label{eq:newtoneqgprim}
 2 H^{(2)} \circ h + \frac 1{\mu} \Lambda h \Lambda   =
        \Lambda -\frac 1{\mu}\Lambda P \Lambda
 -\left( 2H^{(2)} \circ (P-A) -\Lambda \right) 
\end{equation}
or, equivalently,
\beq \label{eq:newtonalgprim}
\begin{array}{c}
\left[2 \Diag (\kvec (H^{(2)})) + \frac 1{\mu}(\Lambda \otimes \Lambda)
      \right] \kvec (h) \\
  ~~~~~~~~~~~~~~~~~~~~~~~~~= 
       \kvec\left( \Lambda -\frac 1{\mu}\Lambda P \Lambda  
 -( 2H^{(2)} \circ (P-A) -\Lambda) \right).
\end{array}
\end{equation}
We solve this system
for $h$ and then substitute to get $l$ using (\ref{eq:hexl}).

Note that if the $ij$ component of $P$ is specified or fixed, i.e. if the
corresponding element of $H$ is sufficiently large, then the
$ij$ component of the primal step $h_{ij}=0.$
Therefore, if there are many elements of $H$ which are sufficiently
large, then the size of the 
system (\ref{eq:newtonalgprim}) is very small, i.e. it
is the same order as the number of elements corresponding to relatively
small values in $H$. 
Let $F$ denote the $k \times 2$ matrix whose row $s$ contains the index
pair of the $s$-th free element of $P$, i.e. for $s = 1, \ldots, k,$
\[
 (F_{s1},F_{s2})_{s=1, \ldots k} = \left\{ ij : P_{ij} ~ \mbox{is free}
       \right\}.
\]
Note that  the $(kl,ij)$ component of the Kronecker
product $\Lambda \otimes \Lambda$ is found from 
\[ \tr E_{kl} \Lambda E_{ij} \Lambda = \tr e_ke_{l}^t \Lambda 
   e_ie_j^t \Lambda
= \Lambda_{li} \Lambda_{jk}.
\]
Also, the $kl$ component of $\Lambda P \Lambda$ is
\[ \tr E_{kl} \Lambda P \Lambda = \tr e_ke_{l}^t \Lambda P \Lambda
= \Lambda_{l:} P \Lambda_{:k}.
\]
If we represent the Newton system as
\beq \label{eq:newtLr}
 L \kvec (h) = r,
\end{equation}
then, after multiplying the system through by $\mu$, for $s=kl, t=ij$,
the $st$ component of the matrix $L$ is
\beq  \label{eq:operL}
  L_{s,t}= 
  \left\{  \begin{array}{cc}
\Lambda_{F_{s1},F_{t1}}
\Lambda_{F_{t2},F_{s2}} & \mbox{if}~s \neq t\\
2 \mu H^{(2)}_{F_{s1},F_{s2}}+
\Lambda_{F_{s1},F_{s1}}
\Lambda_{F_{s2},F_{s2}} & \mbox{if}~s =t.
\end{array}  \right.
\end{equation}
If we denote the dual feasibility residual by
\[ r_D := 2H^{(2)} \circ (P-A) -\Lambda,
\]
then the $s$-th component, $s=ij$, of the (similarly $\mu$-scaled)
right-hand side of the system (\ref{eq:newtLr}) is
\[
r_s = \mu\Lambda_s -\Lambda_{j:}P\Lambda_{:i} -\mu(r_D)_s.
\]
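
The analogous construction for the primal step, again for real symmetric
data and looping only over the free positions (a transcription of the
$\mu$-scaled formulas above; names ours):
\begin{verbatim}
import numpy as np

def primal_step_system(A, H, P, Lam, mu, free):
    """Mu-scaled reduced system L vec(h) = r; `free` lists
    the index pairs (i,j) of the free positions of P."""
    H2 = H * H
    rD = 2*H2*(P - A) - Lam       # dual feasibility residual
    LPL = Lam @ P @ Lam
    m = len(free)
    L = np.empty((m, m))
    r = np.empty(m)
    for s, (i, j) in enumerate(free):
        for t, (u, v) in enumerate(free):
            L[s, t] = Lam[i, u] * Lam[v, j]
        L[s, s] = 2*mu*H2[i, j] + Lam[i, i]*Lam[j, j]
        r[s] = mu*Lam[i, j] - LPL[j, i] - mu*rD[i, j]
    return L, r
\end{verbatim}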


\bs{NUMERICAL TESTS}
\label{sect:numerics}
The feasible interior point algorithm was tested on randomly generated
problems. We used MATLAB as the programming language. (The files are
available by anonymous ftp, see the address and URL on the front page.)
The tests were
done on a SUN SPARC 1 with 16 MB of RAM and
with SPECint 18.2 and SPECfp 17.9. 

The major part of the work of one iteration is the
construction and solution
of the linear system to be solved for the Newton direction. 

For the dual-step-first algorithm, we need to form and solve the
system in (\ref{eq:newtonalgfeas}).
This requires finding the inverse of the dual variable, 
$\Lambda^{-1},$ which takes $O(n^3)$ operations. 
Then we need to form the Kronecker product
$\Lambda^{-1} \otimes \Lambda^{-1},$ which takes $O(\bar{n}^4)$ operations
($\bar{n}$ denotes the number of nonzero elements of $H$), and
solve the resulting system. Note that the complexity
of both these latter operations depends completely on the sparsity of $H,$  i.e.
on the number of nonzeros in $H.$ The algorithm needs an initial
positive definite matrix $P \succ 0,$ for which the corresponding dual
variable is positive definite as well, i.e.
$ 2H^{(2)} \circ (P-A) =\Lambda \succ  0.$
We obtain this by setting $P=A + tI,$ where $t$ is chosen using the
smallest eigenvalue of $A,$ which is, in turn, found using a Lanczos
algorithm.
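
A sketch of this initialization, using SciPy's Lanczos-based
\verb+eigsh+ for the smallest eigenvalue (the unit shift beyond the
eigenvalue bound is our arbitrary choice):
\begin{verbatim}
import numpy as np
from scipy.sparse.linalg import eigsh

def initial_point(A, shift=1.0):
    """P = A + t I, with t chosen from the smallest
    eigenvalue of A (computed by a Lanczos method)."""
    lmin = eigsh(A, k=1, which='SA',
                 return_eigenvectors=False)[0]
    t = max(0.0, -lmin) + shift
    return A + t*np.eye(A.shape[0])
\end{verbatim}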

Similarly, for the primal-step-first algorithm, we use 
the system in (\ref{eq:newtonalgprim}). The complexity is as above except that
it depends on the number of zeros, or ``small'' elements of $H,$ i.e.
elements which are not ``sufficiently large''. The initial point is
found using a heuristic similar to that in the dual-step-first algorithm.

In both algorithms, we have to be careful with the diagonal of $P$. We
cannot have 0, or sufficiently large,
elements on the diagonal of $H.$
This can cause problems in finding an initial positive definite dual
variable.

We first present the results for the dual-step-first algorithm.
The matrix of weights $H$ was chosen sparse and nonnegative, with a positive
diagonal; i.e. $.1$ was added to the diagonal to ensure that it is
positive.

Different tolerances and dimensions were tested with no appreciable
difference in the number of iterations. Typically, the duality gap is
halved at each iteration. The algorithm stops with a guarantee that the
duality gap  meets the desired tolerance. Since the objective function
is always nonnegative, this is equivalent to
\[ \min \left\{f(P),  \tr \Lambda P   \right\} \leq \mbox{tolerance}.
\] 
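
In code, the stopping test is immediate; a sketch using the objective
function f() sketched in Section \ref{sect:intro}:
\begin{verbatim}
import numpy as np

def converged(P, A, H, Lam, tol):
    """Stop when min{ f(P), trace(Lam P) } <= tol; since
    f >= 0, this certifies the duality gap bound."""
    return min(f(P, A, H), float(np.trace(Lam @ P))) <= tol
\end{verbatim}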

The details for several of the tests follow in Tables \ref{table:data1}
and \ref{table:data2}.

\begin{table}[htb]
\begin{center}
\begin{tabular}{|c|c|c|c|c|c|c|c|c|}\hline
 dim & probs & toler  & $H$  dens. / infty &
           $A \succeq 0$ &cond(A)&   $H \succ 0$ &
                min/max &  iters  \\ \hline
  60 &  20  & $10^{-6}$& .01  / .001  & yes & 79.738& no & 15/23 & 16.8  \\
 65 &  20  & $10^{-6}$& .015  / .001  & yes & 49.8611& yes & 18/24 & 21.25  \\
 83 &  20  & $10^{-6}$& .007  / .001  & no & 235.1547 & no & 24/29 & 25.45 \\
 85 &  20  & $10^{-5}$& .008  / .001  & yes & 94.6955 & no & 11/17 & 13.05 \\
 85 &  20  & $10^{-6}$& .0075  / .001  & no & 299.8517 & no & 23/27 & 25.25 \\
 87 &  20  & $10^{-6}$& .006  / .001  & yes & 74.163 & yes & 14/19 & 16.85 \\
 89 &  20  & $10^{-6}$& .006  / .001  & no & 179.33 & no & 23/28 & 15.2 \\
110 &  20  & $10^{-6}$& .007  / .001  & yes & 172.255& yes & 15/20 & 17.8  \\
155 &  20  & $10^{-6}$& .01  / 0  & yes &  643.9619 & yes & 14/18 & 15.3  \\
\hline
\end{tabular}
\caption{\label{table:data1} data for dual-step-first: 
dimension; number of problems; tolerance for
duality gap; density of nonzeros in $H$/ 
density of infinite values in $H$;
positive semidefiniteness of $A$; positive definiteness of $H$;
min and max number of iterations; average number of iterations.
}
\end{center}
\end{table}

\begin{table}[htb]
\begin{center}
\begin{tabular}{|c|c|c|c|c|c|c|c|c|}\hline
 dim & probs & toler  & $H$  dens. / infty &
           $A \succeq 0$ &cond(A)&   $H \succ 0$ &
                min/max &  iters  \\ \hline
  15 &  20  & $10^{-5}$& .0751  / .02  & yes & 222.52& no & 8/17 & 10.25  \\
  15 &  20  & $10^{-6}$& .1  / .95  & yes & 19.6102 & no & 10/23 & 15.55 \\
  15 &  20  & $10^{-6}$& .01  / .95  & yes & 21.0473 & no & 10/20 & 13.2 \\
  19 &  20  & $10^{-6}$& .005  / .1  & yes & 14.811 & no & 10/18 & 12.95 \\
  21 &  20  & $10^{-6}$& .005  / .1  & yes & 20.3077 & no & 8/24 & 15 \\
  38 &  20  & $10^{-6}$&    1 / .99  & yes & 49.6068 & yes & 14/24 & 16.3 \\
  45 &  20  & $10^{-6}$&    1 / .99  & yes & 46.8396 & yes & 15/22 & 17.1 \\
  55 &  20  & $10^{-6}$&    1 / .99  & yes & 37.1883 & yes & 15/30 & 17.45 \\
  85 &  20  & $10^{-5}$& .0219  / .02  & yes & 1374.54& no & 16/23 & 18.95  \\
  95 &  20  & $10^{-5}$& .0206  / .02  & yes & 2.6983 & no & 8/14 & 11.05  \\
  95 &  20  & $10^{-6}$& 1 / .999 & yes & 196.0130 & yes & 14/18 & 16.8  \\
 145 &  20  & $10^{-6}$& .01 / .997 & yes &  658.5103 & yes & 13/17 & 14.95 \\
\hline
\end{tabular}
\caption{\label{table:data2} data for primal-step-first: 
dimension; number of problems; tolerance for
duality gap; density of nonzeros in $H$/ 
density of infinite values in $H$;
positive semidefiniteness of $A$; positive definiteness of $H$;
min and max number of iterations; average number of iterations.
}
\end{center}
\end{table}

\bs{CONCLUSION}
\label{sect:concl}
We have presented optimality conditions and an interior-point
algorithm for the approximate completion problem AC. We have shown that
the interior-point approach is very efficient and robust.
We have tested the algorithm
successfully on randomly generated problems. The algorithm never failed
to find the optimal completion; indeed,
the number of iterations consistently fell within a very small range.
Degeneracy and ill-conditioning did not
affect the algorithm. In particular, there were no difficulties for
problems where only a singular semidefinite completion existed. If a
positive definite completion existed, then one was always found, even if
there were multiple optimal solutions for AC. 

We have not compared our algorithm directly with others. 
Other algorithms that solve
similar problems have appeared in the literature. 
In particular, AC is a semidefinite
programming problem and so can be expressed as a problem 
with a lower bound on the
smallest eigenvalue. Eigenvalue-constrained problems have been solved by
active-set type methods; see e.g. \cite{Ov:90}. 
However, recent testing has shown that
our approach using 
primal-dual interior-point methods is superior; see e.g. \cite{HeReVaWo:93}.
We have not presented convergence proofs here. Such proofs can
be obtained by guaranteeing sufficient decrease at each step, either
using a potential function or via an Armijo-Goldstein-Wolfe type
step; see e.g. \cite{HeReVaWo:93}.

%\bibliography{.psd,.master,.publs}
\begin{thebibliography}{10}

\bibitem{AlHaOv:95}
F.~ALIZADEH, J-P.A. HAEBERLY, and M.L. OVERTON.
\newblock Complementarity and nondegeneracy in semidefinite programming.
\newblock Technical report, Courant Institute of Mathematical Sciences, 1995.
\newblock Submitted to the {\em Mathematical Programming} special issue on
  semidefinite programming.

\bibitem{BaJoLo:93}
W.W. BARRETT, C.R. JOHNSON, and R.~LOEWY.
\newblock The real positive definite completion problem: cycle completability.
\newblock Technical report, Department of Mathematics, College of William and
  Mary, Williamsburg, Virginia, 1993.

\bibitem{BoLe1:92}
J.M. BORWEIN and A.~LEWIS.
\newblock Partially finite convex programming, {P}art {I}: duality theory.
\newblock {\em Mathematical Programming}, 57:15--48, 1992.

\bibitem{BoWo:86}
J.M. BORWEIN and H.~WOLKOWICZ.
\newblock A simple constraint qualification in infinite dimensional
  programming.
\newblock {\em Mathematical Programming}, 35:83--96, 1986.

\bibitem{BoGhFeBa:94}
S.~BOYD, L.~{El~{G}HAOUI}, E.~FERON, and V.~BALAKRISHNAN.
\newblock {\em Linear Matrix Inequalities in System and Control Theory},
  volume~15 of {\em Studies in Applied Mathematics}.
\newblock {SIAM}, Philadelphia, PA, June 1994.

\bibitem{DymGoh:81}
H.~DYM and I.~GOHBERG.
\newblock Extensions of band matrices with band inverses.
\newblock {\em Linear Algebra and its Applications}, 36:1--24, 1981.

\bibitem{GHJT:95}
W.~GLUNT, T.~HAYDEN, C.~R. JOHNSON, and P.~TARAZAGA.
\newblock Maximum determinant completions.
\newblock Technical report, Dept. of Mathematics, College of William and Mary,
  Williamsburg, Va, 1994.

\bibitem{GrJoSaWo:84}
B.~GRONE, C.~JOHNSON, E.~MARQUES DE~SA, and H.~WOLKOWICZ.
\newblock Positive definite completions of partial {H}ermitian matrices.
\newblock {\em Linear Algebra and its Applications}, 58:109--124, 1984.

\bibitem{HeReVaWo:93}
C.~HELMBERG, F.~RENDL, R.~J. VANDERBEI, and H.~WOLKOWICZ.
\newblock An interior point method for semidefinite programming.
\newblock {\em SIAM Journal on Optimization}, to appear.
\newblock Accepted August 1994.

\bibitem{HoJo:85}
R.~HORN and C.~JOHNSON.
\newblock {\em Matrix Analysis}.
\newblock Cambridge University Press, New York, 1985.

\bibitem{JohTar:95}
C.~R. JOHNSON and P.~TARAZAGA.
\newblock Approximate semidefinite matrices in a subspace.
\newblock Technical report, Dept. of Mathematics, College of William and Mary,
  Williamsburg, Va, 1995.
\newblock In progress.

\bibitem{JohKro:95}
C.R. JOHNSON and B.~KROSCHEL.
\newblock Principal submatrices, geometric multiplicities, and structured
  eigenvectors.
\newblock {\em SIAM Journal on Matrix Analysis and Applications},
  16:1004--1012, 1995.

\bibitem{KoShHa:95}
M.~KOJIMA, M.~SHIDA, and S.~SHINDOH.
\newblock Reduction of monotone linear complementarity problems over cones to
  linear programs over cones.
\newblock Technical report, Dept. of Information Sciences, Tokyo Institute of
  Technology, Tokyo, Japan, 1995.

\bibitem{KoShHa:94}
M.~KOJIMA, S.~SHINDOH, and S.~HARA.
\newblock Interior-point methods for the monotone linear complementarity
  problem in symmetric matrices.
\newblock Technical report, Dept. of Information Sciences, Tokyo Institute of
  Technology, Tokyo, Japan, 1994.

\bibitem{Lu:69}
D.G. LUENBERGER.
\newblock {\em Optimization by Vector Space Methods}.
\newblock John Wiley, 1969.

\bibitem{MiSmSwWa:85}
C.A. MICCHELLI, P.W. SMITH, J.~SWETITS, and J.D. WARD.
\newblock Constrained $l_p$ approximation.
\newblock {\em Journal of Constructive Approximation}, 1:93--102, 1985.

\bibitem{MiToYe:94}
S.~MIZUNO, M.~J. TODD, and Y.~YE.
\newblock On adaptive-step primal-dual interior-point algorithms for linear
  programming.
\newblock {\em Mathematics of Operations Research}, 18(4):964--981, 1994.

\bibitem{Mor:62}
J.J. MOREAU.
\newblock D\'ecomposition orthogonale d'un espace hilbertien selon deux co\^nes
  mutuellement polaires.
\newblock {\em C.R. Acad.Sci Paris}, 255:238--240, 1962.

\bibitem{int:Nesterov5}
Y.~E. NESTEROV and A.~S. NEMIROVSKY.
\newblock {\em Interior Point Polynomial Algorithms in Convex Programming\ :\
  Theory and Algorithms}.
\newblock SIAM Publications. SIAM, Philadelphia, USA, 1994.

\bibitem{Ov:90}
M.L. OVERTON.
\newblock Large-scale optimization of eigenvalues.
\newblock {\em SIAM J. Optimization}, 2:88--120, 1992.

\bibitem{smw2}
P.W. SMITH and H.~WOLKOWICZ.
\newblock A nonlinear equation for linear programming.
\newblock {\em Mathematical Programming}, 34:235--238, 1986.

\end{thebibliography}
\end{document}
