\documentclass[11pt]{article}
%\usepackage{theorem,exscale}
\usepackage{kbordermatrix}
%\usepackage{color}
\usepackage{exscale}
\usepackage{tabularx}
%\usepackage{algorithm,algorithmic} 
\usepackage{latexsym}
%\usepackage{euler}
%\usepackage{eufrak}
\usepackage{amsmath,amssymb,amstext,amsthm} % Lots of math symbols and environments
\usepackage[dvips]{graphicx,color,epsfig}
\usepackage{fullpage}        % was not available on PC?
\usepackage{float}
\usepackage{verbatim}
%\usepackage{algorithmic}     % was not available on PC?
%\usepackage{hyperref}
\usepackage[bookmarks,pagebackref,
    pdfpagelabels=true, % Adds page number as label in Acrobat's page count
    ]{hyperref}
\usepackage{seceqn}
%\usepackage{amsfonts}
%\usepackage{amstext}
\usepackage{makeidx}
\makeindex


\def\Rnbyn{\mathbb{R}^{n\times n}}
\def\R{\mathbb{R}}
\def\Rk{\mathbb{R}^k}
\def\Rn{\mathbb{R}^n}
\def\Rnp{\mathbb{R}_+^n}

\def\normF#1{\|#1\|_F}
\def\normt#1{\|#1\|_2}

\def\GN{{\bf GN\,}}
\def\SDPt{{\bf SDP\,}}
\def\SDP{\mbox{\boldmath$SDP$}\,}
\def\SDPc{{\mbox{\boldmath$SDP$,}\,}}
\def\GE{\mbox{\boldmath$GE$}\,}
\def\EDM{\mbox{\boldmath$EDM$}\,}
\def\edm{\mbox{\boldmath$EDM$}\,}
\def\EDMc{\mbox{\boldmath$EDM$,}\,}
\def\EDMs{\mbox{\boldmath$EDM$s}\,}
\def\EDms{\mbox{\boldmath$EDMs$}\,}
\def\EDMt{{\bf EDM}\,}
\def\EDmt{{\bf EDM}\,}
\def\EDMR{\mbox{\boldmath$EDMR$}\,}
\def\EDmR{\mbox{\boldmath$EDMR$}\,}
\def\EDML{\mbox{\boldmath$EDML$}\,}
\def\EDmL{\mbox{\boldmath$EDML$}\,}
\def\FPDGt{{\bf FPDG}\,}
\def\FPDG{\mbox{\boldmath$FPDG$}\,}
\def\DEDML{\mbox{\boldmath$DEDML$}\,}
\def\NEDM{\mbox{\boldmath$NEDM$\,}}
\def\NEDm{\mbox{\boldmath$NEDM$\,}}
\def\SNL{\mbox{\boldmath$SNL$}\,}
\def\SNLt{{\bf SNL\,}}
\def\SNLc{\mbox{\boldmath$SNL$,}\,}
\def\SNLLS{\mbox{\boldmath$SNL_{LS}$}\,}
\def\SNLM{\mbox{\boldmath$SNL_{M}$}\,}
\def\SNLMV{\mbox{\boldmath$SNL_{MV}$}\,}
\def\SNLMVt{{\bf SNL$_{MV}$}\,}
\def\EDMCt{{\bf EDMC\,}}
\def\EDMC{\mbox{\boldmath$EDMC$}\,}
\def\EDMCc{\mbox{\boldmath$EDMC$},\,}
\def\EDMCp{\mbox{\boldmath$EDMC$}.\,\,}
\def\EDMCN{\mbox{\boldmath$EDMCN$}\,}
\def\EDMCR{\mbox{\boldmath$EDMC-R$}\,}
\def\EDMCRt{{\bf EDMC-R\,}}
\def\EDMCD{\mbox{\boldmath$EDMC-D$}\,}
\def\EDMCND{\mbox{\boldmath$EDMCN-D$}\,}

\def\SNLMN{\mbox{\boldmath$SNL_{MN}$}\,}
\def\SNLMND{\mbox{\boldmath$SNL_{MV}-D$}\,}

%\definecolor{red}{model}{specs}


%\def\cm{correlation matrix}
%\def\cms{correlation matrices}
\newenvironment{note}{\begin{quote}\small\sf NJH $\diamondsuit$~}{\end{quote}}

% Henry's additional definition
\newenvironment{noteH}{\begin{quote}\small\sf HW $\clubsuit$~}{\end{quote}}
\newenvironment{noteN}{\begin{quote}\small\sf NK $\spadesuit$~}{\end{quote}}
\newenvironment{noteL}{\begin{quote}\small\sf LT $\diamondsuit$~}{\end{quote}}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% All refs in roman.
\def\eqref#1{{\normalfont(\ref{#1})}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\newtheorem{nmbrs}{Numbering}[section]
\newtheorem{defi}[nmbrs]{Definition}
\newtheorem{example}[nmbrs]{Example}
\newtheorem{assump}[nmbrs]{Assumption}
\newtheorem{prop}[nmbrs]{Proposition}
\newtheorem{prob}[nmbrs]{Problem}
\newtheorem{lem}[nmbrs]{Lemma}
\newtheorem{thm}[nmbrs]{Theorem}
\newtheorem{cor}[nmbrs]{Corollary}
\newtheorem{rem}[nmbrs]{Remark}
\newtheorem{properties}[nmbrs]{Properties}
%\newtheorem{table}{table}
\newtheorem{remark}[nmbrs]{Remark}
\newtheorem{conj}[nmbrs]{Conjecture}
\newtheorem{alg}[nmbrs]{Algorithm}
\newtheorem{ex}[nmbrs]{Exercise}
%\newcounter{count}

%\newcommand{\cbr}[1]{\left\{ #1 \right\}}
%\newcommand{\rbr}[1]{\left( #1 \right)}
%\newcommand{\sbr}[1]{\left[ #1 \right]}
\newcommand{\nul}{\mathrm{null}}
\newcommand{\range}{\mathrm{range}}
\newcommand{\sS}{\mathcal{S}}
\newcommand{\embdim}{\mathrm{embdim}}

\newcommand{\ZZ}{{\mathcal Z} }
\newcommand{\ZS}{{\mathcal Z_S} }
\newcommand{\Xs}{{\mathcal X}_s }
\newcommand{\Lx}{{L}_X }
\newcommand{\XS}{{\mathcal W} }
\newcommand{\Ss}{{\mathcal S} }
\newcommand{\Ekyb}{{\mathcal E}^{n}(1{:}k,\bar D)}
\newcommand{\Eayb}{{\mathcal E}^{n}(\alpha,\bar D)}
\newcommand{\Ls}{{L}_S }
\newcommand{\E}{{\mathcal E} }
\newcommand{\LL}{{\mathcal L} }
\newcommand{\RR}{{\mathcal R} }
\newcommand{\EE}{{\mathcal E} }
\newcommand{\FF}{{\mathcal F} }
\newcommand{\DD}{{\mathcal D} }
\newcommand{\BB}{{\mathcal B} }
\newcommand{\CC}{{\mathcal C} }
\newcommand{\GG}{{\mathcal G} }
\newcommand{\OO}{{\mathcal O} }
\newcommand{\PP}{{\mathcal P} }
\newcommand{\TT}{{\mathcal T} }
\newcommand{\UU}{{\mathcal U} }
\newcommand{\VV}{{\mathcal V} }
\newcommand{\WW}{{\mathcal W} }
\newcommand{\uh}{\hat u}
\newcommand{\vh}{\hat v}
\newcommand{\Uh}{\hat U}
\newcommand{\Vh}{\hat V}
%\newcommand{\Sh}{\hat S}
\newcommand{\Sh}{{\mathcal S}_H}
\newcommand{\Scal}{{\mathcal S}}
\newcommand{\Sc}{{\mathcal S}_C }
\newcommand{\snn}{{\mathcal S}_{n-1} }
\newcommand{\CP}{{\bf{\rm CP\,}}}
\newcommand{\CPD}{{\bf{\rm CPD\,}}}
\newcommand{\LCP}{{\bf{\rm LCP\,}}}
\newcommand{\LCPE}{{\bf{\rm LCPE\,}}}
\newcommand{\LCDE}{{\bf{\rm LCDE\,}}}
\newcommand{\hs}{H_{\sigma}}
\newcommand{\En}{{{\mathcal E}^n} }
\newcommand{\Ek}{{{\mathcal E}^k} }

\newcommand{\Sayb}{{\mathcal S}^{n}(\alpha,\bar Y)}
\newcommand{\Sapyb}{{\mathcal S}_+^{n}(\alpha,\bar Y)}
\newcommand{\Skyb}{{\mathcal S}^{n}(1{:}k,\bar Y)}
\newcommand{\Skpyb}{{\mathcal S}_+^{n}(1{:}k,\bar Y)}
\newcommand{\Sok}{{\mathcal S}^{1{:}k}}


\newcommand{\ip}[2]{\left\langle #1, #2 \right\rangle}
\newcommand{\set}[1]{\left\{ #1 \right\}}
\newcommand{\cbr}[1]{\left\{ #1 \right\}}
\newcommand{\rbr}[1]{\left( #1 \right)}
\newcommand{\sbr}[1]{\left[ #1 \right]}
\newcommand{\norm}[1]{\left\| #1 \right\|}


\newcommand{\FDC}{{\bf{\rm FDC\,}}}
\newcommand{\SDPs}{{\bf{\rm SDPs}}}
\newcommand{\SOC}{{\bf{\rm SOC\,}}}
\newcommand{\UCQ}{{\bf{\rm UCQ\,}}}
\newcommand{\WCQ}{{\bf{\rm WCQ\,}}}
\newcommand{\UCQs}{{\bf{\rm UCQs\,}}}
\newcommand{\WUCQ}{{\bf{\rm WUCQ\,}}}
\newcommand{\CQ}{{\bf{\rm CQ\,}}}
\newcommand{\CQc}{{\bf{\rm CQ},\,}}
\newcommand{\CQs}{{\bf{\rm CQs\,}}}
\newcommand{\SDPns}{{\bf{\rm SDP}}}
\newcommand{\LP}{{\bf{\rm LP\,}}}
\newcommand{\LPns}{{\bf{\rm LP}}}
\newcommand{\Ksym}{{\stackrel{s}{\otimes}}}
\newcommand{\Kt}{{\stackrel{T}{\otimes}}}
\newcommand{\Ksksym}{{\stackrel{sk}{\otimes}}}
\newcommand{\Ksumsym}{{\stackrel{s}{\oplus}}}
\newcommand{\symmkron}{{{O\hspace{-.09in}\cdot}\hspace{.05in}}}
\newcommand{\cond}{{\rm cond\,}}
\newcommand{\abs}{{\rm abs\,}}
\newcommand{\rank}{{\rm rank\,}}
\newcommand{\spanl}{{\rm span\,}}
\newcommand{\dom}{{\rm dom\,}}
\newcommand{\conv}{{\rm conv\,}}
\newcommand{\gap}{{\rm gap\,}}
\newcommand{\optval}{{\rm optval\,}}
\newcommand{\Rm}{{\R^m\,}}
%\newcommand{\Sn}{{\mathcal S^n\,}}
\newcommand{\Sn}{{\mathcal S^n}}
%\newcommand{\Snp}{{\mathcal S^n_+\,}}
\newcommand{\Snp}{{\mathcal S^n_+\,}}
\newcommand{\Stt}{{\mathcal S^2_+}}
\newcommand{\Snpp}{{\mathcal S^n_{++}\,}}
\newcommand{\Srpp}{{\mathcal S^r_{++}\,}}
\newcommand{\Rtn}{{\R^{\scriptsize{t(n)}}\,}}
\newcommand{\Stn}{{{\mathcal S}^{\scriptsize{\pmatrix{n\cr 2}}}\,}}
\newcommand{\Sk}{{\mathcal S^{k}}\,}
\newcommand{\Skp}{{\mathcal S^{k}_+}\,}
\newcommand{\Skpp}{{\mathcal S^{k}_{++}}\,}
\newcommand{\Stp}{{\mathcal S^{t}_+}\,}
\newcommand{\Stpp}{{\mathcal S^{t}_{++}}\,}
\newcommand{\Stnn}{{{\mathcal S}^{\scriptsize{t(n)}}\,}}
\newcommand{\Mtnn}{{{\mathcal M}^{\scriptsize{t(n)}}\,}}
\newcommand{\tn}{{\scriptsize{\pmatrix{n\cr 2}}}\,}
\newcommand{\tnp}{{\scriptsize{\pmatrix{n+1\cr 2}}}\,}
\newcommand{\Rtnp}{{\R^{\scriptsize{\pmatrix{n+1\cr 2}}}\,}}
\newcommand{\MM}{{\mathcal M\,}}
\newcommand{\Mn}{{\mathcal M^n\,}}
\newcommand{\MS}{{\mathcal M_S\,}}
\newcommand{\NS}{{\mathcal N_S\,}}
\newcommand{\NN}{{\mathcal N}}
\newcommand{\MSi}{{\mathcal M_S^{-1}\,}}
\newcommand{\Cp}{{\mathcal C\,}}
\newcommand{\p}{{\mathcal P\,}}
\newcommand{\A}{{\mathcal A}}
\newcommand{\B}{{\mathcal B\,}}
\newcommand{\II}{{\mathcal I\,}}
\newcommand{\XSA}{{{\mathcal W}_{\A}} }
\newcommand{\F}{{\mathcal F\,}}
\newcommand{\N}{{\mathcal N\,}}
\newcommand{\U}{{\mathcal U\,}}
\newcommand{\Uone}{{\mathcal U_1\,}}
\newcommand{\QQ}{{\mathcal Q\,}}
\newcommand{\Flift}{{\mathcal F}_n}
% \renewcommand{\theequation}{\thesection.\thecount}
\newcommand{\bbm}{\begin{bmatrix}}
\newcommand{\ebm}{\end{bmatrix}}
\newcommand{\bem}{\begin{pmatrix}}
\newcommand{\eem}{\end{pmatrix}}
\newcommand{\beq}{\begin{equation}}
\newcommand{\beqs}{\begin{equation*}}
\newcommand{\bet}{\begin{table}}
%\newcommand{\bet}{\begin{table}}
\newcommand{\eeq}{\end{equation}}
\newcommand{\eeqs}{\end{equation*}}
%\newcommand{\beqr}{\begin{eqnarray}}
\newcommand{\beqr}{\begin{eqnarray}}
% \newcommand{\addc}{\addtocounter{count}{1} }
% \newcommand{\bs}{\setcounter{count}{0} \section}
%\newcommand{\bs}{\section}
\newcommand{\fa}{~ \; \forall}
\newcommand{\req}[1]{(\ref{#1})}
\newcommand{\sn}{{\mathcal S}_n }
\newcommand{\hn}{{\mathcal H}_n }
\newcommand{\g}{{\mathcal G} }
\newcommand{\SKn}{{\tilde{\mathcal S}}^n}

%\newcommand{\epreq}{\hfill $\blacksquare$}
\DeclareMathOperator{\KK}{{\mathcal K} }
\DeclareMathOperator{\kvec}{{vec}}
\DeclareMathOperator{\adj}{{adj}}
\DeclareMathOperator{\trace}{{trace}}
\DeclareMathOperator{\blkdiag}{{blkdiag}}
\DeclareMathOperator{\tr}{{trace}}
\DeclareMathOperator{\diag}{{diag}}
\DeclareMathOperator{\Diag}{{Diag}}
\DeclareMathOperator{\dDiag}{{dDiag}}
\DeclareMathOperator{\Ddiag}{{Ddiag}}
\DeclareMathOperator{\Mat}{{Mat}}
\DeclareMathOperator{\offDiag}{{offDiag}}
\DeclareMathOperator{\usMat}{{us2Mat}}
\DeclareMathOperator{\usvec}{{us2vec}}
\DeclareMathOperator{\svec}{{s2vec}}
\DeclareMathOperator{\vsvec}{{vs2vec}}
\DeclareMathOperator{\dsvec}{{dsvec}}
\DeclareMathOperator{\Hmat}{{Hmat}}
\DeclareMathOperator{\vSmat}{{vSmat}}
\DeclareMathOperator{\Smat}{{S2mat}}
\DeclareMathOperator{\sMat}{{s2Mat}}
\DeclareMathOperator{\hMat}{{hMat}}
\DeclareMathOperator{\vsMat}{{vs2Mat}}
\DeclareMathOperator{\kmat}{{Mat}}
\DeclareMathOperator{\sdiag}{{sdiag}}
\DeclareMathOperator{\Kprod}{\otimes}
\DeclareMathOperator{\Kcal}{{\cal K}}
\DeclareMathOperator{\Hcal}{{\cal H}}
\DeclareMathOperator{\relint}{{relint}}



%   for Real number sign
%\newcommand{\eqref}[1]{{\rm (\ref{#1})}}
\newcommand{\gesem}{\succeq}
\newcommand{\lesem}{\preceq}
\newcommand{\Sbar}{\bar{S}}
\newcommand{\Qbar}{\bar{Q}}
\newcommand{\Tbar}{\bar{T}}
\newcommand{\xbar}{\bar{x}}
\newcommand{\Xbar}{\bar{X}}
\newcommand{\nc}{\newcommand}
\nc{\arrow}{{\rm arrow\,}}
\nc{\Arrow}{{\rm Arrow\,}}
\nc{\BoDiag}{{\rm B^0Diag\,}}
\nc{\bodiag}{{\rm b^0diag\,}}
\newcommand{\oodiag}{{\rm o^0diag\,}}
\newcommand{\OoDiag}{{\rm O^0Diag\,}}
\def\nr{\par \noindent}
\nc{\Mmn}{{\mathcal M}^{mn} }
\nc{\Mnr}{{\mathcal M}^{nr} }
\nc{\Mnmr}{{\mathcal M}^{(n-1)r} }
% \nc{\eqref}[1]{{\rm (\ref{#1})}}
\nc{\kwqqp}{Q{$^2$}P\,}
\nc{\kwqqps}{Q{$^2$}Ps}
\def\argmin{\mathop{\rm argmin}}
\def\Ssum{\mathop{\mathcal S_\Sigma}}
\def\Sprod{\mathop{\mathcal S_\Pi}}
\newtheorem{problem}[subsubsection]{Problem}
\newtheorem{result}[subsubsection]{Result}
\newtheorem{claim}[nmbrs]{Claim}
\newtheorem{lemma}[nmbrs]{Lemma}
\newtheorem{coroll}[nmbrs]{Corollary}
\newtheorem{theo}[nmbrs]{Theorem}
\newtheorem{defin}[nmbrs]{Definition}
\newtheorem{nota}[nmbrs]{Notation}
\nc{\notinaho}{(X,S)\in \overline{AHO}(\A)}
\nc{\inaho}{(X,S)\in AHO(\A)}

% Simon's definitions
\newcommand{\bea}{\begin{eqnarray}}%
\newcommand{\eea}{\end{eqnarray}}%
\newcommand{\beas}{\begin{eqnarray*}}%
\newcommand{\eeas}{\end{eqnarray*}}%
\newcommand{\Rmn}{\R^{m \times n}}%
\newcommand{\Rnn}{\R^{n \times n}}%
\newcommand{\Int}{{\rm int\,}}% interior of a set
\newcommand{\cone}{{\rm cone\, }}% cone generated by a set
\newcommand{\sspan}{{\rm span}\,}
\newcommand{\ccone}{{\rm \overline{cone}}}% cone generated by a set
\newcommand{\precl}{{\rm precl}}% preclosure of a set
\newcommand{\bdry}{{\rm bdry}}% boundary of a set
\newcommand{\cl}{{\rm cl}}% closure of a set
\newcommand{\bnd}{{\rm bnd}}% boundary of a set
\newcommand{\relbnd}{{\rm relbnd\,}}% relative boundary of a set
\newcommand{\face}{{\rm face\,}}% smallest face containing a set
\newcommand{\facee}{{\rm face^{ef}\,}}% smallest face containing a set
\newcommand{\proj}{{\rm proj\,}}%
\newcommand{\Range}{\mathcal{R}}%
\newcommand{\la}{\ensuremath{\Leftarrow}}%
\newcommand{\ra}{\ensuremath{\Rightarrow}}%
\newcommand{\lla}{\;\ensuremath{\Longleftarrow}\;}% 
\newcommand{\rra}{\;\ensuremath{\Longrightarrow}\;}% 
\newcommand{\lra}{\ensuremath{\Leftrightarrow}}% 
\newcommand{\llra}{\; \Longleftrightarrow\;}% 
\renewcommand{\F}{\mathcal{F}}%
\renewcommand{\L}{\mathcal{L}}%
\newcommand{\X}{\mathcal{X}}%
\newcommand{\nn}{\nonumber}%
\renewcommand{\vec}{{\rm vec}}%
\newcommand{\vP}{v_P}%
\newcommand{\vD}{v_D}%
\newcommand{\vDP}{v_D}%
\newcommand{\vRP}{v_{RP}}%
\newcommand{\vDRP}{v_{DRP}}%
\newcommand{\vDRPM}{v_{DRP_M}}%
% Produce array delimited by []. Usage: \arr{ccc}{B&-A^T&C\\I&0&A}
\newcommand{\arr}[2]{\ensuremath{
\left[\begin{array}{#1}#2\end{array}\right]%
}}{}
% Produce small array (within text)
% e.g., $\bigl[\begin{smallmatrix} A&C\\0&-I \end{smallmatrix}\bigr]$
\newcommand{\sarr}[1]{\ensuremath{
\left[\begin{smallmatrix}#1\end{smallmatrix}\right]}}
% Produce *nondelimited* small array underneath a min or max 
% e.g., $\begin{smallmatrix} x \in X\\ y \in Y \end{smallmatrix}$
\newcommand{\tarr}[1]{\ensuremath{
\begin{smallmatrix} #1 \end{smallmatrix}}}
% Relabel list environments.
% undo this relabel - by Henry????? - comment out the next four lines????
%\makeatletter
%\renewcommand{\theenumi}{\rm (\alph{enumi})}
%\renewcommand{\p@enumii}{\theenumi}
%\makeatother


\begin{document}
%\begin{multicols}{2}
%\setlength{\multicolsep}{30pt}

\bibliographystyle{plain}
\title{
 Semidefinite Representations and Facial Reduction
\footnote{Department
of Combinatorics and Optimization,
          Waterloo, Ontario N2L 3G1, Canada,
}
%   \footnote{Research supported by Natural Sciences Engineering Research
%     Council Canada and a grant from AFOSR.}
}
             \author{
\href{http://sites.google.com/site/nathankrislock/}{Nathan Krislock}
        \thanks{INRIA Grenoble Rh\^one-Alpes,
BiPop Research Group.}
%\and
%\href{http://www.math.uwaterloo.ca/~ptingkei/}{Ting Kei Pong}
%        \thanks{Research supported by The Natural Sciences and Engineering
%                Research Council of Canada.}
\and
\href{http://orion.math.uwaterloo.ca/~hwolkowi/}{Henry Wolkowicz}%
        \thanks{Research supported by The Natural Sciences and Engineering
                Research Council of Canada.}
}
\date{\today}
          \maketitle
%\begin{center}
%          University of Waterloo\\
%          Department of Combinatorics and Optimization\\
%          Waterloo, Ontario N2L 3G1, Canada\\
%          Research Report CORR 2009-04
%\end{center}

%{\bf Key Words:}  

%\noindent {\bf AMS Subject Classification:}


\begin{abstract}
In this paper we extend a recent algorithm for solving the sensor
network localization problem (\SNL) to instances where the facial
reduction involves the intersection of more than two faces.
In particular, we continue to exploit the implicit
degeneracy in the semidefinite programming (\SDP) relaxation of \SNL.

This is a preliminary working paper; it is a work in progress.

\end{abstract}



\tableofcontents
%\listoftables
%\listoffigures



%----------------------------------------------------
\section{Introduction}
%----------------------------------------------------
In this paper we derive and test an algorithm for solving large-scale
sensor network localization problems (\SNL) using simultaneous
intersections of faces.

The \SNL problem consists of locating sensors using the facts
that some of the sensors are anchors, whose locations are known,
and that the distances between sensors
within a given {\em radio range} are (approximately) known.
Our algorithm extends the results in \cite{kriswolk:09}.


Following the approach in \cite{kriswolk:09},
we exploit the implicit
degeneracy in the semidefinite programming (\SDP) relaxation of \SNL.
This involves finding the subspace representation of the faces
of the semidefinite cone $\Snp$ corresponding to the faces of the Euclidean
distance matrix cone $\En$.
We repeatedly find the intersection of faces by finding the
intersection of their subspace representations. We delay using the
positions of the anchors till the end,
i.e.,~after we find the \EDM completion from the data,
we finalize by rotating the
problem using the original positions of the anchors.

%We include a discussion of {\em backward stability} for our
%algorithm. After the initialization of finding and localizing
%the rigid subgraphs, the algorithm can be compared to Gaussian
%elimination (\GE) with partial pivoting. At each step, we find a pair
%of cliques that correspond to the {\em best condition number} for 
%the subspace intersection step, i.e. this can be compared to the
%partial pivoting step in \GE.


%Our problem is closely related to {\em graph realizability}. A
%characterization for the cases with the embedding
%dimension $r=2,3$ is given in \cite{MR2295049,ConnellySloughter2004}. In the $r=2$
%case: {\em if and only if it is a partial 2-tree, i.e. a subgraph of the
%2-sum of triangles in the sense of graph theory}.
%In the case $r=3$: {\em if and only if it does not have $K_5$ or the
%1-skeleton of the octahedron as a minor}.

%----------------------------------------------------
\section{Background}
%----------------------------------------------------
The \SNL problem has recently attracted a lot of interest; see, for example,~\cite{MR2274505,NieCOAP07,Savarese:01,Savvides:01,KimKojimaWaki:09}. See also the webpage 
\href{http://www.convexoptimization.com/dattorro/sensor_network_localization.html}
{www.convexoptimization.com/dattorro/sensor\_network\_localization.html}
and the recent thesis \cite{krislock:2010}.
Nie \cite{NieCOAP07} uses a sums of squares approach and also includes an error analysis.
Noisy distances are handled in \cite{BYIEEE:06} using a combination of regularization and refinement.
%An alternating projection algorithm is given in \cite{MR91j:90074}.



%----------------------------------------------------
\section{Notation and Preliminary Results}
%----------------------------------------------------
We let $\MM^{m \times n}$ denote the vector space of $m \times n$ real matrices
\index{$m\times n$ real matrices, $\Mmn$}
equipped with the {\em trace inner product}, $\langle A,B\rangle = \trace A^TB$;
let $\Mn:=\MM^{n \times n}$ and let
\index{trace inner product, $\langle A,B\rangle = \trace A^TB$}
$\Sn$ denote the subspace of
{\em real symmetric $n \times n$ matrices}; 
\index{symmetric $k \times k$ matrices, $\Sk$}
$\Snp$ and $\Snpp$
denote the cone of positive semidefinite and positive definite
\index{cone of positive semidefinite matrices, $\Skp$}
matrices, respectively;
\index{cone of positive definite matrices, $\Skpp$}
$A \succeq B$ and $A \succ B$ denote the 
L\"owner partial order, $A-B \in \Snp$ and
\index{L\"owner partial order, $A \succeq B$}
$A-B \in \Snpp$, respectively;
\index{vector of ones, $e$}
$\RR (\LL)$ and $\NN (\LL)$ denote the range space and 
null space of the linear transformation $\LL$, respectively;
\index{range space of $\LL$, $\RR (\LL)$}
\index{null space of $\LL$, $\NN (\LL)$}
we let $e$ denote the vector of ones of appropriate dimension; and
we use the {\sc Matlab} notation $1{:}n = \{1,\ldots,n\}$.
\index{$1:n = \{1,\ldots,n\}$}
For $M\in \Mn$, we let $\Ssum(M)=\frac 12 (M+M^T) \in \Sn$
denote the {\em sum symmetrization}.  Thus, 
$\Ssum \colon \Mn \rightarrow \Sn$
represents the orthogonal projection onto $\Sn$.%
\index{sum symmetrization of $M$, $\Ssum(M)$}%
\index{$\Ssum(M)$, sum symmetrization of $M$}
The adjoint of $\Ssum$ is given by $\Ssum^*(S) = S$, for all $S \in \Sn$.
For $M\in \Mmn$, we let $\Sprod(M)=MM^T \in \Sn$ 
denote the {\em product symmetrization}.
\index{product symmetrization of $M$, $\Sprod(M)$}
\index{$\Sprod(M)$, product symmetrization of $M$}

For a subset $S$, let
$\cone(S)$ denote the convex cone generated by the set $S$.
\index{cone generated by $S$, $\cone (S)$}
A subset $F\subseteq K$ is a {\em face of the cone $K$}, denoted
$F \unlhd K$,  if  \index{face, $F \unlhd K$}
\[
\left( x, y \in K, \ \frac 12(x+y) \in  F\right)
                 \implies \left(\cone \{x,y\} \subseteq F\right).
\]
If $F \unlhd K$, but is not equal to $K$, we write $F \lhd K$.
If $\{0\} \neq F \lhd K$, then $F$ is a {\em proper face} of $K$.
\index{proper face} 
For $S \subseteq K$, we let $\face\!(S)$ denote the smallest face of $K$
that contains $S$. 
A face $F \unlhd K$ is an {\em exposed face} if it is the 
intersection of $K$ with a hyperplane.
\index{exposed face}
The cone $K$ is {\em facially exposed} if every face $F\unlhd K$ is exposed.
\index{facially exposed cone}

The cone of positive semidefinite matrices $\Snp$ is facially exposed. 
A face $F\unlhd \Snp$ can be characterized using the range or the nullspace of 
any matrix $S$ in the 
relative interior of the face. If $S \in \relint F$,%
\index{relative interior, $\relint \cdot$}
and $S=U D_S U^T$ is the compact spectral decomposition of $S$
with the diagonal matrix of eigenvalues $D_S \in \Ss_{++}^t$, then 
\beq
\label{eq:facerepr}
F=U \Ss_+^t U^T.
\eeq
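
For illustration, we occasionally complement such formulas with short
Python/NumPy sketches; these sketches are illustrative only and are not
part of the formal development. The following sketch computes the
representation $U$ in \eqref{eq:facerepr} from a matrix $S$ assumed to lie
in the relative interior of a face; the sizes are arbitrary illustrative choices.
\begin{verbatim}
import numpy as np

def face_representation(S, tol=1e-10):
    # S is assumed to lie in the relative interior of a face of S^n_+;
    # its compact spectral decomposition S = U D_S U^T, with D_S
    # positive definite, yields the face F = U S^t_+ U^T
    lam, Q = np.linalg.eigh(S)
    keep = lam > tol               # the strictly positive eigenvalues
    return Q[:, keep]              # columns span range(S); t = keep.sum()

# example: a rank-2 matrix in S^5_+, so the face is U S^2_+ U^T
P = np.random.randn(5, 2)
U = face_representation(P @ P.T)   # U is 5 x 2
\end{verbatim}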


We let $\Sh \subseteq \Sn$ denote the space of {\em hollow matrices}; i.e., the set of symmetric
matrices with zero diagonal.%
\index{hollow space, $\Sh$}
%We let $\NN$ denote the {\em nonnegative matrices}.%
%\index{nonnegative matrices, $\NN$}
%A matrix $D=(D_{ij})\in \Sh \cap \NN$ is called
%a  {\em pre-distance matrix} \index{pre-distance matrix, $D$}. 
Let $D \in \Sh$.  If there exist points $p_1,\ldots,p_n \in \R^r$ such that
\beq  \label{eq:dn}
  D_{ij}=\|{p_i-p_j}\|_2^2, \quad i,j=1,\ldots,n,
\eeq
then $D$ is called a {\em Euclidean distance matrix}, denoted
\index{Euclidean distance matrix, \EDM}
\EDM\@. Note that we work with {\em squared distances}. 
The smallest value of $r$ such that \eqref{eq:dn} holds 
 is called the {\em embedding dimension} of $D$. 
%Throughout the paper, we assume that $r$ is given and {\em fixed}.
%\index{embedding dimension ({\em fixed}), $r$}
\index{embedding dimension}
The set of \EDM matrices forms a closed convex cone in $\Sn$, denoted
$\En$.
\index{cone of \EDM, $\Ek$}
If we are given a partial \EDM $D_p$,
let $\GG=(N,E,\omega)$ be the corresponding simple graph on the
\index{graph of the \EDM, $\GG=(N,E,\omega)$} 
nodes $N = 1{:}n$ whose edges $E$ correspond to the known 
entries of $D_p$, with $(D_p)_{ij}=\omega_{ij}^2$, for all $(i,j) \in E$.


\begin{defi}
For $Y\in \Sn$ and $\alpha \subseteq 1{:}n$, we let $Y[\alpha]$ 
denote the corresponding 
\index{principal submatrix, $Y[\alpha]$}
{\em principal submatrix} formed from the rows and columns with indices
$\alpha$. If, in addition, $|\alpha |=k$ and
$\bar Y \in \Ss^k$ is given,  then we define
\[
\Sayb:= \left\{ Y \in \Sn : Y[\alpha] = \bar Y \right\}, \quad
\Sapyb:= \left\{ Y \in \Snp : Y[\alpha] = \bar Y \right\}.
\]
\end{defi}
\index{principal submatrix set, $\Sayb$}
\index{principal submatrix positive semidefinite set, $\Sapyb$}
\begin{defi}
Given $D\in \En$ and $\alpha \subseteq 1{:}n$, let 
$B:=\KK^\dagger(D[\alpha])=P_\alpha P_\alpha^T$, where 
$P$ is full column rank. Then the
rows of $P$ are called a representation of the points in the subset
$\alpha$.
\end{defi}
\index{representation of a clique}
\noindent
The subset of matrices
in $\Sn$ with the top left $k\times k$ block fixed is
\beq
\label{eq:setSblock}
\Skyb = \left\{Y \in \Sn : Y= 
\left[
\begin{array}{c|c}
 \bar Y & \cdot \cr
 \hline 
 \cdot & \cdot
\end{array}
\right]
\right\}.
\index{top-left block fixed, $\Skyb$}
\index{$\Skyb$, top-left block fixed}
\index{principal submatrix top-left block, $\Skyb$}
\index{$\Skyb$, principal submatrix top-left block}
\eeq
Similarly, if the principal submatrix $\bar D \in \EE^k$ is given, 
for index set $\alpha \subseteq 1{:}n$, with $|\alpha| = k$,
we define 
\beq
\label{eq:setS}
\Eayb:= \left\{ D \in \En : D[\alpha] = \bar D \right\}.
\eeq
\index{$\Eayb$, principal submatrix of \EDM}
\index{principal submatrix of \EDM, $\Eayb$}
The subset of matrices
in $\En$ with the top left $k\times k$ block fixed is
\beq
\label{eq:setEblock}
\Ekyb = \left\{D \in \En : D = 
\left[
\begin{array}{c|c}
 \bar D & \cdot \cr
 \hline 
 \cdot & \cdot
\end{array}
\right]
\right\}.
\index{top-left block fixed, $\Ekyb$}
\index{principal submatrix top-left block, $\Ekyb$}
\eeq


	
We are given a subset (including the distances between anchors) of the 
(squared) distances from \eqref{eq:dn}. This forms a partial \EDMc $D_p$. 
We intend to solve the {\em \EDM completion problem}, i.e. 
finding the missing entries of $D_p$ to complete the $\EDM$.
\index{\EDM completion problem}
This completion problem can be solved by finding a set of points 
$p_1,\ldots,p_n \in \R^r$ satisfying \eqref{eq:dn}, 
where $r$ is the embedding dimension of the partial \EDMc $D_p$.
\index{embedding dimension ({\em fixed}), $r$}
Equivalently, we solve the graph
realizability problem with dimension $r$, i.e.,~finding
\index{graph realizability}
positions in $\R^r$ for the vertices of a graph such that the 
inter-distances of these positions satisfy the 
given edge lengths of the graph.
%e.g. \cite{biswasliangtohwangye,biswasliangtohye:05,MR2398864,BiswasYe:04,MR2191577}.

Let $Y \in \Mn$ be an $n \times n$ real matrix and $y \in \Rn$ a vector.
\index{$n\times n$ matrices, $\Mn$}
We let $\diag (Y)$ denote the vector in $\Rn$ formed from the 
diagonal of $Y$. Then $\Diag(y)=\diag^*(y)$ denotes the diagonal matrix in 
$\Mn$ with the vector $y$ along its diagonal; 
$\Diag$ is the adjoint of $\diag$.
The operator $\offDiag$ can then be defined as 
$\offDiag (Y) := Y - \Diag ( \diag Y )$;%
\index{offDiag operator of a matrix, $\offDiag M$}%
\index{diagonal of a matrix, $\diag M$}%
\index{diagonal matrix from a vector, $\Diag v$} 
let $\usvec : \Sn \rightarrow \R^{n(n-1)/2}$ where $\usvec (D)$
\index{$\usvec$}
is $\sqrt{2}$ times the vector in $\R^{n(n-1)/2}$ formed from the 
strictly upper triangular part of $D$ taken columnwise;
the adjoint is $\usMat = \usvec^*$ and $\usMat (d) \in \Sh$ takes 
 \index{$\usMat$}
$\frac 1{\sqrt{2}}$ times the vector
$d \in \R^{n(n-1)/2}$ and forms the matrix in $\Sh$.
Note that $\usMat = \usvec^\dagger$, i.e. $\usvec \usMat = I$;
and $\usMat$ is an isometry from $\R^{n(n-1)/2}$ to $\Sh$.
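
A minimal sketch of $\usvec$ and its adjoint $\usMat$, checking that
$\usMat\usvec$ is the identity on $\Sh$ and that the map is an isometry,
under the columnwise ordering convention above:
\begin{verbatim}
import numpy as np

def usvec(D):
    # sqrt(2) times the strictly upper triangular part, taken columnwise
    n = D.shape[0]
    i, j = np.triu_indices(n, k=1)
    order = np.lexsort((i, j))     # sort by column, then by row
    return np.sqrt(2) * D[i[order], j[order]]

def usMat(d, n):
    # adjoint (and generalized inverse): rebuild the hollow matrix
    i, j = np.triu_indices(n, k=1)
    order = np.lexsort((i, j))
    D = np.zeros((n, n))
    D[i[order], j[order]] = d / np.sqrt(2)
    return D + D.T

D = np.random.randn(4, 4); D = D + D.T; np.fill_diagonal(D, 0)
d = usvec(D)
assert np.allclose(usMat(d, 4), D)                       # identity on S_H
assert np.isclose(np.linalg.norm(d), np.linalg.norm(D))  # isometry
\end{verbatim}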

For $P^T=\begin{bmatrix} p_1 & p_2 & \ldots & p_n \end{bmatrix} 
\in \MM^{r\times n}$,
where $p_j$, $j=1,\ldots,n$, are the points used in \eqref{eq:dn},
let $Y:=PP^T$, and let $D$ be the corresponding \EDM satisfying \eqref{eq:dn}.
\index{matrix of points in space, $P$}
The following linear operators $\KK$ and $\DD_e$ provide the connection
\index{\EDM linear operator, $\KK$}\index{vector linear operator, $\DD_v$}
between \SDP and \EDM.
\beq \begin{array}{rcl} \label{KK}
\KK(Y)& := &  \DD_e(Y)-2Y \\
      & := &  \diag (Y)\,e^T + e\,\diag (Y)^T - 2Y \\
      &  = &  \left( p_{i}^{T} p_{i} + p_{j}^{T} p_{j} 
                     - 2 p_{i}^{T} p_{j} \right)_{i,j = 1}^{n} \\
      &  = &  \left( \| p_{i} - p_{j} \|_{2}^{2} \right)_{i,j = 1}^{n} \\
      &  = &  D.
\end{array} \eeq
By abuse of notation, we also allow $\DD_v$ to act on a vector;
that is, $\DD_v(y) := yv^T+vy^T$.
Note that 
\beq \begin{array}{ccl} \label{KK2}
\KK(Y) =   2\Ssum (\diag(Y) e^T )-2Y   =   2(\Ssum (\cdot e^T) \diag )(Y) -2Y.
\end{array} \eeq
Therefore, the adjoint of $\KK$ acting on $D \in \Sn$ is
\beq \begin{array}{rcl} \label{KKs}
\KK^*(D) 
&=& 
  2(\Ssum (\cdot e^T) \diag )^*(D) -2D
\\&=&
  2\diag^* (\cdot e^T)^* (\Ssum )^*(D) -2D
\\&=&
  2\Diag (\cdot e) (D) -2D
\\&=&
2(\Diag(De)-D).
\end{array} \eeq
Moreover,
\beq \begin{array}{rcl} \label{KKsKK}
\KK^*\KK(Y) 
&=& 
2(\Diag(\KK(Y)e)-\KK(Y))
\\&=&
-2(\KK(Y)-\Diag(v)),
\end{array} \eeq
where $v=\KK(Y)e$; we write it this way to emphasize that,
\index{$\KK^*\KK$}
up to the factor $-2$, we simply subtract the row sums from the diagonal of $\KK(Y)$.
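
The following sketch implements $\KK$ and $\KK^*$ from \eqref{KK} and
\eqref{KKs}, and numerically checks \eqref{KKsKK} together with the adjoint
identity $\ip{\KK(Y)}{D}=\ip{Y}{\KK^*(D)}$; it is illustrative only.
\begin{verbatim}
import numpy as np

def K(Y):
    # K(Y) = diag(Y) e^T + e diag(Y)^T - 2Y
    d = np.diag(Y)
    return d[:, None] + d[None, :] - 2 * Y

def Kadj(D):
    # K^*(D) = 2(Diag(De) - D)
    return 2 * (np.diag(D.sum(axis=1)) - D)

Y = np.random.randn(6, 6); Y = (Y + Y.T) / 2
D = np.random.randn(6, 6); D = (D + D.T) / 2
# adjoint identity <K(Y), D> = <Y, K^*(D)>
assert np.isclose(np.trace(K(Y).T @ D), np.trace(Y.T @ Kadj(D)))
# K^*K(Y) = 2(Diag(K(Y)e) - K(Y))
assert np.allclose(Kadj(K(Y)), 2 * (np.diag(K(Y).sum(axis=1)) - K(Y)))
\end{verbatim}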

The linear operator $\KK$ is one-to-one and onto between the 
{\em centered} and {\em hollow} subspaces of $\Sn$, which are defined as
\beq \begin{array}{rcll}
\Sc &:=&  \{ Y \in \Sn :  Ye = 0 \} & \mbox{(zero row sums)}, \\
\Sh& := & \{ D \in \Sn :  \diag(D) = 0 \} &  = \RR(\offDiag).
\end{array}
\eeq
%\index{\EDM linear operator, $\KK$}\index{vector linear operator, $\DD_v$} \index{$\DD_v$, vector linear operator}\index{centered subspace, $\Sc$}\index{hollow subspace, $\Sh$}\index{$\KK$}
Let $J:=J_k:=I-\frac 1k ee^T$ denote the orthogonal projection onto the
\index{$J$, orthogonal projection onto $\{e\}^\perp$}
subspace $\{e\}^\perp$, and define the linear operator
$\TT (D) := -\frac 12 J \offDiag (D) J$. 
\index{$\TT = \KK^\dagger$}
(We use $J$ when the dimension is clear.)
Then we have the following relationships.
\begin{prop}(\cite{homwolkA:04})
\label{prop:KTonetoone}
The linear operator $\TT$ is the generalized inverse of 
the linear operator $\KK$; that is, $\KK^\dagger =\TT$.  
Moreover:
\beq \label{eq:RN1}
\begin{array}{rcl}
\RR (\KK) = \Sh; & \quad  &\NN(\KK) = \RR(\DD_e);\\
\RR(\KK^*)=\RR (\TT) = \Sc; & \quad & \NN(\KK^*)= \NN(\TT) = \RR(\Diag);
\end{array}
\eeq
\beq \label{eq:RN3}
\begin{array}{rcl}
\Sn= \Sh \oplus  \RR(\Diag) = \Sc \oplus \RR(\DD_e).
\end{array}
\eeq
%\end{proof}
\end{prop}

\begin{thm}(\cite{homwolkA:04})
\label{thm:KTonetoone}
The linear operators $\TT$ and $\KK$ are one-to-one and onto mappings between the cone $\En \subset \Sh$ and the face of the semidefinite cone $\Snp \! \cap \Sc$.  That is, 
\[ \TT(\En) = \Snp \! \cap \Sc 
   \quad \mbox{and} \quad 
   \KK(\Snp \! \cap \Sc) = \En. \]
%\end{proof}
\end{thm}
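
The following sketch illustrates Proposition~\ref{prop:KTonetoone} and
Theorem~\ref{thm:KTonetoone}: for an \EDM built from centered points,
$\TT$ recovers the centered Gram matrix, which is positive semidefinite
with zero row sums. It is a numerical illustration only.
\begin{verbatim}
import numpy as np

def T(D):
    # T(D) = -1/2 J offDiag(D) J, with J = I - ee^T/n
    n = D.shape[0]
    J = np.eye(n) - np.ones((n, n)) / n
    return -0.5 * J @ (D - np.diag(np.diag(D))) @ J

P = np.random.randn(7, 3)
P -= P.mean(axis=0)                       # center the points
G = P @ P.T                               # centered Gram matrix, Ge = 0
d = np.diag(G)
D = d[:, None] + d[None, :] - 2 * G       # D = K(G) is an EDM
assert np.allclose(T(D), G)               # T = K^dagger recovers G
assert np.allclose(T(D) @ np.ones(7), 0)  # T(D) lies in S_C
\end{verbatim}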




Let $D_p \in \Ss^n$ be a \emph{partial} \EDM with embedding dimension $r$
and let $H \in \Ss^n$ be the $0$--$1$ matrix corresponding to the known entries of $D_p$.
One can use the substitution $D=\KK(Y)$, where $Y\in \Snp \cap \Ss_C$,
in the \EDM completion problem 
\[
	\begin{array}{cc}
	\mbox{Find} & D \in \EE^n \\
	\mbox{s.t.} & H \circ D = D_p
	\end{array}
\]
to obtain the \SDP relaxation
\[
	\begin{array}{cc}
	\mbox{Find} & Y \in \Ss^n_+ \cap \Ss_C \\
	\mbox{s.t.} & H \circ \KK(Y) = D_p
	\end{array}.
\]
This relaxation does not restrict the rank of $Y$, and may therefore
yield a solution whose embedding dimension is too large,
namely whenever $\rank (Y) > r$.
A clique $\gamma \subseteq 1{:}n$
\index{clique}
in the graph $\GG$ corresponds to a subset of sensors for  
which the distances $\omega_{ij}=\|p_i-p_j\|_2$ are known, for all
$i,j \in \gamma$; equivalently, the clique corresponds
to the principal submatrix $D_p[\gamma]$ of the
partial \EDM matrix $D_p$, where all the elements of $D_p[\gamma]$ are known.
Moreover, solving \SDP problems with rank restrictions is NP-hard.
However, we work on faces of $\Snp$ described by $U\Ss_+^tU^T$, with $t \leq n$.
In order to find the face of smallest dimension $t$, we need to correctly identify the matrix $U$. 
In this paper, we obtain information on $U$ using the cliques in the 
graph of the partial \EDM.
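
For comparison with the facial reduction developed below, the following
is a minimal sketch of the plain relaxation itself, written with the
CVXPY modeling package (an assumption of this sketch; any \SDP modeling
tool would do). The random data stands in for the mask $H$ and the
partial \EDM $D_p$.
\begin{verbatim}
import numpy as np
import cvxpy as cp

np.random.seed(0)
n, r = 8, 2
P = np.random.randn(n, r)                 # positions used only to build data
d = np.sum(P**2, axis=1)
Dp = d[:, None] + d[None, :] - 2 * P @ P.T
H = np.triu(np.random.rand(n, n) < 0.5, 1).astype(float); H = H + H.T

e = np.ones((n, 1))
Y = cp.Variable((n, n), PSD=True)
dY = cp.reshape(cp.diag(Y), (n, 1))
KY = dY @ e.T + e @ dY.T - 2 * Y          # K(Y)
prob = cp.Problem(cp.Minimize(0),
                  [Y @ e == 0,            # Y centered: Y in S_C
                   cp.multiply(H, KY) == H * Dp])
prob.solve()
print(prob.status)                        # 'optimal' when feasible
\end{verbatim}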

Suppose that 
\beq
\label{eq:defV}
V^Te=0 \mbox{ and } \begin{bmatrix} e & V\end{bmatrix} 
\mbox{ is nonsingular}.
\eeq
We now introduce the composite operators
\beq \begin{array}{rcl} \label{KV} 
\KK_V(X)& := &  \KK( V X V^T),
\end{array} \eeq 
and
\beq \begin{array}{rcl} \label{TV} 
\TT_V(D)& := &  V^\dagger\TT( D)(V^T)^\dagger =
               - \frac 12 V^\dagger J\offDiag(D) J(V^T)^\dagger.
\end{array} \eeq 

\begin{lem}[\cite{AlKaWo:97,homwolkA:04}] \label{KVTV} 
Suppose that $V$ satisfies the definition in (\ref{eq:defV}). Then
\begin{eqnarray*} 
\KK_V ( \snn) =\Sh, \\
\TT_V ( \Sh) =\snn, 
\end{eqnarray*} 
and $\KK_V =\TT_V^\dagger$.
\end{lem}

From (\ref{eq:defV})  and \eqref{KK} we get that 
\beq \label{KsVTsV}
\KK^*_V(D) = V^T \KK^*(D) V
\eeq
is the adjoint operator of $\KK_V$.
The following corollary summarizes useful
relationships between $\E$, the cone of Euclidean distance matrices of
order $n$, and $\p$, the cone of positive semidefinite matrices of
order $n-1$.

\begin{cor}[\cite{AlKaWo:97,homwolkA:04}] \label{lem:co} 
Suppose that $V$ is defined as in (\ref{eq:defV}).  Then:
\begin{eqnarray*} 
\KK_V(\p)  & = &  \E , \\
\TT_V(\E) & = &\p.
\end{eqnarray*} 
\end{cor}
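
A sketch of the round trip in Corollary~\ref{lem:co}: with $V$ an
orthonormal basis of $\{e\}^\perp$ (one admissible choice in
\eqref{eq:defV}), $\TT_V$ maps an \EDM to a positive semidefinite matrix
of order $n-1$, and $\KK_V$ maps it back. Illustrative only.
\begin{verbatim}
import numpy as np

def K(Y):
    d = np.diag(Y)
    return d[:, None] + d[None, :] - 2 * Y

def V_basis(n):
    # one admissible V: an orthonormal basis of {e}^perp, via QR
    Q, _ = np.linalg.qr(np.hstack([np.ones((n, 1)),
                                   np.random.randn(n, n - 1)]))
    return Q[:, 1:]

def T_V(D, V):
    # T_V(D) = V^dagger T(D) (V^T)^dagger
    n = D.shape[0]
    J = np.eye(n) - np.ones((n, n)) / n
    TD = -0.5 * J @ (D - np.diag(np.diag(D))) @ J
    Vd = np.linalg.pinv(V)
    return Vd @ TD @ Vd.T

n = 6; V = V_basis(n)
P = np.random.randn(n, 2)
D = K(P @ P.T)                                 # an EDM in E^n
X = T_V(D, V)                                  # in S^{n-1}_+
assert np.all(np.linalg.eigvalsh(X) > -1e-9)
assert np.allclose(K(V @ X @ V.T), D)          # K_V(X) = D
\end{verbatim}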


%===================================================
\section{Facial Geometry}
%===================================================

Let $\bar D$ be an $n \times n$ \emph{partial} Euclidean distance matrix with associated graph $G = (V,E)$.  In particular, we have that 
\[
	V = 1{:}n 
	\quad \text{and} \quad
	E = \set{ij : \text{$\bar D_{ij}$ is specified}}.
\]
Let $\FF$ be the corresponding set of feasible centered Gram matrices $Y$; that is,
\[
	\FF := \set{ Y \in \sS^n_+ \cap \sS^n_C : \KK(Y)_{ij} = \bar D_{ij}, \forall ij \in E}.
\]

Let $\alpha \subseteq 1{:}n$ be a subset of the nodes of the graph $G$.  We define the feasible set of centered Gram matrices $Y$ that agree with the distances on the subgraph induced by the nodes in $\alpha$ as
\[
	\FF_\alpha := \set{ Y \in \sS^n_+ \cap \sS^n_C : \KK(Y)_{ij} =
\bar D_{ij}, \text{$\forall ij \in E \cap (\alpha \times \alpha)$}}.
\]
Note that $\FF \subseteq \FF_\alpha$.

Let $\alpha_1,\alpha_2 \subseteq 1{:}n$ and let
\[
	\face(\FF_{\alpha_i}) =  U_i \sS^{n-|\alpha_i|+d_i+1}_+  U_i^T,
\]
for $i=1,2$.


%===================================================
\subsection{Mixed dimension face intersection lemma}
%===================================================
Suppose that $\{0\} \neq f_i \lhd \Snp$, $i=1,2$, are proper faces. We would
like to study the intersection of these two faces. 
Let 
\[
d_i = \dim f_i, \ 
G_i \in \relint f_i, \ 
0 \neq v_i \in \NN(G_i), \ 
Q_i^Te = v_i, \ 
Q_i \in \OO_{n}, \ 
i=1,2.
\]
\index{orthogonal matrices, $\OO$}
\index{$\OO$, orthogonal matrices}
By using the rotations $f_i \leftarrow Q_if_iQ_i^T$, we can ensure that 
\[
e \in \NN(G_i), \ 
G_i \in \Sc, \ 
f_i \subseteq \Sc, \ 
i=1,2.
\]
Therefore, if needed, we can assume that the $G_i$ are 
appropriate centered Gram matrices. 
In addition, we can factor a Gram matrix ????$G \in f_1 \cap f_2$???? as
\[
G= U 
\begin{bmatrix} G_{11} & G_{12} \cr G_{12}^T & G_{22} \end{bmatrix}
 U^T, \quad 
	 U= 
	\kbordermatrix{
		& d_1 + 1 & |\alpha_2 \setminus \alpha_1| \\
		|\alpha_1 \setminus \alpha_2| & U^{11} & 0 \\ 
		|\alpha_1\cap\alpha_2| & U^{12}  & 0 \\ 
		|\alpha_2 \setminus \alpha_1| & 0 & I_{22} 
	} = 
	\kbordermatrix{
		& d_1+1 & |\alpha_2 \setminus \alpha_1| \\
		|\alpha_1| & \bar U & 0 \\
		|\alpha_2 \setminus \alpha_1| & 0 & I_{22}
	},
\]
?????check out the details???? i.e. any two faces can be factored this
way to get a special structure for the intersection?????

????connection to Kronecker canonical form???? e.g.,~\cite{MR943994}
and so we can do this for an arbitrary pair of faces???? how????


We now consider the special case where each $G_i$ has a 
decomposition as above with a $U$ that has an identity matrix block.

\begin{lemma}  \label{lem:mixed_dim_face_intersection}
For $i=1,2$, let $d_i$ be a nonnegative integer, $\alpha_i \subseteq 1{:}n$, and $\bar U_i \in \R^{|\alpha_i| \times (d_i+1)}$.
Let
\begin{align}
	U_1 & := 
	\kbordermatrix{
		& d_1 + 1 & |\alpha_2 \setminus \alpha_1| \\
		|\alpha_1 \setminus \alpha_2| & U_1^{11} & 0 \\ 
		|\alpha_1\cap\alpha_2| & U_1^{12}  & 0 \\ 
		|\alpha_2 \setminus \alpha_1| & 0 & I_{22} 
	} := 
	\kbordermatrix{
		& d_1+1 & |\alpha_2 \setminus \alpha_1| \\
		|\alpha_1| & \bar U_1 & 0 \\
		|\alpha_2 \setminus \alpha_1| & 0 & I_{22}
	}, \\
	U_2 & := 
	\kbordermatrix{
		& |\alpha_1 \setminus \alpha_2| & d_2 + 1\\
		|\alpha_1 \setminus \alpha_2| & I_{11} & 0 \\
		|\alpha_1\cap\alpha_2| & 0 & U_2^{12} \\
		|\alpha_2 \setminus \alpha_1| & 0 & U_2^{22}
	} :=
	\kbordermatrix{
		& |\alpha_1 \setminus \alpha_2| & d_2+1 \\
 		|\alpha_1 \setminus \alpha_2| & I_{11} & 0 \\
		|\alpha_2| & 0 & \bar U_2
	}.
\end{align}
For $i=1,2$, let $k_i := \dim\rbr{ \nul\rbr{U_i^{12}} }$ and $Z_i \in \R^{(d_i+1) \times k_i}$ satisfy
\[
	\range(Z_i) = \nul(U_i^{12}).
\]

\begin{itemize}
\item
If $\range(U_2^{12}) \subseteq \range(U_1^{12})$ and
\beq
	\label{eq:Uexp1}
	U := 
	\kbordermatrix{
		& d_2 + 1 & k_1 \\
		|\alpha_1 \setminus \alpha_2| & U_1^{11}\rbr{U_1^{12}}^\dag
U_2^{12} & U_1^{11}Z_1 \\
		|\alpha_2| & \bar U_2 & 0
	},
\eeq
then $\range(U) = \range(U_1) \cap \range(U_2)$.

\item
If $\range(U_1^{12}) \subseteq \range(U_2^{12})$ and
\beq
	\label{eq:Uexp2}
	U := 
	\kbordermatrix{
		& d_1 + 1 & k_2 \\
		|\alpha_1| & \bar U_1 & 0 \\
		|\alpha_2 \setminus \alpha_1| & U_2^{22}\rbr{U_2^{12}}^\dag
U_1^{12} & U_2^{22}Z_2 
	},
\eeq
then $\range(U) = \range(U_1) \cap \range(U_2)$.
\end{itemize}
If $\range(U_1^{12}) = \range(U_2^{12})$, then
\beq \label{eqn:d1k2d2k1}
	d_1 + k_2 = d_2 + k_1.
\eeq
\end{lemma}
%
\begin{proof}
Let $U$ be given by equation~\eqref{eq:Uexp1}.
Suppose that $x \in \range(U_1) \cap \range(U_2)$.  Then
\beq \label{eq:xinRU1capRU2}
	x = 
	\bbm
		U_1^{11} v_1 \\
		U_1^{12} v_1 \\
		v_2
	\ebm
	=
	\bbm
		w_1 \\
		U_2^{12} w_2 \\
		U_2^{22} w_2
	\ebm,
\eeq
for some $v = [v_1; v_2] \in \R^{d_1 + 1 + |\alpha_2 \setminus \alpha_1|}$ and $w = [w_1; w_2] \in \R^{|\alpha_1 \setminus \alpha_2| + d_2 + 1}$.  Therefore, we have that
\[
	v_1 \in \rbr{U_1^{12}}^\dag U_2^{12} w_2 + \nul\rbr{U_1^{12}},
\]
so $v_1 = \rbr{U_1^{12}}^\dag U_2^{12} w_2 + Z_1 z_1$, for some $z_1 \in \R^{k_1}$.
Thus,
\beq \label{eq:xinRU}
	x = 
	\bbm
		U_1^{11}\rbr{U_1^{12}}^\dag U_2^{12} w_2 + U_1^{11}Z_1 z_1 \\
		U_2^{12} w_2 \\
		U_2^{22} w_2
	\ebm
	=
	U \bbm w_2 \\ z_1 \ebm,
\eeq
so $x \in \range(U)$.
Now suppose that $x \in \range(U)$.  Then there exist $w_2 \in \R^{d_2+1}$ and $z_1 \in \R^{k_1}$ satisfying equation~\eqref{eq:xinRU}.  Let
\begin{align*}
	v_1 & := \rbr{U_1^{12}}^\dag U_2^{12} w_2 + Z_1 z_1, \\
	v_2 & := U_2^{22} w_2, \\
	w_1 & := U_1^{11}v_1.
\end{align*}
Since $\range\rbr{U_2^{12}} \subseteq \range\rbr{U_1^{12}}$ and
$\range(Z_1) = \nul\rbr{U_1^{12}}$, we have 
\[
	U_1^{12} v_1 = U_1^{12}\rbr{U_1^{12}}^\dag U_2^{12} w_2 +
U_1^{12}Z_1 z_1 = U_2^{12} w_2.
\]
Therefore, equation~\eqref{eq:xinRU1capRU2} holds, so $x \in \range(U_1) \cap \range(U_2)$.
Thus,
\[
	\range(U) = \range(U_1) \cap \range(U_2).
\]
A similar argument gives the same result when $U$ is given by equation~\eqref{eq:Uexp2}.

Finally, suppose $\range(U_1^{12}) = \range(U_2^{12})$.  Then
\[
	d_1 + 1 = \dim(\nul(U_1^{12})) + \rank(U_1^{12}) = k_1 +
\rank(U_1^{12})
\]
and
\[
	d_2 + 1 = \dim(\nul(U_2^{12})) + \rank(U_2^{12}) = k_2 +
\rank(U_2^{12})
\]
imply that $d_2 + 1 - k_2 = d_1 + 1 - k_1$.  Therefore, $d_2 + k_1 = d_1 + k_2$.
\end{proof}
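
A numerical check of Lemma~\ref{lem:mixed_dim_face_intersection} under the
inclusion $\range(U_2^{12}) \subseteq \range(U_1^{12})$, with $U$ assembled
exactly as in \eqref{eq:Uexp1}; the block sizes are arbitrary illustrative
choices.
\begin{verbatim}
import numpy as np

def null_basis(A, tol=1e-10):
    # columns span null(A), via the SVD
    _, s, vt = np.linalg.svd(A)
    return vt[int((s > tol).sum()):].T

np.random.seed(1)
a1, c, a2, d1, d2 = 3, 4, 2, 3, 2            # |a1\a2|, |a1&a2|, |a2\a1|
U1_11 = np.random.randn(a1, d1 + 1)
U1_12 = np.random.randn(c, 2) @ np.random.randn(2, d1 + 1)  # rank 2
U2_12 = U1_12 @ np.random.randn(d1 + 1, d2 + 1)  # range inside U1_12's
U2_22 = np.random.randn(a2, d2 + 1)
Z1 = null_basis(U1_12)                       # k1 = (d1+1) - 2 = 2

U1 = np.block([[U1_11, np.zeros((a1, a2))],
               [U1_12, np.zeros((c, a2))],
               [np.zeros((a2, d1 + 1)), np.eye(a2)]])
U2 = np.block([[np.eye(a1), np.zeros((a1, d2 + 1))],
               [np.zeros((c, a1)), U2_12],
               [np.zeros((a2, a1)), U2_22]])
Ubar2 = np.vstack([U2_12, U2_22])
U = np.block([[U1_11 @ np.linalg.pinv(U1_12) @ U2_12, U1_11 @ Z1],
              [Ubar2, np.zeros((Ubar2.shape[0], Z1.shape[1]))]])

rk = np.linalg.matrix_rank
assert rk(np.hstack([U1, U])) == rk(U1)      # range(U) inside range(U1)
assert rk(np.hstack([U2, U])) == rk(U2)      # range(U) inside range(U2)
assert rk(U) == rk(U1) + rk(U2) - rk(np.hstack([U1, U2]))
\end{verbatim}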





\begin{lemma}  \label{lem:mixed_dim_face_intersection-two}
For $i=1,2$, let $d_i$ be a nonnegative integer, $\alpha_i \subseteq 1{:}n$, and $\bar U_i \in \R^{|\alpha_i| \times (d_i+1)}$.
Let
\begin{align}
	U_1 & := 
	\kbordermatrix{
		& d_1 + 1 & |\alpha_2 \setminus \alpha_1| \\
		|\alpha_1 \setminus \alpha_2| & U_1^{11} & 0 \\ 
		|\alpha_1\cap\alpha_2| & U_1^{12}  & 0 \\ 
		|\alpha_2 \setminus \alpha_1| & 0 & I 
	} := 
	\kbordermatrix{
		& d_1+1 & |\alpha_2 \setminus \alpha_1| \\
		|\alpha_1| & \bar U_1 & 0 \\
		|\alpha_2 \setminus \alpha_1| & 0 & I
	}, \\
	U_2 & := 
	\kbordermatrix{
		& |\alpha_1 \setminus \alpha_2| & d_2 + 1\\
		|\alpha_1 \setminus \alpha_2| & I & 0 \\
		|\alpha_1\cap\alpha_2| & 0 & U_2^{12} \\
		|\alpha_2 \setminus \alpha_1| & 0 & U_2^{22}
	} :=
	\kbordermatrix{
		& |\alpha_1 \setminus \alpha_2| & d_2+1 \\
 		|\alpha_1 \setminus \alpha_2| & I & 0 \\
		|\alpha_2| & 0 & \bar U_2
	}.
\end{align}
Let $\FF_i$, $i=1,2$, denote the corresponding two faces. Then there
exist matrices $Q_j$, $j=1,2$, each with linearly independent
columns, such that 
\[
\bigcap_{i=1,2} \range(U_i) = \range(U_j Q_j), \quad j=1,2. 
\]
Equivalently, the intersection of the two faces is represented
by either choice of $U_j Q_j$, $j=1,2$. 
\end{lemma}
%
\begin{proof}
The intersection of the two faces is represented by a matrix
$X$ in its relative interior, where the range of $X$ is given by
the intersection of the two ranges, i.e.,
\[
X= U_i Q_iD_i Q_i^TU_i^T, \quad D_i \succ 0, \quad i=1,2,
\]
for appropriate matrices $Q_i$.
This means that we can represent the face with
either representation of the common range space.
\end{proof}









%Let $\alpha_1, \alpha_2 \subseteq 1{:}n$ be two subsets with faces respectively given by
%\begin{align}
%	U_1 & = 
%	\kbordermatrix{
%		& d_1 + 1 & |\alpha_2 \setminus \alpha_1| \\
%		|\alpha_1 \setminus \alpha_2| & U_1^{11} & 0 \\ 
%		|\alpha_1\cap\alpha_2| & U_1^{12}  & 0 \\ 
%		|\alpha_2 \setminus \alpha_1| & 0 & I 
%	} = 
%	\kbordermatrix{
%		& d_1+1 & |\alpha_2 \setminus \alpha_1| \\
%		|\alpha_1| & \bar U_1 & 0 \\
%		|\alpha_2 \setminus \alpha_1| & 0 & I
%	}, \\
%	U_2 & = 
%	\kbordermatrix{
%		& |\alpha_1 \setminus \alpha_2| & d_2 + 1\\
%		|\alpha_1 \setminus \alpha_2| & I & 0 \\
%		|\alpha_1\cap\alpha_2| & 0 & U_2^{12} \\
%		|\alpha_2 \setminus \alpha_1| & 0 & U_2^{22}
%	} =
%	\kbordermatrix{
%		& |\alpha_1 \setminus \alpha_2| & d_2+1 \\
% 		|\alpha_1 \setminus \alpha_2| & I & 0 \\
%		|\alpha_2| & 0 & \bar U_2
%	}.
%\end{align}
%That is, $\alpha_1$ has an \emph{intrinsic dimension} of $d_1$ and $\alpha_2$ has an \emph{intrinsic dimension} of $d_2$.  Thus, we are allowing the possibility of having \emph{mixed dimensions} while computing the \emph{face intersection} given by a full column rank matrix $U$ that satisfies
%\begin{equation} \label{eq:U}
%	\range(U) = \range(U_1) \cap \range(U_2).
%\end{equation}

%Let $k_i$ be the \emph{relative rigidity level} of the intersection of subset $\alpha_i$, for $i=1,2$.  Then
%\[
%	k_i := \max\set{0,d_i + 1 - |\alpha_1\cap\alpha_2|}, \quad \text{for $i=1,2$}.
%\]
%For example, a rigidity level of zero means that the intersection is \emph{rigid}.  
%We assume that $\range(U_1^{12}) = \range(U_2^{12})$ and that 
%\[
%	\dim\rbr{ \nul\rbr{U_i^{12}} } = k_i, \quad \text{for $i=1,2$}.
%\]

%Then we can define $U$ that satisfies equation~\eqref{eq:U} by either of the two following expressions:
%\begin{align}
%	\label{eq:Uexp1}
%	U & := 
%	\kbordermatrix{
%		& d_2 + 1 & k_1 \\
%		|\alpha_1 \setminus \alpha_2| & U_1^{11}\rbr{U_1^{12}}^\dag
%		U_2^{12} & U_1^{11}Z_1 \\
%		|\alpha_2| & \bar U_2 & 0
%	}, \\
%	\label{eq:Uexp2}
%	U & := 
%	\kbordermatrix{
%		& d_1 + 1 & k_2 \\
%		|\alpha_1| & \bar U_1 & 0 \\
%		|\alpha_2 \setminus \alpha_1| & U_2^{22}\rbr{U_2^{12}}^\dag
%		U_1^{12} & U_2^{22}Z_2 
%	}.
%\end{align}

%By Lemma~\ref{lem:mixed_dim_face_intersection}, the \emph{intrinsic dimension} of $\alpha_1\cup\alpha_2$ is
%\[
%	d_1 + k_2 = d_2 + k_1.
%\]






%---------------------------------------------------
\subsubsection{Determining $k_1$ and $k_2$}
%---------------------------------------------------

For a Euclidean distance matrix $\hat D \in \sS^n$, we define the \emph{embedding dimension} of $\hat D$ as
\[
	\embdim(\hat D) := \min\set{ d : \exists p_1,\ldots,p_n \in \R^d \ni \norm{p_i-p_j}_2^2 = \hat D_{ij}, \forall ij}.
\]
For a \emph{partial} Euclidean distance matrix $D$, we define the \emph{maximum embedding dimension} as
\[
	\text{maxdim}(D) := \max\set{ \rank(Y) : Y \in \sS^n_+ \cap \sS^n_C \ni \KK(Y)_{ij} = D_{ij}, \forall ij \in E },
\]
where $E := \set{ij : \text{$D_{ij}$ is ``specified''} }$.
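
Numerically, the embedding dimension of a given \EDM is simply the rank
of the centered Gram matrix $\KK^\dagger(D)$; a sketch:
\begin{verbatim}
import numpy as np

def embdim(D, tol=1e-9):
    # embdim(D) = rank of the centered Gram matrix K^dagger(D)
    n = D.shape[0]
    J = np.eye(n) - np.ones((n, n)) / n
    return int(np.linalg.matrix_rank(-0.5 * J @ D @ J, tol=tol))

P = np.random.randn(8, 3)                 # 8 points in R^3
d = np.sum(P**2, axis=1)
D = d[:, None] + d[None, :] - 2 * P @ P.T
assert embdim(D) == 3
\end{verbatim}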

Let $\alpha \subseteq 1{:}n$ and 
\[
	\face \set{ Y \in \sS^n_+ : H[\alpha] \circ \KK(Y[\alpha]) = H[\alpha] \circ D[\alpha]}
	:=
	U \sS^{n - |\alpha| + d + 1}_+ U^T,
\]
where 
\begin{align}
	U\sbr{\alpha,1{:}(d+1)} 
	& := \bar U \in \R^{|\alpha| \times (d+1)}, \\
	U\sbr{\bar\alpha,(d+2){:}(n - |\alpha| + d + 1)} 
	& := I \in \sS^{n-|\alpha|}.
\end{align}

For an index set $\beta \subseteq \alpha$, we let
\[
	U_\beta := \bar U\sbr{\beta,:}.
\]



%===================================================
\subsection{Alternate/Direct Approach Using SVD}
%===================================================

We start with the system relating the unknown vectors $x$, $y$, $z$:
\[
U_1x= U_2y= U_3z.
\]
We let
\[
U_i=u_i s_i v_i^T, \quad i=1,2,3,
\]
denote the SVDs of the matrices. Therefore, the system becomes
\[
u_1 s_1 v_1^T x= u_2 s_2 v_2^T y= u_3 s_3 v_3^T z;
\]
\[
s_1 v_1^T x= u_1^T u_2 s_2 v_2^T y= u_1^T u_3 s_3 v_3^T z.
\]
\]
In this example, all the cliques have the same size, $4$, and the embedding
dimension is $2$. Therefore, we lose $4-2-1=1$ in rank in the clique
reduction for each clique, i.e., the dimension of each face is $7-1=6$.
Therefore, each $U_i$ is $7\times 6$ and has full column rank.
In fact, by construction the $U_i$ have orthonormal columns, and so the
singular values are all $1$, except for one which is $0$.
Therefore, $s_1v_1^T$ has a zero bottom row.
We get
\[
x= (s_1v_1^T )^\dagger u_1^T u_2 s_2 v_2^T y= (s_1v_1^T )^\dagger u_1^T u_3 s_3 v_3^T z,
\quad  b_2 y=0, \quad b_3z=0,
\]
where $b_i$ denotes the last row of $u_1^T u_i s_i v_i^T$, $i=2,3$.
We can substitute using bases $P_i$ for the orthogonal complements of the
$b_i$:
\[
x= (s_1v_1^T )^\dagger u_1^T u_2 s_2 v_2^T P_2\bar y= 
            (s_1v_1^T )^\dagger u_1^T u_3 s_3 v_3^T P_3 \bar z.
\]

We can repeat this step to find $\bar y$ in terms of $\bar z$, e.g., by
using the SVD
$(s_1v_1^T )^\dagger u_1^T u_2 s_2 v_2^T P_2 = \bar u \bar s \bar v^T$.
Therefore, we get
\[
\bar u \bar s \bar v^T \bar y= 
            (s_1v_1^T )^\dagger u_1^T u_3 s_3 v_3^T P_3 \bar z.
\]
Again, $\bar s$ has a zero bottom row. This gives us a new constraint on
$\bar z$: we can substitute using a basis $\bar P_3$ for the orthogonal
complement of the bottom row of
$\bar u^T(s_1v_1^T )^\dagger u_1^T u_3 s_3 v_3^T P_3$, which we denote by $\bar b_3$.
This yields the equation
\[
\bar y = 
(\bar s \bar v^T)^\dagger \bar u^T(s_1v_1^T )^\dagger 
      u_1^T u_3 s_3 v_3^T P_3 \bar P_3 \hat z.
\]
\]




%----------------------------------------------------

%----------------------------------------------------
\section{Clique Reduction}
\label{sect:cliqred}
We now present several techniques for reducing an \EDM completion
problem using cliques in the graph. This extends the results presented in
\cite{DiKrQiWo:06,DiKrQiWo:08,kriswolk:09}. 
In particular, we modify the approach in \cite{kriswolk:09} for combining
two cliques.

%\begin{noteH}
%Somewhere, we should include the completion and facial results from
%Johnson-Tarazaga and also Johnson-Mihai\dots??
%\end{noteH}

The following two technical lemmas are given in \cite{kriswolk:09}.
\begin{lem}[\cite{kriswolk:09}]
\label{lem:psdDe}
Let $B \in \Sn$, $Bv=0$, $v \neq 0$, $y \in \Rn$ and $\bar Y := B+\DD_v(y)$.
If $\bar Y \succeq 0$, then 
\[
        y \in \RR(B) + \cone \{v\}.
\]
\qed
\end{lem}

\begin{lem}[\cite{kriswolk:09}]
\label{lem:rangeYbar}
Let $\bar Y \in \Skp$ and  $\bar U \in \MM^{k\times t}$ with $\bar U$ 
having full column rank.  
If $\face \{\bar Y\} \unlhd 
\mbox{ (resp.$=$) }
\bar U \Stp \bar U^{T}$, then
\beq
\label{eq:facereprYsub}
\face \Skpyb \unlhd 
\mbox{ (resp.$=$) }
\begin{bmatrix} \bar U & 0 \cr 0 & I_{n-k} \end{bmatrix} 
\Ss^{n-k+t}_+
\begin{bmatrix} \bar U & 0 \cr 0 & I_{n-k} \end{bmatrix}^T.
\eeq
\end{lem}
\begin{proof}
The result in \cite{kriswolk:09} assumes that $\bar U ^T \bar
U = I$. The extension to $\bar U$ having full column rank
follows from taking the compact QR-factorization $\bar U=QR$,
where $Q^TQ=I$ and $R$ is nonsingular.
\end{proof}

We can now find an expression for the face defined by a given clique in
the graph. Without loss of generality, we can assume that
$\alpha = 1{:}k \subseteq 1{:}n$, $|\alpha| = k$.
\begin{thm}[\cite{kriswolk:09}]
\label{thm:onecliquered}
Let $D \in \En$, with embedding dimension $r$. 
Let $\alpha := 1{:}k$, 
$\bar D := D[\alpha] \in \Ek$ with embedding dimension $t$,
and $B := \KK^\dagger(\bar D) = \bar U_B S \bar U_B^T$,
where $\bar U_B \in \MM^{k\times t}$, $\bar U_B$ having full
column rank, and $S \in \Ss^t_{++}$. 
Furthermore, let $U_B := 
\begin{bmatrix} 
\bar U_B & \frac 1{\sqrt{k}} e 
\end{bmatrix}
\in \MM^{k\times (t+1)}$,  
$U := \begin{bmatrix} U_B & 0 \cr 0 & I_{n-k}  \end{bmatrix}$,
and let $\begin{bmatrix} V & \frac{U^Te}{\|U^Te\|}  \end{bmatrix}
\in \MM^{n-k+t+1}$ be orthogonal.
Then
\beq
\label{eq:UBY}
\face \KK^\dagger\left(\Ekyb\right) 
=\left( U \Ss_+^{n-k+t+1} U^T \right) \cap \Sc
= (U V) \Ss_+^{n-k+t} (U V)^T.
\eeq
\end{thm}
\begin{proof}
As in Lemma \ref{lem:rangeYbar},
the result in \cite{kriswolk:09} assumes that $\bar U ^T \bar
U = I$. The extension follows as in the proof of the Lemma.
\end{proof}
In Theorem \ref{thm:onecliquered} we can make various choices for $S$ and
thus change the choice of $\bar U_B$.
An interesting choice for $\bar U_B$ allows for
a representation for the points in the clique.
\begin{cor}
\label{cor:onecliquered}
Let $D, r, \alpha, \bar D, t$ be defined as in Theorem
\ref{thm:onecliquered}.
Let $B := \KK^\dagger(\bar D) = P_B P_B^T$,
where $P_B \in \MM^{k\times t}$ is full column rank.
Furthermore, let  $Q$ be orthogonal, $U_B := 
\begin{bmatrix} 
P_BQ & \frac 1{\sqrt{k}} e 
\end{bmatrix}
\in \MM^{k\times (t+1)}$,  
$U := \begin{bmatrix} U_B & 0 \cr 0 & I_{n-k}  \end{bmatrix}$,
and let $\begin{bmatrix} V & \frac{U^Te}{\|U^Te\|}  \end{bmatrix}
\in \MM^{n-k+t+1}$ be orthogonal.
Then \eqref{eq:UBY} holds, and
the rows of $P_BQ$ provide a relative representation of the points
in the clique $\alpha$, i.e. 
\[
\KK((P_BQ)(P_BQ)^T)=D[\alpha].
\]
\end{cor}
\begin{proof}
We just need to use $S=I_t=QQ^T$ in the expression for $B$ in the hypothesis
of Theorem \ref{thm:onecliquered}; e.g., we could use the compact
spectral decomposition $B=W\Lambda W^T$ and set $P_B=W\Lambda^{1/2}$. Then
$\KK((P_BQ)(P_BQ)^T)= \KK(P_BP_B^T)=\KK(B) =D[\alpha]$.
\end{proof}
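
A sketch of Corollary~\ref{cor:onecliquered}: the relative positions of a
clique are obtained from the compact spectral decomposition of
$B=\KK^\dagger(\bar D)$, and they reproduce the clique's distances.
Illustrative only.
\begin{verbatim}
import numpy as np

def K(Y):
    d = np.diag(Y)
    return d[:, None] + d[None, :] - 2 * Y

def clique_points(Dbar, tol=1e-9):
    # B = K^dagger(Dbar) = P_B P_B^T via the compact spectral
    # decomposition; the rows of P_B are relative positions
    k = Dbar.shape[0]
    J = np.eye(k) - np.ones((k, k)) / k
    lam, Q = np.linalg.eigh(-0.5 * J @ Dbar @ J)
    pos = lam > tol
    return Q[:, pos] * np.sqrt(lam[pos])

P = np.random.randn(5, 2)
D = K(P @ P.T)                       # distances of a 5-point clique
PB = clique_points(D)
assert np.allclose(K(PB @ PB.T), D)  # the relative positions reproduce D
\end{verbatim}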

The following result provides expressions for the face for the union of
two cliques.
\begin{thm}[\cite{kriswolk:09}]
\label{thm:interclique}
Let $D \in \En$ with embedding dimension $r$ and
define the sets of positive integers
\beq
\label{eq:ordcliques}
\begin{array}{c}
 \alpha_1  :=  1{:}(\bar k_1 + \bar k_2), \quad
  \alpha_2  :=  (\bar k_1 + 1){:}(\bar k_1 + \bar k_2 + \bar k_3)
   \subseteq  1{:}n,  \\
 k_1  :=  |\alpha_1| = \bar k_1 + \bar k_2, \quad
  k_2  :=  |\alpha_2| = \bar k_2 + \bar k_3, \\
k := \bar k_1 + \bar k_2 + \bar k_3.
\end{array}
\eeq
For $i=1,2$, let $\bar D_i := D[\alpha_i] \in \EE^{k_i}$
with embedding dimension $t_i$,
and $B_i := \KK^\dagger(\bar D_i) = \bar U_i S_i \bar U_i^T$,
where $\bar U_i \in \MM^{k_i\times t_i}$, $\bar U_i^T \bar U_i = I_{t_i}$,
$S_i \in \Ss^{t_i}_{++}$, and
$U_i := 
\begin{bmatrix} 
\bar U_i & \frac 1{\sqrt{k_i}} e 
\end{bmatrix}
\in \MM^{k_i \times (t_i+1)}$.
Let $t$ and $\bar U \in \MM^{k \times (t+1)}$ satisfy
\beq
\label{eq:rangeU}
\RR(\bar U) = 
\RR \left(
\begin{bmatrix} U_1 & 0 \cr 0 & I_{\bar k_3}  \end{bmatrix}
\right)
\cap
\RR \left(
\begin{bmatrix} I_{\bar k_1} & 0 \cr 0 & U_2 \end{bmatrix}
\right), \mbox{  with  }  \bar U^T \bar U = I_{t+1}.
\eeq
Let
$U := \begin{bmatrix} \bar U & 0 \cr 0 & I_{n-k} \end{bmatrix}
  \in \MM^{n \times (n-k+t+1)}$
and 
$\begin{bmatrix} V & \frac{U^Te}{\|U^Te\|} \end{bmatrix}
\in \MM^{n-k+t+1}$ be orthogonal. Then
\beq
\label{eq:UBY2}
\bigcap_{i=1}^2
\face \KK^\dagger\left(\En(\alpha_i,\bar D_i)\right)
=\left( U \Ss_+^{n-k+t+1} U^T \right) \cap \Sc
= (U V) \Ss_+^{n-k+t} (U V)^T.
\eeq
\qed
\end{thm}
%----------------------------------------------------
\subsection{Rigidity and Primal-Dual Approach}
%----------------------------------------------------
\index{Rigidity}
First, we summarize the definitions and characterizations of global and
universal rigidity in terms of the matrix
$\Omega$ and its co-rank.

We then present our algorithm of facial reduction for the
Gram matrix as simultaneously
building the conjugate face for the rigidity matrix.

%----------------------------------------------------
\subsection{Nonsingular Reduction with Intersection Embedding Dimension $r$}
%----------------------------------------------------
\index{nonsingular reduction}

We need the following technical result on the
intersection of two structured subspaces.
\begin{lem}[\cite{kriswolk:09}]
\label{lem:twocliquered}
Let
\[
U_1:=\begin{bmatrix} 
  U^{\prime}_1  \cr 
  U^{\prime\prime}_1 
\end{bmatrix}, \quad
U_2:=\begin{bmatrix} 
   U^{\prime\prime}_2 \cr 
   U^{\prime}_2
\end{bmatrix}, \quad
\hat U_1:=\begin{bmatrix} 
  U_1^{11} & 0 \cr 
  U_1^{12} & 0 \cr 
  0 & I
\end{bmatrix}, \quad
\hat U_2:=\begin{bmatrix} 
  I & 0 \cr 
  0 & U_2^{12} \cr
  0 & U_2^{22}
\end{bmatrix}
\]
be appropriately blocked with $U_1^{12}, U_2^{12} \in \MM^{k \times l}$
full column rank and $\RR(U_1^{12}) = \RR(U_2^{12})$.
Furthermore, let
\beq
\label{eq:U1U2}
\bar U_1 := 
\begin{bmatrix} 
  U_1^\prime\cr
  U_1^{\prime\prime}\cr
  U_2^{\prime} (U_2^{\prime\prime})^\dagger U_1^{\prime\prime}
\end{bmatrix}, \quad
\bar U_2 := 
\begin{bmatrix} 
  U_1^{\prime} (U_1^{\prime\prime})^\dagger U_2^{\prime\prime}\cr
  U_2^{\prime\prime}\cr
  U_2^\prime
\end{bmatrix}.
\eeq
Then $\bar U_1$ and $\bar U_2$ are full column rank and satisfy
\[
%\label{eq:NrangeUs}
\RR (\hat U_1) \cap  \RR (\hat U_2) =   
\RR\left(\bar U_1 \right) = \RR\left(\bar U_2 \right).
\]
Moreover, if $e_l \in \R^l$ is the $l^\mathrm{th}$ standard unit vector, and
$U_ie_{l} = \alpha_i e$, for some $\alpha_i \neq 0$, for $i=1,2$, then
$\bar U_ie_{l} = \alpha_i e$, for $i=1,2$.
\qed
\end{lem}


The following key result shows that we can complete the distances in the
union of two cliques provided that their intersection has embedding dimension equal to $r$.
\begin{thm} [\cite{kriswolk:09}]
\label{thm:twocliquered}
Let the hypotheses of Theorem~\ref{thm:interclique} hold.  
Let 
\[
  \beta \subseteq \alpha_1 \cap \alpha_2, \quad 
  \bar D := D[\beta], \quad
  B := \KK^\dagger(\bar D), \quad \bar U_\beta := \bar U[\beta,:],  
\]
where 
$\bar U  \in \MM^{k \times (t+1)}$
satisfies equation~\eqref{eq:rangeU}.
Let 
$\begin{bmatrix} \bar V & \frac{\bar U^Te}{\|\bar U^Te\|} \end{bmatrix}
\in \MM^{t+1}$ be orthogonal.  Let 
\beq
\label{eq:Zcalc1}
Z:= (J \bar U_\beta \bar V)^\dagger B ((J \bar U_\beta \bar V)^\dagger)^T. 
\eeq
If the embedding dimension for $\bar D$ is $r$,  then 
$t = r$, $Z \in \Ss^{r}_{++}$ is the unique 
solution of the equation
\begin{equation}
  \label{eq:Zbeta}
  (J \bar U_\beta \bar V) Z (J \bar U_\beta \bar V)^T = B,
\index{$J$, orthogonal projection onto $\{e\}^\perp$}
\end{equation}
and
\beq
\label{eq:DKKUVZ}
D[\alpha_1 \cup \alpha_2] = \KK\left((\bar U \bar V)Z(\bar U \bar V)^T\right).
\eeq
\qed
\end{thm}
The following result shows that if we know the minimal face of $\Ss^n_+$ containing 
$\KK^\dagger(D)$, and we know a small submatrix of $D$, then we can compute 
a set of points in $\R^r$ that generate $D$ by solving a small equation.  

\begin{cor} [\cite{kriswolk:09}]
\label{cor:finalUbar}
Let $D \in \En$ with embedding dimension $r$, and let $\beta \subseteq 1{:}n$.
Let $U \in \MM^{n \times (r+1)}$ satisfy 
\[
	\face \KK^\dagger\left(D\right) 
	=
	\left( U \Ss_+^{r+1} U^T \right) \cap \Sc,
\]
let $U_\beta := U[\beta,:]$, 
and let $\begin{bmatrix} V & \frac{U^Te}{\|U^Te\|} \end{bmatrix}
\in \MM^{r+1}$ be orthogonal.
If $D[\beta]$ has embedding dimension $r$, then 
\[
	(J U_\beta V) Z (J U_\beta V)^T = \KK^\dagger(D[\beta])
\]
has a unique solution $Z \in \Ss^r_{++}$, and $D = \KK(PP^T)$, 
where $P := UVZ^{1/2} \in \R^{n \times r}$.
\qed
\end{cor}
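
A sketch of Corollary~\ref{cor:finalUbar}, using \eqref{eq:Zcalc1} to
solve the small equation and recover $P=UVZ^{1/2}$; here $U$ is assembled
from the compact spectral decomposition of $\KK^\dagger(D)$ together with
the column $e/\sqrt{n}$, and $\beta$ indexes a principal submatrix with
embedding dimension $r$. The data and sizes are illustrative.
\begin{verbatim}
import numpy as np

def K(Y):
    d = np.diag(Y)
    return d[:, None] + d[None, :] - 2 * Y

def recover_points(D, U, beta, r):
    # solve (J U_beta V) Z (J U_beta V)^T = K^dagger(D[beta]) via
    # the pseudoinverse formula (eq:Zcalc1); return P = U V Z^{1/2}
    n = D.shape[0]
    q, _ = np.linalg.qr(np.hstack([U.T @ np.ones((n, 1)),
                                   np.random.randn(r + 1, r)]))
    V = q[:, 1:]                     # [U^T e/||U^T e||, V] orthogonal
    k = len(beta)
    Jk = np.eye(k) - np.ones((k, k)) / k
    B = -0.5 * Jk @ D[np.ix_(beta, beta)] @ Jk   # K^dagger(D[beta])
    Md = np.linalg.pinv(Jk @ U[beta] @ V)
    Z = Md @ B @ Md.T                            # unique PD solution
    lam, Q = np.linalg.eigh(Z)
    return U @ V @ (Q * np.sqrt(np.maximum(lam, 0)) @ Q.T)

np.random.seed(2)
n, r = 8, 2
P0 = np.random.randn(n, r); P0 -= P0.mean(axis=0)
D = K(P0 @ P0.T)
J = np.eye(n) - np.ones((n, n)) / n
lam, Q = np.linalg.eigh(-0.5 * J @ D @ J)
U = np.hstack([Q[:, lam > 1e-9], np.ones((n, 1)) / np.sqrt(n)])
P = recover_points(D, U, beta=list(range(4)), r=r)
assert np.allclose(K(P @ P.T), D)
\end{verbatim}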
We now show that we can combine two cliques using the relative
point representations of each.
\begin{thm}
\label{thm:twocliquepointunion}
Let the hypotheses of Theorem~\ref{thm:interclique} hold, and following
Corollary \ref{cor:onecliquered}, for $i=1,2$, let
$B_i=P_iP_i^T$ be full column rank factorizations, so that the rows
of $P_i$ provide relative positions for the points in the cliques
$\alpha_i$; and partition
\[
P_1:=\begin{bmatrix} 
  P^{\prime}_1  \cr 
  P^{\prime\prime}_1 
\end{bmatrix}, \quad
P_2:=\begin{bmatrix} 
   P^{\prime\prime}_2 \cr 
   P^{\prime}_2
\end{bmatrix}, \quad
\hat P_1:=\begin{bmatrix} 
  P_1^{11} & 0 \cr 
  P_1^{12} & 0 \cr 
  0 & I
\end{bmatrix}, \quad
\hat P_2:=\begin{bmatrix} 
  I & 0 \cr 
  0 & P_2^{12} \cr
  0 & P_2^{22}
\end{bmatrix}.
\]
Furthermore, let
\beq
\label{eq:U1U2two}
\bar P_1 := 
\begin{bmatrix} 
  P_1^\prime\cr
  P_1^{\prime\prime}\cr
  P_2^{\prime} (P_2^{\prime\prime})^\dagger P_1^{\prime\prime}
\end{bmatrix}, \quad
\bar P_2 := 
\begin{bmatrix} 
  P_1^{\prime} (P_1^{\prime\prime})^\dagger P_2^{\prime\prime}\cr
  P_2^{\prime\prime}\cr
  P_2^\prime
\end{bmatrix}.
\eeq
If the embedding dimension of $\bar D$ is $r$, 
then:  $t=r$;
$Q_1:= (P_1^{\prime\prime})^\dagger P_2^{\prime\prime}$ and
  $Q_2:= (P_2^{\prime\prime})^\dagger P_1^{\prime\prime}$ are both
  orthogonal;
$\bar P_1$ and $\bar P_2$ are full column rank and their rows provide
relative representations for the points in the union of the cliques
$\alpha_i, i=1,2$, i.e.
\beq
\label{eq:DKKP}
D[\alpha_1 \cup \alpha_2] = \KK(\bar P_i\bar P_i^T),\quad i=1,2.
\eeq
\end{thm}
\begin{proof}
From Lemma \ref{lem:twocliquered}, we have that $\RR(\bar P_1)=\RR (\bar P_2)$. 
Therefore, $\RR(P_1^{\prime\prime})=\RR( P_2^{\prime\prime})$. 
This means that we can apply the projections onto these ranges and get 
\[P_2^{\prime\prime} (P_2^{\prime\prime})^\dagger 
P_1^{\prime\prime}= P_1^{\prime\prime}; \quad
  P_1^{\prime\prime} (P_1^{\prime\prime})^\dagger 
  P_2^{\prime\prime}= P_2^{\prime\prime}.
  \]
  Therefore, $\bar P_2$ is obtained using 
  $Q_1= (P_1^{\prime\prime})^\dagger P_2^{\prime\prime}$ and
  the multiplication $P_1Q_1$, since
  $\bar P_2 = \begin{bmatrix} P_1Q_1 \cr P_2^{\prime} \end{bmatrix}$.
  Similarly, $\bar P_1$ is obtained using 
  $Q_2= (P_2^{\prime\prime})^\dagger P_1^{\prime\prime}$ and
  the multiplication $P_2Q_2$, since
  $\bar P_1 = \begin{bmatrix} P_1^{\prime} \cr P_2Q_2 \end{bmatrix}$.

  Since 
  \[
  P_i^Te=0, \quad
  \bar D=\KK(P_i^{\prime\prime}(P_i^{\prime\prime})^T), \quad \quad i=1,2,
  \]
  we conclude that both $Q_1$ and $Q_2$ are orthogonal.
\end{proof}
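For concreteness, the merge in \eqref{eq:U1U2two} can be sketched in the
same {\tt numpy} style (ours; the block names mirror the partition in
Theorem~\ref{thm:twocliquepointunion}).
\begin{verbatim}
# Sketch (ours): form bar P_1 from the partitioned factors in eq. (U1U2two).
import numpy as np

def merge_two_cliques(P1p, P1pp, P2pp, P2p):
    # P1 = [P1p; P1pp], P2 = [P2pp; P2p]; the double-primed blocks are the
    # common points alpha_1 cap alpha_2 in each clique's own coordinates
    Q2 = np.linalg.pinv(P2pp) @ P1pp           # Q_2 of the theorem
    return np.vstack([P1p, P1pp, P2p @ Q2])    # rows of bar P_1
\end{verbatim}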
\begin{rem}
There are many ways to find the full column rank factorizations
$B_i=P_iP_i^T$ in Theorem \ref{thm:twocliquepointunion}, e.g., the
compact spectral decomposition, a partial Cholesky factorization, or
a compact $QR$ factorization.
\end{rem}
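Two of these factorizations are sketched below in the same style (ours;
the rank $r$ is assumed known).
\begin{verbatim}
# Sketch (ours): full column rank factorizations B = P P^T of a PSD matrix B.
import numpy as np

def factor_spectral(B, r):
    # compact spectral factor P = V_r Lambda_r^{1/2}
    w, V = np.linalg.eigh(B)                   # ascending eigenvalues
    return V[:, -r:] * np.sqrt(np.maximum(w[-r:], 0.0))

def factor_pivoted_cholesky(B, r):
    # partial (pivoted) Cholesky; rows are permuted back at the end
    B = B.copy(); n = B.shape[0]
    P = np.zeros((n, r)); piv = np.arange(n)
    for j in range(r):
        i = j + int(np.argmax(np.diag(B)[j:]))           # pivot row/column
        B[[j, i], :] = B[[i, j], :]; B[:, [j, i]] = B[:, [i, j]]
        P[[j, i], :] = P[[i, j], :]; piv[[j, i]] = piv[[i, j]]
        P[j, j] = np.sqrt(B[j, j])
        P[j+1:, j] = B[j+1:, j] / P[j, j]
        B[j+1:, j+1:] -= np.outer(P[j+1:, j], P[j+1:, j])
    Pout = np.zeros_like(P); Pout[piv] = P               # undo the pivoting
    return Pout
\end{verbatim}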

%----------------------------------------------------
\subsection{Degenerate Case I}
\label{sect:degI}
%----------------------------------------------------

We now show that we can combine three cliques using the relative
point representations of each, in the case where the intersections of
the cliques are degenerate.
We first note the effect of a permutation of the points $P$ on the Gram
matrix $B=PP^T$ and the \EDM $D=\KK(B)$. 
Let $\Pi$ be an $n\times n$ permutation matrix, so that 
$\Pi P$ denotes a permutation of the order of the points (nodes).
We see that
\beq
\label{eq:permutP}
\Pi B \Pi^T = (\Pi P)(\Pi P)^T, \quad
\KK(\Pi B \Pi^T) = 
\diag(\Pi B \Pi^T) e^T+ e\diag(\Pi B \Pi^T)^T - 2\Pi B \Pi^T = \Pi D\Pi^T.
\eeq
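A quick numerical check (ours) of \eqref{eq:permutP}:
\begin{verbatim}
# Check (ours): permuting the points permutes B and D consistently.
import numpy as np

def K(B):
    d = np.diag(B)[:, None]
    return d + d.T - 2.0 * B                   # diag(B)e^T + e diag(B)^T - 2B

rng = np.random.default_rng(0)
n, r = 6, 3
P = rng.standard_normal((n, r))
Pi = np.eye(n)[rng.permutation(n)]             # permutation matrix
B = P @ P.T
assert np.allclose(K(Pi @ B @ Pi.T), Pi @ K(B) @ Pi.T)
\end{verbatim}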

The following result provides expressions for the face for the union of
three cliques.
\begin{thm}
\label{thm:3interclique}
Let $D \in \En$ with embedding dimension $r$ and
define the sets of positive integers
\beq
\label{eq:ordcliques3}
\begin{array}{l}
 \alpha  =  \alpha_{1}  \cup   \alpha_{2}   \cup   \alpha_{3} =
   \alpha_{11}  \cup   \alpha_{12}   \cup   \alpha_{22} \cup
      \alpha_{23}   \cup   \alpha_{33} \cup    \alpha_{13}=1{:}n,\\ 
 \alpha_{1}  =  \alpha_{11}  \cup   \alpha_{12}   \cup   \alpha_{13},~~
 \alpha_{2}  =  \alpha_{12}  \cup   \alpha_{22}   \cup   \alpha_{23}
    = (k_{11}+1){:}k_{23},~~
 \alpha_{3}  =  \alpha_{23}  \cup   \alpha_{33}   \cup   \alpha_{13}
                 =(k_{22}+1){:}k_{13},\\ 
 \alpha_{11}  =  1{:}k_{11},  ~~
\alpha_{12}  =  (k_{11}+1){:}k_{12},~~
 \alpha_{22}  =  (k_{12}+1){:}k_{22},\\
 \qquad \alpha_{23}  =  (k_{22}+1){:}k_{23},~~
 \alpha_{33}  =  (k_{23}+1){:}k_{33},~~
 \alpha_{13}  =  (k_{33}+1){:}k_{13},\\
1<k_{11}< k_{12}<k_{22}< k_{23}<k_{33}<k_{13}=n. 
\end{array}
\eeq
Let 
\[
P=P[\alpha]=P[\{ 1,\ldots, k_{11},\ldots,  k_{12},\ldots, k_{22},\ldots
,k_{23},\ldots, k_{33},\ldots, k_{13}\}]
 \in \MM^{n\times r}
\]
 denote the matrix whose rows are the corresponding $n$ points.
For $i=1,2,3$, let $\bar D_i := D[\alpha_i] \in \EE^{|\alpha_i|}$
with embedding dimension $t_i$,
and $B_i := \KK^\dagger(\bar D_i) = \bar U_i S_i \bar U_i^T$,
where $\bar U_i \in \MM^{|\alpha_i|\times t_i}$, 
$\bar U_i^T \bar U_i = I_{t_i}$,
$S_i \in \Ss^{t_i}_{++}$, and
$U_i := 
\begin{bmatrix} 
\bar U_i & \frac 1{\sqrt{|\alpha_i|}} e 
\end{bmatrix}
\in \MM^{|\alpha_i| \times (t_i+1)}$.
Let $\Pi_i, i=1,2,3$ denote the permutations that return the original
ordering in $P$:
\[
\begin{array}{l}
P=\Pi_1 P[\{ 1:k_{12},(k_{33}+1):k_{13},(k_{12}+1):k_{33} \}],\\
P=\Pi_2 P[\{ (k_{11}+1):k_{23},  1:k_{11}, (k_{23}+1):k_{13} \}],\\
P=\Pi_3 P[\{ (k_{22}+1):k_{13}, 1:k_{22} \}].\\
\end{array}
\]
Let $t$ and $\bar U \in \MM^{n \times (t+1)}$ satisfy
$\bar U^T \bar U = I_{t+1}$ and
\beq
\label{eq:rangeU3}
\begin{array}{rcl}
\RR(\bar U) 
&=& 
\RR \left( \Pi_1
\begin{bmatrix} U_1 & 0 \cr 0 & I_{n-|\alpha_1|}  \end{bmatrix}
\right)
\cap
\RR \left(
\begin{bmatrix} I_{|\alpha_{11}|} & 0 & 0  \cr 0 & U_2 & 0 \cr
            0 & 0 & I_{|\alpha_{33} \cup \alpha_{13}|} \end{bmatrix}
\right) 
\cap
\RR \left(
\begin{bmatrix} I_{n-|\alpha_{3}|} & 0   \cr 0 & U_3 \end{bmatrix}
\right)
\\&=&  \cap_{i=1}^3 
\RR \left( \Pi_i
\begin{bmatrix} U_i & 0 \cr 0 & I_{n-|\alpha_i|}  \end{bmatrix}
\right)
\end{array}
\eeq
Let
$U := \begin{bmatrix} \bar U & 0 \cr 0 & I_{n-k} \end{bmatrix}
  \in \MM^{n \times (n-k+t+1)}$
and 
$\begin{bmatrix} V & \frac{U^Te}{\|U^Te\|} \end{bmatrix}
\in \MM^{n-k+t+1}$ be orthogonal. Then
\beq
\label{eq:UBY3}
\bigcap_{i=1}^3
\face \KK^\dagger\left(\En(\alpha_i,\bar D_i)\right)
=\left( U \Ss_+^{n-k+t+1} U^T \right) \cap \Sc
= (U V) \Ss_+^{n-k+t} (U V)^T.
\eeq
\qed
\end{thm}




\begin{rem}
\label{rem:threecliquepointunion}
Let the hypotheses of Theorem~\ref{thm:3interclique} hold, and following
Corollary \ref{cor:onecliquered}, for $i=1,2,3$, let
$B_i=P_iP_i^T$ be full column rank factorizations, so that the rows
of $P_i$ provide relative positions for the points in the cliques
$\alpha_i$; and partition
\[
P_1:=\begin{bmatrix} 
  P^{11}_1  \cr 
  P^{12}_1 \cr
  P^{13}_1 
\end{bmatrix}, \quad
P_2:=\begin{bmatrix} 
   P^{12}_2 \cr 
   P^{22}_2\cr
   P^{23}_2\cr
\end{bmatrix}, \quad
P_3:=\begin{bmatrix} 
   P^{23}_3 \cr 
   P^{33}_3\cr
   P^{13}_3\cr
\end{bmatrix},
\]
so that the matrices $P_i$ are $k_i\times r$ with $k_i>2r, \forall i$
and the pairs of points
\beq
\label{eq:intmatrices}
   \left(P^{12}_1, P^{12}_2\right),
   \left(P^{23}_2, P^{23}_3\right),
   \left(P^{13}_1, P^{13}_3\right),
\eeq
coincide with the intersections of the cliques
$\left(\alpha_1\cap \alpha_2 \right),
\left(\alpha_2\cap \alpha_3 \right),
\left(\alpha_1\cap \alpha_3 \right)$, respectively. Moreover, the matrices
in \eqref{eq:intmatrices} are all nonsingular,
and $P_i^Te=0$ for all $i$.
Let 
\[
U_i=\begin{bmatrix} P_i & e\end{bmatrix}, 
\hat U_i=\begin{bmatrix} U_i & 0 \cr 0 & I_{n-|\alpha_i|}
\end{bmatrix}, 
V_i=\begin{bmatrix} e^T \cr -I \end{bmatrix}, \quad \forall i.
\]
Then
\beq
\label{eq:rangesUhati}
\RR\left(\hat U_i\right) 
        = \RR\left(
       \begin{bmatrix} P_i  & 0  &  \left(1-\frac n{|\alpha_i|}\right)e&  e  \cr
                           0   & V_i & e& e 
              \end{bmatrix}
         \right)
        = \RR\left(
          \begin{bmatrix} P_i  & 0  & \left(1-\frac n{|\alpha_i|}\right)e&e  \cr
                           0   & e^T & 1&1 \cr
                           0   & -I_{n-|\alpha_i|-1} & e& e 
              \end{bmatrix}
         \right).
\eeq
Moreover, the last column $e$ of 
          $\hat V_i=
          \begin{bmatrix} P_i  & 0  & \left(1-\frac n{|\alpha_i|}\right)e&e  \cr
                           0   & e^T & 1&1 \cr
                           0   & -I_{n-|\alpha_i|-1} & e& e 
              \end{bmatrix}$
is orthogonal to the other columns of $\hat V_i$.
Therefore, we can ignore the column of ones, and
we need only find the intersection of the ranges of the
three matrices
$\Pi_i \hat V_i$, $i=1,2,3$; each has six blocks of rows:
\beq
\label{eq:3Vis}
\begin{array}{c}
\begin{bmatrix} 
  P_1^{11} & 0 & 0 & 0 & \left(1-\frac n{|\alpha_1|}\right)e\cr 
  \fbox{$P_1^{12}$} & 0 & 0 & 0 & \left(1-\frac n{|\alpha_1|}\right)e\cr 
  0 & 
     \begin{bmatrix} e^T \cr -I \end{bmatrix}& 
     \begin{bmatrix} e^T \cr 0 \end{bmatrix}& 
     \begin{bmatrix} e^T \cr 0 \end{bmatrix}
                   &e   \cr
  0 & 0& -I & 0  &e  \cr
  0 & 0& 0 & -I  &e  \cr
  \fbox{\fbox{\fbox{$P_1^{13}$}}} & 0 & 0 & 0 & \left(1-\frac n{|\alpha_1|}\right)e\cr 
\end{bmatrix}, \quad
\begin{bmatrix} 
     \begin{bmatrix} e^T \cr -I \end{bmatrix}& 
    0&    
     \begin{bmatrix} e^T \cr 0 \end{bmatrix}& 
     \begin{bmatrix} e^T \cr 0 \end{bmatrix}
  & e\cr 
  0 & \fbox{$P_2^{12}$}    & 0 & 0 & \left(1-\frac n{|\alpha_2|}\right)e\cr
  0 & P_2^{22}      & 0 & 0 & \left(1-\frac n{|\alpha_2|}\right)e\cr
  0 & \fbox{\fbox{$P_2^{23}$}}    & 0 & 0 & \left(1-\frac n{|\alpha_2|}\right)e\cr
  0 & 0& -I & 0   & e  \cr
  0 & 0& 0 & -I    & e \cr
\end{bmatrix}, \\
\begin{bmatrix} 
     \begin{bmatrix} e^T \cr -I \end{bmatrix}& 
     \begin{bmatrix} e^T \cr 0 \end{bmatrix}& 
     \begin{bmatrix} e^T \cr 0 \end{bmatrix}&  0  & e\cr
  0 & -I    & 0  & 0 & e\cr
  0 & 0    & -I & 0 & e\cr
  0 & 0    & 0 &  \fbox{\fbox{$P_3^{23}$}}  & \left(1-\frac n{|\alpha_3|}\right)e\cr
  0 & 0&  0 & P_3^{33}      & \left(1-\frac n{|\alpha_3|}\right)e\cr
  0 & 0& 0&  \fbox{\fbox{\fbox{$P_3^{13}$}}}   & \left(1-\frac n{|\alpha_3|}\right)e  \cr
\end{bmatrix}.
\end{array}
\eeq
We have put boxes around the blocks that overlap/coincide.
Setting $\delta_i := 1-\frac n{|\alpha_i|}$, $i=1,2,3$,
we immediately get the three homogeneous (block) equations
\[
\begin{array}{rcl}
\begin{bmatrix}
P_1^{12} & -P_2^{12} &  & \delta_1 e &  -\delta_2 e & \cr
 & P_2^{23} & -P_3^{23}  &  & \delta_2 e & -\delta_3 e  \cr
  P_1^{13}  &&-P_3^{13}  & \delta_1 e & & -\delta_3 e
\end{bmatrix}
\begin{pmatrix}
x_1^1\cr
x_2^2\cr
x_3^4\cr
x_1^5\cr
x_2^5\cr
x_3^5\cr
\end{pmatrix}
=0.
\end{array}
\]
We now apply block Gaussian elimination to this system, assuming that
the cliques are ordered so that the blocks with the best
condition numbers are chosen first as the pivot blocks.
\[
\begin{array}{rcl}
\begin{bmatrix}
I & -(P_1^{12})^{-1} P_2^{12} &  & \delta_1 (P_1^{12})^{-1} e &  -\delta_2 (P_1^{12})^{-1} e & \cr
 & P_2^{23} & -P_3^{23}  &  & \delta_2 e & -\delta_3 e  \cr
   &P_1^{13} (P_1^{12})^{-1} P_2^{12} &-P_3^{13}  & \delta_1 e 
                        -P_1^{13} \delta_1 (P_1^{12})^{-1} e &  \delta_2 P_1^{13} (P_1^{12})^{-1} e & -\delta_3 e
\end{bmatrix}
\end{array}
\]
\[
%\small
\scriptsize
{
\begin{array}{rcl}
\begin{bmatrix}
I & & -(P_1^{12})^{-1}P_2^{12}(P_2^{23})^{-1}P_3^{23}  & \delta_1 (P_1^{12})^{-1} e &  -\delta_2 (P_1^{12})^{-1} e +\delta_2(P_1^{12})^{-1}P_2^{12}(P_2^{23})^{-1}e & -\delta_3(P_1^{12})^{-1}P_2^{12}(P_2^{23})^{-1}e  \cr
 & I& -(P_2^{23})^{-1} P_3^{23}  &  & \delta_2 (P_2^{23})^{-1} e & -\delta_3 (P_2^{23})^{-1} e  \cr
   & & 
\fbox{$P_1^{13}(P_1^{12})^{-1}P_2^{12}(P_2^{23})^{-1}P_3^{23}-P_3^{13}$} 
      & \delta_1 e -P_1^{13}\delta_1(P_1^{12})^{-1} e &  \delta_2 P_1^{13} (P_1^{12})^{-1} e  -\delta_2P_1^{13}(P_1^{12})^{-1}P_2^{12}(P_2^{23})^{-1}e &  \delta_3P_1^{13}(P_1^{12})^{-1}P_2^{12}(P_2^{23})^{-1}e -\delta_3 e
\end{bmatrix}
\end{array}
}
\]
Then the union of the three cliques is rigid if and only if 
\[
\fbox{$P_1^{13}(P_1^{12})^{-1}P_2^{12}(P_2^{23})^{-1}P_3^{23}-P_3^{13}$} 
\quad \mbox{is nonsingular,}
\]
since rigidity holds exactly when the nullspace of the homogeneous
system above has dimension $3$.
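In the sketch style used above (ours; the blocks are the nonsingular
intersection matrices from \eqref{eq:intmatrices}), the rigidity test reads:
\begin{verbatim}
# Sketch (ours): test the rigidity criterion for the union of three cliques.
import numpy as np

def three_clique_rigid(P1_12, P1_13, P2_12, P2_23, P3_23, P3_13, tol=1e-9):
    # boxed Schur-complement block from the block elimination above
    M = (P1_13 @ np.linalg.solve(P1_12, P2_12)
               @ np.linalg.solve(P2_23, P3_23) - P3_13)
    return np.linalg.matrix_rank(M, tol) == M.shape[0]   # nonsingular?
\end{verbatim}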


\end{rem}



%----------------------------------------------------
\subsection{Nearest \EDMt}
\label{sect:nearestedm}
%----------------------------------------------------
Suppose that we have a clique $\alpha$ corresponding to the \EDM
$\bar D$. Then we can find the smallest face containing 
$\En(\alpha,\bar D)$ using $B=\KK^\dagger(\bar D)$; see \cite{kriswolk:09}.
We now consider the case where we are given a possibly noisy
\EDM and would like to find a best approximation, or the
nearest \EDMc i.e.,~we want to find a best approximation of 
$B=\KK^\dagger(\bar D)$ with the correct rank $r$.
\index{nearest \EDM}
For this purpose, we let $D \in \E^n$ with embedding dimension $r$, and
suppose that $D_\epsilon = D + N_\epsilon \in \Sh \cap \NN$, where the 
off-diagonal elements of the error matrix $N_\epsilon \in \Sh$ 
are independently and identically distributed with zero 
mean and common variance.  We now look for the best approximation
to the given noisy distance matrix $D_\epsilon\,(=\bar D)$.
For example, we could do this in two steps:  we first find the least
squares solution $B_\epsilon=\KK^\dagger (D_\epsilon)$, which may not
be positive semidefinite and may have the wrong rank; we then use the
truncated spectral decomposition
$B_\epsilon=\KK^\dagger (D_\epsilon) \approx U_\epsilon
\Sigma_\epsilon U_\epsilon^T$,
where $U_\epsilon^TU_\epsilon=I_r$, $\Sigma_\epsilon \in \Srpp$. 
We could then use the approximation $\KK( U_\epsilon\Sigma_\epsilon 
U_\epsilon^T) \approx D_\epsilon$, i.e.
\beq
\label{eq:trunceigdecomp}
D_\epsilon \approx \KK( U_\epsilon \Sigma_\epsilon  U_\epsilon ^T), \qquad \mbox{where 
$B_\epsilon =\KK^\dagger (D_\epsilon ) \approx U_\epsilon
\Sigma_\epsilon  U_\epsilon ^T$,
$U_\epsilon ^TU_\epsilon =I_r$, and $\Sigma_\epsilon  \in \Srpp$}.
\eeq
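A minimal {\tt numpy} sketch (ours) of this two-step approximation:
\begin{verbatim}
# Sketch (ours): two-step nearest-EDM approximation of eq. (trunceigdecomp).
import numpy as np

def K(B):
    d = np.diag(B)[:, None]
    return d + d.T - 2.0 * B

def nearest_edm_truncated(D_eps, r):
    n = D_eps.shape[0]
    J = np.eye(n) - np.ones((n, n)) / n
    B_eps = -0.5 * J @ D_eps @ J               # B_eps = K^dagger(D_eps)
    w, U = np.linalg.eigh(B_eps)               # ascending eigenvalues
    U_r = U[:, -r:]                            # r largest eigenpairs,
    w_r = np.maximum(w[-r:], 0.0)              # clipped to be nonnegative
    return K((U_r * w_r) @ U_r.T)              # approximate EDM, emb. dim <= r
\end{verbatim}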
Alternatively, we could solve the rank constrained \SDP
\beq
\label{eq:minxrankr}
\begin{array}{rll}
\min    &   \left \| \KK(X) - D_\epsilon \right\|  \\
\mbox{s.t.}   &   \rank (X) = r\\
              &    Xe=0 \\
              &    X \succeq 0.
\end{array}
\eeq
We can introduce $V$ as in
Corollary \ref{lem:co} and eliminate the constraints, which yields the
following problem.
\begin{prob}
The unconstrained nearest \EDM problem with embedding dimension $r$ is 
\beq
\label{eq:bestapprox}
\begin{array}{rccl}
U^*_r &\in& 
\argmin    &   \frac 12\left \| \KK_V(UU^T) - D_\epsilon \right\|^2_F \\
&&\mbox{s.t.}   &   U \in \MM^{(n-1)r}.
\end{array}
\eeq
The nearest \EDM is $D^*= \KK_V( U^*_r (U^*_r)^T)$.
\end{prob}
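The objective of \eqref{eq:bestapprox} can be evaluated as in the sketch
below (ours), assuming the composite $\KK_V(X):=\KK(VXV^T)$, where $V$ is
an $n\times(n-1)$ orthonormal basis of $\{e\}^\perp$; a local solver can
then be applied to minimize $f$.
\begin{verbatim}
# Sketch (ours): evaluate f(U) = 0.5 || K_V(U U^T) - D_eps ||_F^2,
# assuming K_V(X) = K(V X V^T) with V an orthonormal basis of {e}^perp.
import numpy as np

def K(B):
    d = np.diag(B)[:, None]
    return d + d.T - 2.0 * B

def make_V(n):
    # orthonormal basis of {e}^perp: QR of the n x (n-1) matrix [e^T; -I]
    A = np.vstack([np.ones((1, n - 1)), -np.eye(n - 1)])
    V, _ = np.linalg.qr(A)
    return V                                   # V^T e = 0, V^T V = I

def f(U, D_eps, V):
    # U is (n-1) x r; the constraints of eq. (minxrankr) are eliminated
    X = V @ (U @ U.T) @ V.T
    return 0.5 * np.linalg.norm(K(X) - D_eps, 'fro') ** 2
\end{verbatim}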

%%----------------------------------------------------
%\subsection{Noise Model}
%%----------------------------------------------------

%We have a choice of which noise model to use for noisy $D = (d_{ij})$.  Additive error would use
%\beq
%	d_{ij} = \|p_i - p_j\|^2 + \sigma \cdot \texttt{randn}.
%\eeq
%Note that $\texttt{randn} \in \NN(0,1)$, so $\sigma \cdot \texttt{randn} \in \NN(0,\sigma^2)$.
%Under this noise model, the maximum likelihood estimate is found by solving the 
%linear least-squares problem
%\beq
%	\min \{ \| \KK(Y) - D \|_F^2 \ : \ Y \succeq 0 \}.
%\eeq
%We could also have a different variance for each $d_{ij}$:
%\beq
%	d_{ij} = \|p_i - p_j\|^2 + \sigma v_{ij} \cdot \texttt{randn},
%\eeq
%where $v_{ij} \geq 0$.
%In this case, the maximum likelihood estimate is found by solving a weighted least-squares problem
%\beq
%	\min \{ \| W \circ (\KK(Y) - D) \|_F^2 \ : \ Y \succeq 0 \},
%\eeq
%where $W = (w_{ij})$ is given by
%\beq
%	w_{ij} = 
%	\left \{
%	\begin{array}{cl}
%		v_{ij}^{-1}, & \mbox{if $v_{ij} \neq 0$} \\
%		0, & \mbox{if $v_{ij} = 0$}.
%	\end{array}
%	\right.
%\eeq

%
%However, we have the difficulty that $d_{ij} < 0$ under the additive noise model, 
%which is clearly undesirable.  
%To avoid this problem, we could use the multiplicative noise model:
%\begin{eqnarray*}
%	d_{ij} & = & \|p_i-p_j\|^2 (1+\sigma\cdot\texttt{randn})^2 \\
%			& = & \|p_i-p_j\|^2 + 2\sigma\|p_i-p_j\|^2\cdot\texttt{randn} 
%					+ \sigma^2\|p_i-p_j\|^2\cdot\texttt{randn}^2.
%\end{eqnarray*}
%The multiplicative noise model is a common choice in sensor network localization.
%Numerical evidence shows that 
%$2\sigma\cdot\texttt{randn} + \sigma^2\cdot\texttt{randn}^2$
%is approximately $\NN(0,4\sigma^2)$ for $0 < \sigma < .1$.

%\begin{lem}
%Let $X$ be a random variable in $\NN(0,\sigma^2)$.  Let $0 < c < \ ??$.
%Then $Y = 2 X + X^2$ is approximately $\NN(\sigma^2,4\sigma^2+2\sigma^4)$ for all $0 < \sigma \leq c$.
%\end{lem}
%\begin{proof}

%\begin{eqnarray*}
%	\mathbf{E}[Y] 	& = & \mathbf{E}[2 X + X^2] \\
%					& = & 2 \mathbf{E}[X] + \mathbf{E}[X^2] \\
%					& = & 0 + \sigma^2 \\
%					& = & \sigma^2
%\end{eqnarray*}

%\begin{eqnarray*}
%	\mathbf{Var}[Y] 	& = & \mathbf{E}[ (Y - \mathbf{E}[Y])^2 ] \\
%					& = & \mathbf{E}[ Y^2 - 2 \sigma^2 Y + \sigma^4 ] \\
%					& = & \mathbf{E}[ Y^2 ] - \sigma^4 \\
%					& = & \mathbf{E}[ 4 X^2 + 4 X^3 + X^4 ] - \sigma^4 \\
%					& = & 4 \sigma^2 + 4 \mathbf{E}[ X^3 ] + \mathbf{E}[ X^4 ] - \sigma^4 \\
%					& = & 4 \sigma^2 + 0 + 3\sigma^4 - \sigma^4 \\
%					& = & 4 \sigma^2 + 2 \sigma^4
%\end{eqnarray*}

%

%\end{proof}

%The above lemma would imply that the maximum likelihood estimate is well approximated by the 
%solution of the weighted least-squares problem, provided that $v_{ij} \approx \|p_i-p_j\|^2$.





%%----------------------------------------------------
%\subsubsection{Noise model}
%%----------------------------------------------------
%
%Let $D \in \E^n$ with embedding dimension $r$ and suppose that $D_\epsilon = D + N_\epsilon$. We assume that there is a multiplicative error on the measured distances,
%\[
%	\hat d_{ij} = \|p_i - p_j\|_2 (1+\sigma\epsilon_{ij}),
%\]
%where $\sigma \in [0,1]$ is the noise factor, and $\epsilon_{ij}$ are 
%independently and identically distributed random variables 
%coming from the normal distribution with mean zero and variance one.
%Then 
%\[
%	D_\epsilon = \left( \hat d_{ij}^2 \right) 
%	= \left( \|p_i - p_j\|_2^2 (1+ \sigma\epsilon_{ij})^2 \right), 
%\]
%and $D = \left( \|p_i - p_j\|_2^2 \right)$, so that 
%\[
%	N_\epsilon = D_\epsilon - D 
%		= \left( \|p_i - p_j\|_2^2 (2 \sigma\epsilon_{ij}+ \sigma^2\epsilon_{ij}^2) \right).
%\]
%
%We used the \emph{multiplicative noise model}%
%	\index{noise model!multiplicative} 
%noise model for our tests on noisy problems:
%\[
%	d_{ij} = \|p_i-p_j\|(1+\sigma\varepsilon_{ij}), \quad \mbox{for all $ij \in E$},
%\]
%where $\sigma \geq 0$ represents the \emph{noise factor}%
%	\index{noise factor}%
%	\index{$\sigma$, noise factor}
%and, for all $ij \in E$, the random variable $\varepsilon_{ij}$ is normally distributed with zero mean and standard deviation one. That is, $\set{\varepsilon_{ij}}_{ij \in E}$ are uncorrelated, have zero mean and the same variance.  Here we are modelling the situation that the amount of additive noise corrupting a distance measurement between two sensors is directly proportional to the distance between the sensors.
%
%This multiplicative noise model is the one most commonly considered in sensor network localization; see, for example, \cite{Biswas:2004},\cite{Biswas:2006},\cite{Tseng:2007},\cite{Wang:2008},\cite{Pong:2009},\cite{Kim:2009,Kim:2009a}.
%For large values of $\sigma$, it is possible that $1 + \sigma\varepsilon$ is negative.  Therefore, the alternate multiplicative noise model
%\[
%	d_{ij} = \|p_i-p_j\||1+\sigma\varepsilon_{ij}|, \quad \mbox{for all $ij \in E$},
%\]
%is sometimes used.  Note, however, that in both multiplicative noise models, we have
%\beq \label{eq:noise_model}
%	d_{ij}^2 = \|p_i-p_j\|^2(1+\sigma\varepsilon_{ij})^2, \quad \mbox{for all $ij \in E$}.
%\eeq
%
%The associated least squares problem for determining the \emph{maximum likelihood}%
%	\index{maximum log-likelihood} 
%positions for $p_1,\ldots,p_n \in \R^r$ is
%\[
%	\begin{array}{lcl}
%		\mbox{minimize}   	& \displaystyle\sum_{ij \in E} v_{ij}^2 & \\
%		\mbox{subject to} 	& \|p_i-p_j\|^2(1+v_{ij})^2 = d_{ij}^2, & \mbox{for all $ij \in E$} \\
%							& \sum_{i=1}^n p_i = 0 \\
%						  	& p_1,\ldots,p_n \in \R^r.
%	\end{array}
%\]
%Let $H$ be the $0$--$1$ adjacency matrix associated with the $n$-by-$n$ partial Euclidean distance matrix $D := (d_{ij}^2)$.  Letting $V := (v_{ij}) \in \R^{n \times n}$ and $V_H := H \circ V$, we can rewrite this problem as
%\[
%	\begin{array}{lcl}
%		\mbox{minimize}   	& \norm{V_H}_F^2 & \\
%		\mbox{subject to} 	& \KK(PP^T) \circ (H + 2V_H + V_H \circ V_H) = H \circ D \\
%							& P^T e = 0 \\
%							& P \in \R^{n \times r}.
%	\end{array}
%\]
%Removing the rank constraint, we obtain the (nonlinear) semidefinite relaxation
%\beq \label{eq:noisy_problem}
%	\begin{array}{lcl}
%		\mbox{minimize}   	& \norm{V_H}_F^2 & \\
%		\mbox{subject to} 	& \KK(Y) \circ (H + 2V_H + V_H \circ V_H) = H \circ D \\
%							& Y \in \Snp \cap \Sc.
%	\end{array}
%\eeq
%
%We compare the following two approaches.  Let $D$ be an $n$-by-$n$ Euclidean distance matrix corrupted by noise (hence $D$ may not even be a true Euclidean distance matrix since $\KK^\dag(D)$ may have negative eigenvalues).
%\begin{enumerate}
%\item  We compute the eigenvalue decomposition $\KK^\dag(D) = U \Lambda U^T$, and let $P := U_r \Lambda_r^{1/2} \in \R^{n \times r}$.  This matrix $P$ minimizes $\norm{PP^T-\KK^\dag(D)}_F$ over all $P \in \R^{n \times r}$, and satisfies
%\[
%	\norm{PP^T-\KK^\dag(D)}_F = \sqrt{ \sum_{i=r+1}^n \lambda_i^2(\KK^\dag(D))}.
%\]
%Since we assume that $\diag(D) = 0$, we have that $\KK\KK^\dag(D) = D$.  Therefore,
%\[
%\begin{split}
%	\norm{\KK(PP^T)-D}_F 
%	& = \norm{\KK(PP^T-\KK^\dag(D))}_F \\
%	& \leq \norm{\KK}_F \cdot \norm{PP^T-\KK^\dag(D)}_F \\
%	& = 2\sqrt{n} \sqrt{ \sum_{i=r+1}^n \lambda_i^2(\KK^\dag(D))}.
%\end{split}
%\]
%\item We can compute better Euclidean distance matrix approximations of $D$ by increasing the rank of the approximation $PP^T$ of $\KK^\dag(D)$.  That is, we let $P := U_k \Lambda_k^{1/2} \in \R^{n \times k}$, for some $k > r$; see, for example, \cite[Lemma~2]{AlKaWo:97}.  Our facial reduction technique lends well to this approach.  There is no problem for us to compute the intersection of different faces that occupy different dimensions.
%\end{enumerate}
%





%\subsection{Solving the Least Squares Formulation}
%We chose the Frobenius norm in \eqref{eq:bestapprox}. 
%We have an overdetermined nonlinear 
%least squares problem, i.e. it is a sum of $n(n-1)/2$ squares
% with $(n-1)r$ variables. We can therefore use the Gauss-Newton (\GN),
%or Newton, method
%to find a (local) minimum.  Recall that $\usvec$ is an isometry
%from $\Sh$ to $\R^{n(n-1)/2}$. Let 
%\beq
%\label{eq:Fandf}
%F(U):= \usvec\left(\KK_V(UU^T) - D_\epsilon\right), \qquad
%f(U):=\frac 12 \left \| F(U) \right\|^2,
%\eeq
%\index{$\usvec$}
%i.e. we minimize $f(U)$. The \GN method uses the search direction found
%from the (best) least squares solution of the overdetermined linear system
%\beq \label{eq:GNlin}
%F^\prime(U) \Delta U = - F(U), \qquad
%\Delta U = -F^\prime(U)^\dagger  F(U).
%\eeq

%
%\subsubsection{Derivatives}
%Since $F^\prime : \MM^{(n-1)r} \rightarrow \R^{n(n-1)/2}$ and
%$\usvec, K_V$ are linear transformations, we get
%\[
%\begin{array}{rcl}
%F^\prime(U)(\Delta U) 
%&=&
% \usvec\left(  \KK_V \left[\Sprod^\prime( U ) (\Delta U)\right] \right) 
%\\&=&
% \usvec\left(\KK_V \left[2\Ssum(   \Delta UU^T)\right] \right) 
%\\&=&
% \usvec\left(\KK_V \left[ U  \Delta U^T+   \Delta UU^T)\right] \right) 
%\end{array}
%\]
%and
%\[
%\begin{array}{rcl}
%F^\prime(U)^* (d)  
%&=&
% 2((\usvec \KK_V  \Ssum) (\cdot U^T))^*( d)
%\\&=&
% (\cdot U^T)^*\left(2\Ssum^*\KK_V^*\usvec^*   ( d)\right)
%\\&=&
% \left(2\Ssum^*\KK_V^*\usvec^*   ( d)\right)U
%\\&=&
% 2\Ssum^*\left(\KK_V^*\left[\usMat(d)\right]\right)U
%\\&=&
%2\KK_V^*(\usMat(d))U.
%\\&=&
%\end{array}
%\]
% \index{$\usMat$}
%In addition, the gradient of $f$ at $U \in \Mnmr$ acting on 
%$\Delta U \in \Mnmr$ is
%\[
%\begin{array}{rcl}
%\nabla f(U) (\Delta U)
%&=& 
%\left\langle F^\prime(U) \Delta U, F(U) \right\rangle
%\\&=&
%\left\langle \Delta U, F^\prime(U)^* (F(U)) \right\rangle
%\\&=&
% \langle \Delta U, 
%           (\cdot U^T)^*\left(2\Ssum^*\KK_V^*\usvec^*   ( F(U))\right) \rangle
%\\&=&
% \langle \Delta UU^T, 
%           \left(2\Ssum^*\KK_V^*\usvec^*   ( F(U))\right) \rangle
%\\&=&
% \langle 2\Ssum(\Delta UU^T), 
%           \left(\KK_V^*\usvec^*   ( F(U))\right) \rangle
%\\&=&
% \left\langle 
%        (U \Delta U^T+\Delta U U^T) , 
%                           \KK_V^* (\KK_V(UU^T)-D_\epsilon)
%         \right\rangle,
%\end{array}
%\]
%since $\usMat \usvec =  I$ on $\Sh$, and $D_\epsilon \in \Sh$.
%Therefore,
%\[
%\begin{array}{rcl}
%  \nabla f(U) 
%&=&
%           (\cdot U^T)^*\left[2\Ssum^*\KK_V^*\usvec^*   ( F(U))\right]
%\\&=&
%           \left[2\Ssum^*\KK_V^*\usvec^*   ( F(U))\right]U
%\\&=&
% 2 \left(\KK_V^* \left[\KK_V(UU^T)-D_\epsilon\right] \right)U
%\\ &\in& \Mnr.
%\end{array}
%\]
%\begin{lem}
%The Hessian of $f$ at $U$ in \eqref{eq:Fandf} is
%\[
%\nabla^2f(U) =
%2 \vec \left(
%\LL_U^* \KK_V^* \KK_V \Ssum \LL_U  
%+ \KK_V^* \left(\KK_V(UU^T)-D_\epsilon\right) 
%\right)
%\Mat,
%\]
%where $\LL_U(\cdot) = \cdot U^T$.
%??????
%Moreover, it is positive semidefinite if $D_\epsilon=\KK_V(UU^T)$
%with dimension of nullspace $1$.
%Also????  
%	$\langle \Delta U,\nabla^2 f(U) (\Delta U) \rangle = ???$
%psd???? nullspace??? so 1,2,3 terms have common nullspace????
%Moreover,
%we can expand the function as follows.
%\[
%\begin{array}{lr}
%f(U+\Delta U)
%=
%   \frac 12\left \| 
%      \KK_V((U+\Delta U)(U+\Delta U)^T) - D_\epsilon \right\|^2_F 
%            &\mbox{order}
%\\
% \quad  =   \frac 12\left \| \KK_V(UU^T) - D_\epsilon \right\|^2_F 
%                & (0)
%\\  \quad \quad  +
% \left\langle 
%        \KK_V(U \Delta U^T+\Delta U U^T), 
%                           (\KK_V(UU^T)-D_\epsilon)
%         \right\rangle
%                  &(1)
%\\ 
%\quad \quad \quad  +
%   \frac 12\left \| 
%      \KK_V(U\Delta U^T+\Delta UU^T)  \right\|^2_F 
% +\left\langle 
%        \KK_V(UU^T)-D_\epsilon, 
%                           (\KK_V(\Delta U\Delta U^T))
%         \right\rangle
%                  &(2)
%\\ 
%\quad \quad \quad \quad  +
% \left\langle \KK_V(U\Delta U^T+ \Delta U U^T), 
%                           \KK_V(\Delta U\Delta U^T) \right\rangle
%                  &(3)
%\\ 
%\quad \quad \quad \quad \quad  +
%   \frac 12\left \| 
%      \KK_V(\Delta U\Delta U^T)  \right\|^2_F 
%                  &(4)
%\end{array}
%\]
%\end{lem}
%\begin{proof}
%????
%The curvature in the direction $\Delta U$ is
%\beq
%\label{eq:curvtr1}
%\begin{array}{rcl}
%	\langle \Delta U,\nabla^2 f(U) (\Delta U) \rangle
%	&=&
%	\langle
%	\Delta U,
%	2 \KK_V^* \left(\KK_V( U \Delta U^T+\Delta U U^T ) \right)U +
%	2 \KK_V^* \left(\KK_V(UU^T)-D_\epsilon\right) \Delta U
%	\rangle
%\end{array}
%\eeq
%Using $\Delta U = \Mat \kvec \Delta U$, we see that
%the second term in \eqref{eq:curvtr1} yields the symmetric matrix
%\[
%H_2=2 \vec \KK_V^* \left(\KK_V(UU^T)-D_\epsilon\right) \Mat \in
%\Ss^{2(k-1)}.
%\]
%To find the first term, we see that
%\[
%\begin{array}{rcl}
%	\langle
%	\Delta U,
%	2 \KK_V^* \left(\KK_V( U \Delta U^T+\Delta U U^T ) \right)U 
%	\rangle
%	&=&
%	\langle
%	\Delta UU^T,
%	2 \KK_V^* \left(\KK_V( U \Delta U^T+\Delta U U^T ) \right)
%	\rangle
%	\\&=&
%	\langle
%	\LL_U(\Delta U),
%	2 \KK_V^* \left(\KK_V(\Ssum(\LL_U \Delta U ) \right)
%	\rangle
%	\\&=&
%	\langle
%	\Delta U,
%	2 \LL_U^*(\KK_V^* \left(\KK_V(\Ssum(\LL_U \Delta U ) \right)
%	\rangle,
%\end{array}
%\]
%i.e. the second term is
%\[
%H_2= 2 \vec \LL_U^* \KK_V^* \KK_V \Ssum \LL_U  \Mat \in \Ss^{2(k-1)},
%\qquad  \LL_U(\cdot) = \cdot U^T.
%\]
%Therefore, the Hessian of $f$ is
%\index{Hessian of $f$}
%\beq
%\label{eq:hessf}
%\nabla^2f(U) =
%2 \vec \left(
%\LL_U^* \KK_V^* \KK_V \Ssum \LL_U  
%+ \KK_V^* \left(\KK_V(UU^T)-D_\epsilon\right) 
%\right)
%\Mat 
%\eeq

%We see that both the gradient and part of the Hessian 
%(for small residual) {\em vanish} on a nontrivial
%subspace.  Therefore, first and second order optimization algorithms will
%not distinguish ??????  how to proceed ?????
%\end{proof}

%Note that for small $\Delta U$, the third order term is dominated by the
%norm part of the second order term.
%????Suppose that the residual is $0$ at the optimal $U$, 
%i.e.  $D_\epsilon=D, N_\epsilon =0$. Then the expansion and the fact that
%the objective function is a norm, show that we have a local minimum ???? is
%it a strict local min??? yes!!! we can use the expansion to show that the
%second order term dominates the third order term and it corresponds to a
%positive semidefinite Hessian. Moreover, the third order term is zero in the
%direction of the nullspace of the second order term.
%\[
%\left|
% \left\langle \KK_V(U\Delta U^T+ \Delta U U^T), 
%                           \KK_V(\Delta U\Delta U^T) \right\rangle
%\right|
%<<
%   \frac 12\left \| 
%      \KK_V(U\Delta U^T+\Delta UU^T)  \right\|^2_F 
%\]
%The following lemma shows that the minimum of $f$ in the case of a zero residual is
%a {\em strict} global minimum. In addition, in the case of a nonzero
%residual, if we get $\nabla f(\bar U)=0$, then we have a good chance of
%having a strict local minimum due to the structure of the expansion of $f$. 
%\begin{lem}
%Suppose that $h:\Rn \rightarrow \R$, 
%$\nabla
%h(\bar x) =0$ and $\nabla^2 h(\bar x) \succeq 0$. And suppose that $d \in
%\NN(\nabla^2h(\bar x))$ implies that the third order term
%$h^{\prime\prime\prime}(\bar x;d,d,d) = 0$. Then $\bar x \in \argmin h(x)$
%and $\bar x$ is a {\em strict} local minimum of $h$, 
%\end{lem}
%\begin{proof}
%The proof is clear along any direction $d\in \NN(\nabla^2h(\bar x))$, i.e.
%$\bar x$ is  a strict local minimum along this line from $\bar x$. For other
%$d$, the third order term is dominated by the second order term which comes
%from a positive definite matrix, since the $d$ is not in the nullspace.
%\end{proof}

%We first consider the special case that $r=2$.
%\begin{lem}
%Suppose that $U \in \Mnmr$, with $r=2$ and $\rank(U)=2$.  Let 
%\[
%\bar U = \begin{bmatrix} U(:,2)&  -U(:,1) \end{bmatrix}, \quad
%                \hat U= U\bar U^T.
%\]
%Then $\hat U$ is skew-symmetric
%and 
%\[
%\left\{U\Delta U^T + \Delta U U^T = 0 \right\}   \Leftrightarrow
% \left\{\Delta U = \alpha \bar U, \mbox{  for some  } \alpha \in \R\right\}.
%\] 
%\end{lem}
%\begin{proof}
%From the definition, we get $\hat U$ is skew-symmetric. Therefore, 
%$U\bar U^T + \bar U U^T = 0$. The converse follows from the fact that
%we end up with equations of the type  $v_i a - v_j a=0$. This means
%that $v_i=v_j$ unless the elements $a$ are $0$.  ??? more details????
%\end{proof}
%???? Now to construct an algorithm????

%To find the search direction in the Gauss-Newton method at the current
%estimate $U$, we need to find the least squares solution
%of the Gauss-Newton equation, i.e. solve the normal equation
%\[
%F^\prime(U)^* (F^\prime(U) (\Delta U)) = -F^\prime(U)^*(F(U)).
%\]
%Therefore, we need the least squares solution $\Delta U$ of
%the overdetermined system
%\beq
%\label{eq:lssGN}
%\usvec \KK_V(\Delta UU^T+ U \Delta U^T)=  -\usvec (\KK_V(UU^T) - D_\epsilon).
%\eeq
%use e.g. lsqr???? is there an explicit solution since we know
%$\KK_V^\dagger$???

%
%?????add $V$ to this starting point?????
%A good starting point would be $U_0=U_\epsilon \Sigma_\epsilon^{1/2}$,
%where $U_\epsilon$, $\Sigma_\epsilon$ are found using the truncated spectral
%decomposition in \eqref{eq:trunceigdecomp}.

%Also, the Jacobian has a zero singular value. We need to find the
%nullspace of $U\cdot^T +\cdot U^T$ and factor it out, i.e. replace
%it by $U (R\cdot)^T +(R\cdot) U^T$, for appropriate $R$.
%Consider the case $r=2$ and suppose that $U \in \Mnmr$ is fixed.
%Let 
%\[
%\bar U = \begin{bmatrix} U(:,2)&  -U(:,1) \end{bmatrix}, \quad
%                \hat U= U\bar U^T.
%\]
%So that $(\hat U)_{ij}= 
%  U_{i1} U_{j2}-U_{i2} U_{j1} =-(\hat U)_{ji} , \forall i \neq j$ and
%$\diag (U \bar U^T) = 0$, i.e. $\hat U$ is skew-symmetric. Now
%\[
%\begin{array}{rcl}
%U \Delta U^T+ \Delta UU^T = 0 
%&\mbox{ iff }&
%\svec(U \Delta U^T+ \Delta UU^T) = 0 
%\\&\mbox{ iff }&
%\usvec(U \Delta U^T+ \Delta UU^T) = 0, \quad
%\diag(U \Delta U^T+ \Delta UU^T) = 0
%\\&\mbox{ iff }&
%\usvec(U \bar U^T\Diag(v) + \Diag(v) \bar UU^T) = 0, 
%       \quad \Delta U= \Diag(v) \bar U,
%\\&\mbox{ iff }&
%\usvec\left(\hat U\Diag(v) + \Diag(v) \hat U^T\right) = 0,
%       \quad \Delta U= \Diag(v) \bar U.
%\end{array}
%\]
%Therefore, $\Delta U = \bar U$ implies that we have the desired
%nullspace.
%Equivalently, we have $(n-1)(n-2)/2$ equations with $n-1$ unknowns.
%The equations are:
%\[
%\hat U_{ij} v_j + \hat U_{ji} v_i = 0, \quad \forall i < j,
%\]
%i.e. we want the null space of the matrix
%\[
%\begin{bmatrix}
%\hat U_{21}  & \hat U_{12}  &  0 & \dots &\dots &\dots & 0  \cr
%\hat U_{31}  & 0 & \hat U_{13}  &  0 & \dots &\dots & 0  \cr
%\ldots  & \ldots & \ldots  &  \ldots & \dots &\ldots &\ldots  \cr
%\hat U_{(n-1)1}  & 0 & 0  &  0 & \dots &0 & \hat U_{1(n-1)}  \cr
%0 & \hat U_{32}  & \hat U_{23}  &  0 & 0 & \dots  & 0  \cr
%0 & \hat U_{42}  & 0 & \hat U_{24}  &  0 & \dots  & 0  \cr
%\ldots  & \ldots & \ldots  &  \ldots & \dots &\ldots &\ldots  \cr
%0 & \hat U_{(n-1)2}  & 0 & 0  &   \dots & 0 & \hat U_{2(n-1)}  \cr
%\ldots  & \ldots & \ldots  &  \ldots & \dots &\ldots&\ldots   \cr
%\ldots  & \ldots & \ldots  &  \ldots & \dots &\ldots &\ldots  \cr
%\end{bmatrix}
%\]

%
%%----------------------------------------------------
%\subsection{Sensitivity Analysis}
%We look at how changes in the magnitude of the noise
%and in the radio range parameter $R$ influences the
%accuracy in the solution, i.e. the accuracy in locating the sensors.
%?????
%%----------------------------------------------------

%%----------------------------------------------------
%\subsection{Gauss-Newton versus Newton}
%%----------------------------------------------------
%We can use a Newton direction and/or a Gauss-Newton directon? GN
%gives sparsity??? Newton has better asymptotics?? why?? 
%local convergence even though nullspace of Hessian/Jacobian is not zero???



%----------------------------------------------------
\subsection{Clique Reductions Algorithm}
\label{sect:cliqredalg}
%----------------------------------------------------
\index{clique}
In \cite{kriswolk:09}, we presented an algorithm for exact \SNL given
exact data. We now outline this algorithm and extend it to include new
cases.
The algorithm in \cite{kriswolk:09} handled four 
different cases:
\begin{enumerate}
\item
rigid clique intersection;
\item
non-rigid clique intersection;
\item
rigid node absorption;
\item
non-rigid node absorption.
\end{enumerate}
Each of these cases exploited the essential fact that the
embedding dimension $r$ is known and that the operation resulted in a unique
face of dimension $r$ from the intersection of two faces.
This resulted in a very successful algorithm.
However, the algorithm could fail when the graph is very sparse, since
the intersection process then need not result in a unique
face of the proper dimension. 

In the case that we end up with more than one clique after applying the
above four techniques, we
extend the algorithm to allow the intersection of faces to have dimension $>r$.
For example, singular intersections arising from the application of
lower bounds may not yield a unique solution, so we let the solution lie
in a higher dimension. This still reduces the dimension of the current
face of the problem. 
%So, we project into this current face and continue.
%??? how to project??? still maintain graph properties????




%----------------------------------------------------
\section{Localization using Dimensionality Reduction}
\label{sect:dimreduct}
%----------------------------------------------------
\index{dimensionality reduction}
The approach for the nearest \EDM in Section 
\ref{sect:nearestedm} concentrated on finding the best approximation of
the  Gram matrix $X$ with
the correct rank $r$, e.g.,~\eqref{eq:minxrankr}.
This involves solving a nonlinear nearest matrix optimization problem.
We could delay the restriction to the correct rank, and obtain $X$ with
higher rank, i.e.,~we could delay fixing the rank $r$ till the end.

We now study an alternative approach that first finds the localization
for the cliques and then postpones further localization in order
to find all the locations, i.e.,~all of $P$, in one step. This uses
dimensionality reduction with a linear projection.
\begin{lem}
\label{lem:dimredL}
Let $\alpha$ be a clique corresponding to the possibly noisy \EDM
$\bar D=D[\alpha]$ with $|\alpha|=k$. 
Let $B \cong \KK^\dagger(\bar D)$ with $B = YY^T \succeq 0$ an
approximate Gram matrix for this clique, where
\[
Y = U \begin{bmatrix}
\Sigma_{r_Y} \cr 0
\end{bmatrix}  V^T
= U_1 \Sigma_{r_Y} V^T
 \in \R^{k\times r_Y}
\]
has full column rank with the SVD as given, and
$U=\begin{bmatrix} U_1 & U_2 \end{bmatrix}$ is partitioned appropriately.
Then, an optimal least squares approximation for the linear dimensionality
reducing transformation is
\beq
\label{eq:optL}
L^*_\alpha =Y^\dagger JP[\alpha,:]
\in \argmin \|JP[\alpha,:] - YL_\alpha  \|.
\eeq
Moreover, the best $P_\alpha:=P[\alpha,:] \in C$ is found explicitly from the
data $Y$ by solving
\[
\min_{P_\alpha \in C} \|(I-YY^\dagger)  JP_\alpha  \|.
\]
In addition, the projection $I-YY^\dagger = I - U_1U_1^T$; and, if $Y^Te=0$
(centered), then $(I-YY^\dagger)J = J - U_1U_1^T$.
\end{lem}
\begin{proof}
The proof is immediate from the orthogonal invariance of the Frobenius
norm. For completeness, we use the SVD of $Y$, drop the constant
residual term corresponding to $U_2$, and see that
\[
\begin{array}{rcl}
\min_{L_\alpha}
 \|JP[\alpha,:] - YL_\alpha  \|_F^2
&=&
\min\limits_{L_\alpha}
\|JP[\alpha,:] - U 
\begin{bmatrix}
\Sigma_{r_Y} \cr 0
\end{bmatrix}
( V^T L_\alpha)
  \|_F^2
\\&=&
\min\limits_{L_\alpha}
\|U^TJP[\alpha,:] -  
\begin{bmatrix}
\Sigma_{r_Y} \cr 0
\end{bmatrix}
( V^T L_\alpha)
  \|_F^2
\\&=&
\min\limits_{L_\alpha}
\|
\begin{bmatrix}
\Sigma_{r_Y}^{-1} & 0
\end{bmatrix}
U^TJP[\alpha,:] -  
( V^T L_\alpha)
  \|_F^2
\\&=&
\min\limits_{L_\alpha}
\|
V\begin{bmatrix}
 \Sigma_{r_Y}^{-1} & 0
\end{bmatrix}
U^TJP[\alpha,:] -  
L_\alpha
  \|_F^2.
\end{array}
\]
The final expression for the best $P_\alpha$ is found by substituting the
optimal $L_\alpha$ from \eqref{eq:optL} into the objective.
\end{proof}
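A quick numerical sanity check (ours) of Lemma~\ref{lem:dimredL}:
\begin{verbatim}
# Check (ours): L* = Y^dagger J P_alpha attains the minimum in eq. (optL).
import numpy as np

rng = np.random.default_rng(1)
k, r, rY = 8, 3, 3
Y = rng.standard_normal((k, rY))               # full column rank, generically
P_alpha = rng.standard_normal((k, r))
J = np.eye(k) - np.ones((k, k)) / k
L_star = np.linalg.pinv(Y) @ J @ P_alpha
# the residual is the projection (I - Y Y^dagger) J P_alpha ...
res = J @ P_alpha - Y @ L_star
proj = (np.eye(k) - Y @ np.linalg.pinv(Y)) @ J @ P_alpha
assert np.allclose(res, proj)
# ... and no other L does better
L_rand = rng.standard_normal((rY, r))
assert np.linalg.norm(res) <= np.linalg.norm(J @ P_alpha - Y @ L_rand) + 1e-12
\end{verbatim}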

\subsection{Global Localization}
The above Lemma \ref{lem:dimredL} gives an explicit
expression for the dimensionality reducing linear transformation $L_\alpha$
in terms of the unknown points $P[\alpha,:]$. Therefore, if
$\A$ is a set of cliques, then we can find the set of points in the union of
all the rows of $P[\alpha,:]$, $\alpha \in \A$, all at once by solving
\[
\min_{P_\alpha \in C_\alpha, \alpha \in \A} 
         \sum_{\alpha \in \A} \|(J_\alpha-(U_\alpha)_1(U_\alpha)_1^T)  P_\alpha  \|^2.
\]
The objective function is convex and \emph{partially separable}; see,
e.g.,~\cite{MR760465,GrTo:82c,MR1480634,MR1307166,MR1282736,MR866412}.
Therefore, special techniques for partially separable problems can be used
to exploit the sparsity if one can properly choose the constraint sets
$C_\alpha$. However, by using simple choices for $C_\alpha$,
we now present two methods of obtaining \emph{explicit} optimal solutions.

Now let $S_\alpha \in \{0,1\}^{|\alpha| \times n}$ be the 
\emph{selection matrix}
\index{selection matrix}
that chooses the appropriate rows of $P$ so that
\[
P_\alpha = S_\alpha P.
\]
Therefore, with $Q_\alpha := R_\alpha S_\alpha :=
    \left(J_\alpha-(U_\alpha)_1(U_\alpha)_1^T\right)  S_\alpha$, we get
\index{$Q_\alpha := R_\alpha S_\alpha :=
    \left(J_\alpha-(U_\alpha)_1(U_\alpha)_1^T\right)  S_\alpha$}
\[
Q_\alpha^T Q_\alpha =  
S_\alpha^T (R_\alpha^TR_\alpha)  S_\alpha
=S_\alpha^T (J_\alpha-(U_\alpha)_1(U_\alpha)_1^T)  S_\alpha,
\]
since $R_\alpha$ is an orthogonal projection, and
\[
\begin{array}{rcl}
\sum_{\alpha \in \A} 
              \|(J_\alpha-(U_\alpha)_1(U_\alpha)_1^T)  P_\alpha  \|^2
&=&
\sum_{\alpha \in \A} 
    \|(J_\alpha-(U_\alpha)_1(U_\alpha)_1^T)  S_\alpha P  \|^2
\\&=&
\sum_{\alpha \in \A} \|Q_\alpha P  \|^2
\\&=&
\trace \sum_{\alpha \in \A} P^T Q_\alpha^T Q_\alpha P 
\\&=&
\langle P, \sum_{\alpha \in \A} Q_\alpha^T Q_\alpha P  \rangle.
\end{array}
\]
This is a quadratic form in $P$. The corresponding positive
semidefinite matrix is
\beq
\label{eq:quadform}
\begin{array}{rcl}
Q:=\sum_{\alpha \in \A} Q_\alpha^T Q_\alpha  
&=&
\begin{bmatrix} S_{\alpha_1} \cr S_{\alpha_2} \cr \vdots \cr  S_{\alpha_k} \end{bmatrix}^T
\begin{bmatrix}
 R_{\alpha_1}  & 0 & \ldots & 0 \cr
 0 & R_{\alpha_2}  & \ddots & \vdots \cr
 \vdots & \ddots & \ddots  & 0 \cr
 0 & \ldots  & 0 & R_{\alpha_k}\cr
\end{bmatrix}
\begin{bmatrix} S_{\alpha_1} \cr S_{\alpha_2} \cr \vdots \cr  S_{\alpha_k} \end{bmatrix}.
\end{array}
\eeq
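The matrix $Q$ can be assembled directly from the cliques, as in the
following sketch (ours), using $R_\alpha^TR_\alpha=R_\alpha$:
\begin{verbatim}
# Sketch (ours): assemble Q = sum_alpha Q_alpha^T Q_alpha of eq. (quadform).
import numpy as np

def assemble_Q(cliques, U1s, n):
    # cliques: list of index arrays alpha; U1s[i]: orthonormal (U_alpha)_1
    Q = np.zeros((n, n))
    for alpha, U1 in zip(cliques, U1s):
        k = len(alpha)
        R = np.eye(k) - np.ones((k, k)) / k - U1 @ U1.T  # J_alpha - U1 U1^T
        Q[np.ix_(alpha, alpha)] += R           # S_alpha^T R_alpha S_alpha
    return Q
\end{verbatim}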

\subsubsection{Orthogonality Constrained Localization}
An optimal solution of points $P$ can be assumed centered, $P^Te=0$.
Since $Qe=0$, i.e.,~$e$ is an eigenvector of $Q$ corresponding to the
eigenvalue $0$, without further normalization we get the trivial optimal
solution $P=0$.
We first normalize by adding an orthogonality constraint on $P$.
\begin{thm}
\label{thm:bigprob}
The solution for the points $P$ of the localization problem for all the
cliques at once using the program
\beq
\label{eq:bigprob}
\min_{P^TP=I,\,P^Te=0}
         \sum_{\alpha \in \A} \|(J_\alpha-(U_\alpha)_1(U_\alpha)_1^T)  
 S_\alpha P  \|^2
\eeq
is found by setting $P := \begin{bmatrix} v_1 & \cdots & v_r \end{bmatrix}$,
so that
\[
PP^T=B=\sum_{i=1}^r v_iv_i^T,
\]
where $v_1,\ldots,v_r$ are orthonormal eigenvectors corresponding to the
$r$ smallest \underline{positive} eigenvalues
$\lambda_1,\ldots,\lambda_r$ of the
matrix of the quadratic form $Q$ given in \eqref{eq:quadform}.
\end{thm}
\begin{proof}
We eliminate the smallest eigenvalue, as it is zero with eigenvector $e$, 
and choose the next $r$ smallest.
The result then follows immediately from applying
a theorem of Fan \cite{Fan:49} (see also \cite[Ch.~9.B]{MarshOlk:79}) to the
quadratic form in the objective function in \eqref{eq:bigprob}.
\end{proof}
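Concretely, in the same sketch style (ours;
$P=\begin{bmatrix} v_1&\cdots&v_r\end{bmatrix}$ is the natural factor
satisfying the constraints):
\begin{verbatim}
# Sketch (ours): orthogonality constrained localization, Theorem thm:bigprob.
import numpy as np

def solve_orth_constrained(Q, r, tol=1e-9):
    w, V = np.linalg.eigh(Q)                   # ascending eigenvalues
    idx = np.where(w > tol)[0][:r]             # r smallest positive eigenvalues
    return V[:, idx]                           # P^T P = I and P^T e = 0
\end{verbatim}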

\subsubsection{Trace Constrained Localization}
The orthogonal normalization  $P^TP=I_r$
in \eqref{eq:bigprob} appears to be quite arbitrary.
Without any normalization, the optimal solution is  $P=0$; the
normalization constraint forces the optimal solution to have $\rank P= r$.

An alternative $P$ can be found using a weaker normalization
given by a trace constraint, yielding a homogeneous trust region subproblem.
Rather than the $t(r):=r(r+1)/2$ constraints of the orthogonality
normalization, the
trace constraint is a single constraint, and so we can expect an optimal
solution with $\rank P=1$ and a better optimal value.
\begin{thm}
The solution for the points $P$ of the localization problem for all the
cliques at once using the program
\beq
\label{eq:bigprobTRS}
\min_{\trace P^TP=1,\,P^Te=0}
         \sum_{\alpha \in \A} \|(J_\alpha-(U_\alpha)_1(U_\alpha)_1^T)  
 S_\alpha P  \|^2
\eeq
is found using the Rayleigh quotient to be
\[
P = \Mat(e_i \otimes v) = v e_i^T, \quad \mbox{for any $i \in \{1,\ldots,r\}$},
\]
where $v$ is a normalized eigenvector corresponding to the smallest
\underline{positive} eigenvalue of the matrix of the quadratic form $Q$
given in \eqref{eq:quadform}; more generally, the optimal solutions are
the unit norm combinations of the eigenvectors $\Mat(e_i \otimes v)$,
$i=1,\ldots,r$, of the Kronecker product $I_r \otimes Q$ corresponding to
its smallest eigenvalue other than $0$.
\end{thm}
\begin{proof}
The constraint $P^Te=0$ forces the columns of the solution to be
orthogonal to the eigenvector $e$ corresponding to the $0$ eigenvalue.
As in Theorem \ref{thm:bigprob}, the objective function is a quadratic
form with the matrix $Q$, i.e.,~we can use the Kronecker product and the
vectorization of $P$ (by columns) and get the equivalent objective function
\[
\min_{\trace P^TP=1} \trace P^TQP = 
\min_{\|\kvec P\|=1} (\kvec P)^T (I_r \otimes Q) (\kvec P).
\]
Let $Q=UDU^T$ be the spectral decomposition.
The Lagrangian is $\trace P^TQP - \lambda (\trace P^TP-1)$, with stationarity
condition
\[
0=QP - \lambda P =UD(U^TP) - \lambda P,
\]
or $D(U^TP) -\lambda(U^TP)=0$.
We choose the smallest positive eigenvalue and a corresponding
eigenvector of $Q$ (equivalently, of $D$).
\end{proof}
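A corresponding sketch (ours):
\begin{verbatim}
# Sketch (ours): trace constrained localization; P = Mat(e_1 kron v) = v e_1^T.
import numpy as np

def solve_trace_constrained(Q, r, tol=1e-9):
    w, V = np.linalg.eigh(Q)
    i = int(np.where(w > tol)[0][0])           # smallest positive eigenvalue
    P = np.zeros((Q.shape[0], r))
    P[:, 0] = V[:, i]                          # rank-one P, trace(P^T P) = 1
    return P
\end{verbatim}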

%----------------------------------------------------
\section{Generating/Testing Instances}
%----------------------------------------------------
The \SNL problem is closely related to the molecular distance geometry
problem; see, for example, 
\cite{MR2457932,MR1977953,MR1878150,MR1758031}.
In particular, the {\em Extended Geometric Build-up Algorithm (EGBA)}
\index{Extended Geometric Build-up Algorithm, EGBA}
is presented in \cite{MR2457932}. For $r=3$, this algorithm starts with
a clique made up of $4$ atoms, and then builds up the 
size of the clique by adding one atom at a time.
The authors include a discussion of how to avoid the build-up of round-off
error.  See \cite{MR2206961} for information on generating instances of the molecular distance geometry problem.






%%----------------------------------------------------
%\section{Alternate Introduction}
%%----------------------------------------------------
%In this paper we derive and test an algorithm for solving large
%scale sensor localization problems (\SNL) with noisy data. 
%The \SNL problem consists in locating sensors using the fact
%that some of the sensors are anchors/beacons for which the
%locations are given, and that the distances between sensors
%withing {\em radio range} are approximately known.
%The algorithm extends the results in \cite{kriswolk:09} 
%where exact data is assumed. 

%We initialize the algorithm by finding cliques in the
%corresponding graph. We then localize the positions for 
%each of the cliques by treating them as an
%anchorless \SNL problem, i.e. as a {\em Euclidean Distance
%Matrix} (\EDM) completion problem. This is done using a Newton
%type method on the corresponding unconstrained minimization
%problem.
%Thus we find a good initial estimate for a noisy \EDM.

%Following the approach in \cite{kriswolk:09},
%we exploit the implicit
%degeneracy in the semidefinite programming (\SDP) relaxation of \SNL.
%This involves finding the subspace representation of the faces
%of $\Snp$ corresponding to the faces of $\En$.
%We continuously find the intersection of faces by finding the
%intersection of the subspace representations. We delay the
%completion of the original \EDM till the end.

%After we find the \EDM completion from the noisy data, 
%we then rotate the
%problem using the original positions of the anchors.

%We still obtain excellent CPU times and accuracy. 

%We include a discussion of {\em backward stability} or our
%algorithm. After the initialization of finding and localizing
%the cliques, the algorithm can be compared to Gaussian
%elimination (\GE) with partial pivoting. At each step, we find a pair
%of cliques that correspond to the {\em best condition number} for 
%the subspace intersection step, i.e. this can be compared to the
%partial pivoting step in \GE.

%
%Our problem is closely related to {\em graph realizability}. A
%characterization for the cases with the embedding
%dimension $r=2,3$ is given in \cite{ConnellySloughter2004}. In the $r=2$
%case: {\em if and only if it is a partial 2-tree, i.e. a subgraph of the
%2-sum of triangles in the sense of graph theory}.
%In the case $r=3$: {\em if and only if it does not have $K_5$ or the
%1-skeleton of the octahedron as a minor}.

%%----------------------------------------------------
%\subsection{Background}
%%----------------------------------------------------
%spaceloc Carter/Jin/Saunders/Ye \cite{MR2274505} ????

%Recently Nie \cite{NieCOAP07} using SOS includes an error analysis????

%

%%----- rendls preliminary results ---------
%\subsection{Euclidean distance matrices}

%For given points $p_{1}, \ldots, p_{n} \in \R^{r}$ the 
%matrix $D=(d_{ij})$ with
%$$
%d_{ij} = \| p_{i} - p_{j} \|^{2}
%$$
%is called {\bf Euclidean distance matrix}.
%We collect the points $p_{i}$ in the matrix 
%$$
%P^{T} = (p_{1}, \ldots, p_{n}),
%$$
%so the rows of $P$ correspond to the given points.
%We assume without loss of generality that $rk(P)=r$.
%Otherwise, if $rk(P)<r$, we could replace $P$ by
%$Q$ such that $range(P)=range(Q)$ and $rk(Q)=rk(P)$.

%To express $D$ through $PP^{T}$, we introduce the following
%operator $K$.
%$$
%K(Y) := e~ diag(Y)^{T} + diag(Y)e^{T} - 2Y.
%$$
%The definition implies that
%$K(PP^{T})_{ij}= \|p_{i} -p_{j} \|^{2}$, hence $K(PP^{T})=D$.
%We are interested in the reverse question. Given $D$, how can we find
%(some) $P$ such that $K(PP^{T})=D$ ?
%Obviously, such a representation is not unique, because distances
%are invariant under translation and rotation. 
%We could for instance assume without loss of generality that
%$P^{T}e=0$. 
%The following mapping $T$ will be the key to find, for given $D$, 
%some $P$ such that $D=K(PP^{T})$.
%Let $J=I - \frac{1}{n}ee^{T}$ be the orthogonal projection onto
%the subspace $e^{\perp}$. Consider
%$$
%T(D): = - \frac{1}{2}JDJ. 
%$$
%The following results are implicitly or explicitly stated and
%proved in \cite{kriswolk:09}. We provide short proofs to make
%the paper self-contained.
%\begin{lemma}
%Let $P \in \R^{n \times r}$. Then $T(K(PP^{T}))= JPP^{T}J$.
%Moreover, if $P^{T}e=0$, then $T(K(PP^{T}))=PP^{T}$.
%\end{lemma}
%\begin{proof} 
%We have $D= K(PP^{T}) = diag(PP^{T})e^{T} + e~diag(PP^{T})^{T} - 2PP^{T}$.
%Since $Je=0$ we get $T(D)= JPP^{T}J$.
%If in addition $P^{T}e=0$, then $JP=P$.
%\end{proof}
%We observe in particular that $T(D) \succeq 0$.
%Using this lemma, we can characterize realisability of $D$
%in $\R^{r}$.
%\begin{thm}
%Let $D$ be an $n \times n$ Euclidean distance matrix. 
%Then there exists $Q \in \R^{n \times r}$ such that 
%$rank(Q)=r, Q^{T}e=0$ and $D=K(QQ^{T})$ if and only if
%$rank(T(D))=r$.
%In particular, if $T(D)=PP^{T}$, then $D=K(PP^{T})$.
%\end{thm}
%\begin{proof}
%Since $D$ is a Euclidean distance matrix, there exists $Q$ such that
%$Q^{T}e=0$ and $K(QQ^{T}) = D$.
%Suppose first that $rank(Q)=r$.
%The previous lemma shows that
%$T(D) = T(K(QQ^{T})) = QQ^{T}$, so $rank(T(D))=r$.

%Conversely, let $T(D)$ have rank $r$. 
%Since $T(D)\succeq 0$, this implies that there exists
%$P$ with $rank(P)=r$ and $T(D)=PP^{T}$.
%Now note that 
%$$K(PP^{T}) = K(T(D))= K(T(K(QQ^{T})))=K(QQ^{T}).$$ 
%The last equation follows again from 
%the previous lemma.
%Therefore $K(PP^{T})=D$. 
%Let $R$ be the translation of $P$ such that
%$R^{T}e=0$. Then $D=K(PP^{T})=K(RR^{T})$ and $rank(R)=rank(P)$. 
%\end{proof}
%This theorem is constructive in the sense that for given EDM matrix
%$D$, it shows how to find $P \in \R^{n \times r}$ such
%that $K(PP^{T})=D$ and there is no realisation of $D$ in smaller
%dimension.

%
%% ---------------
%\subsection{EDM matrix completion problem}
%% ---------------

%The EDM matrix completion problem is defined as follows. 
%Let $H$ be the adjacency matrix of an undirected graph $G$
%having vertices $V=\{ 1, \ldots, n \}$. Thus $H$ is a symmetric
%0-1 matrix. Let $D_{p}$
%be a Euclidean distance matrix where $(D_{p})_{ij}$ is defined only
%if $h_{ij}=1$, i.e. if $ij$ is an edge of $G$.
%The {\bf Euclidean distance completion problem} consists
%in finding $Q$ such that
%$$
%D_{p} \circ H = K(QQ^{T}) \circ H.
%$$
%Thus $K(QQ^{T})$ is a completion of $D_{p}$ in the sense that 
%the restriction of $K(QQ^{T})$ to the graph $G$ gives $D_{p}$. 

%We are now going to describe a procedure to solve the Euclidean
%distance completion problem based on the previous theorem.

%The problem is easy if the underlying graph consists of two
%cliques $C_{1}$ and $C_{2}$ which intersect in at least $r+1$
%vertices (and possibly some additional edges).
%In this case, the submatrices $D_{1}$ and $D_{2}$, indexed by
%$C_{1}$ and $C_{2}$ have realizations $P_{1}$ and $P_{2}$,
%i.e.
%$$
%D_{1} = K(P_{1}P_{1}^{T}), D_{2} = K(P_{2}P_{2}^{T}).
%$$
%Let $P_{12}$ be the submatrix of $P_{1}$ indexed by $C_{1} \cap C_{2}$,
%and similarly by $P_{21}$ be the part of $P_{2}$ corresponding again to
%the intersection of the two cliques.  If these two matrices would be
%equal, we could extend the two realizations onto the union of the
%two cliques, i.e. onto the whole graph. But this can easily be achieved.
%We assume without loss of generality that $P_{12}^{T}e= P_{21}^{T}e=0$.
%Finding a rotation of $P_{2}$ such that $P_{2}$ agrees at the intersection
%now amounts to solving the following {\bf Procrustres problem}
%$$
%\min \| P_{12 } - P_{21}X \|^{2}
%$$
%such that $X^{T}X=I$.



%????use separable least squares? \cite{GoPe:73}
% "G.H. GOLUB and V. PEREYRA",
%         title     = "The differentiation of pseudo-inverses and non-linear
%                      least squares problems whose variables separate",
%         journal   = "SIAM J. Numer. Anal.",

%\appendix

%\section{Ideas}

%\paragraph{(Introduce the problem)}
%Let $D^\mathrm{true} \in \Ss^n$ be a Euclidean distance matrix with embedding dimension $r$.
%Suppose the measured distances in the matrix $D$ satisfy 
%\[
%	D = D^\mathrm{true} + N,
%\]
%where $N$ represents the noise in the measurements.
%We would like to find the maximum-likelihood estimate of $D^\mathrm{true}$, 
%which can is done by solving the least squares problem
%\begin{equation}
%	\begin{tabular}{lc}
%		minimize & $\| \KK(PP^T) - D \|_F$ \\
%		subject to & $P \in \mathbb{R}^{n \times r}$.
%	\end{tabular}
%\end{equation}

%\paragraph{(Describe the method)}
%Let $B = \KK^\dagger(D) = -\frac{1}{2}JDJ$.
%Then
%\[
%	B = \KK^\dagger(D^\mathrm{true}) + \KK^\dagger(N).
%\]
%Choose $P \in \mathbb{R}^{n \times r}$ that minimizes $\|PP^T - B\|_F$.
%For example, we can choose $P = U_r \Lambda_r^{1/2}$, 
%where $B = U \Lambda U^T$ is the eigenvalue decomposition of $B$.
%Then
%\begin{equation}
%	\|PP^T - B\|_F 
%	= \sqrt{ \sum_{i=r+1}^n \lambda_i(B)^2 },
%\end{equation}
%implying that
%\begin{eqnarray*}
%	\| \KK(PP^T) - D \|_F & = & \|\KK(PP^T) - \KK(B)\|_F \\
%	& \leq & \|\KK\|_F \cdot \|PP^T - B\|_F \\
%	& = & 2\sqrt{n} \sqrt{ \sum_{i=r+1}^n \lambda_i(B)^2 }.
%\end{eqnarray*}

%\paragraph{(Show $\|\KK\|_F = 2\sqrt{n}$)}
%In the above bound we have used the fact that 
%\begin{equation}
%	\|\KK\|_F := \max_{S \in \Ss^n} \frac{\|\KK\|_F}{\|S\|_F}
%	= 2\sqrt{n}.
%\end{equation}
%This fact can be verified by first noting that $\|\KK\|_F = \sqrt{\rho(\KK^*\KK)}$, 
%and then noting that $\KK^*\KK$ keeps the cone $\Ss_+^n \cap \Ss_C$ invariant; that is,
%\[
%	\KK^*(\KK(\Ss_+^n \cap \Ss_C)) = \Ss_+^n \cap \Ss_C.
%\]
%This implies that we need only determine the Perron-vector $Y \in \relint(\Ss_+^n \cap \Ss_C)$
%satisfying 
%\[
%	\KK^*(\KK(Y)) = 4nY.
%\]
%Let $Y$ be defined by
%\[
%	Y_{ij} = 
%	\begin{cases}
%		n-1 & \mbox{if $i = j$} 
%		\cr
%		-1 & \mbox{if $i \neq j$}
%		.
%	\end{cases}
%\]
%Then,
%\begin{eqnarray*}
%	\KK(Y)_{ij} 
%		& = & Y_{ii} + Y_{jj} - 2Y_{ij} \\
%		& = & 
%		\begin{cases}
%			0 & \mbox{if $i = j$}
%			\cr
%			2n & \mbox{if $i \neq j$}
%			,
%		\end{cases}
%\end{eqnarray*}
%which implies that
%\begin{eqnarray*}
%	\KK^*(\KK(Y))_{ij} 
%	& = & 
%	\begin{cases}
%		2( \sum_{k=1}^n \KK(Y)_{ik} - \KK(Y)_{ii} ) & \mbox{if $i=j$}
%		\cr
%		-2\KK(Y)_{ij} & \mbox{if $i \neq j$} 
%	\end{cases} \\
%	& = &
%	\begin{cases}
%		4n(n-1) & \mbox{if $i=j$}
%		\cr
%		-4n & \mbox{if $i \neq j$} 
%	\end{cases} \\
%	& = &
%	4n Y_{ij}.
%\end{eqnarray*}

%

%

%\section{Two Methods}

%\paragraph{(Provide the motivation)}
%Recall that if we choose $P = U_k \Lambda_k^{1/2}$, where $B = \KK^\dagger(D) = U \Lambda U^T$, then
%\begin{equation}
%	\label{eq:error_bound}
%	\frac{1}{2n}\| \KK(PP^T) - D \|_F^2 \leq \sum_{i=k+1}^n \lambda_i(B)^2.
%\end{equation}
%The implication is that larger values of $k$ will result in a smaller upper bound in equation~(\ref{eq:error_bound}).  This idea results in two possible methods.

%\paragraph{(Method 1: $k = r$)}
%For the first method, we always choose $k = r$, where $r$ is the embedding dimension of the true Euclidean distance matrix, $D^\mathrm{true}$.  In this method, when we compute the intersection of faces by computing a matrix $U$ satisfying
%\[
%	\RR(U) = 
%	\RR\left(
%	\begin{bmatrix} 
%	U_1 & 0 \cr 0 & I
%	\end{bmatrix}
%	\right)
%	\cap
%	\RR\left(
%	\begin{bmatrix} 
%	I & 0 \cr 0 & U_2
%	\end{bmatrix}
%	\right),
%\]
%we will have $U_1$ and $U_2$ with the same number of columns, implying that $U$ which will also have the same number of columns as $U_1$ and $U_2$.  Therefore, we will always maintain the same number of columns as we intersect faces.

%\paragraph{(Method 2: $k \geq r$)}
%In the second method, we are given a tolerance $\varepsilon > 0$, and we choose the smallest $k \geq r$ such that
%\begin{equation}
%	\sum_{i=k+1}^n \lambda_i(B)^2 < \varepsilon.
%\end{equation}
%In this case, when we compute the intersection of faces by computing a matrix $U$ satisfying
%\[
%	\RR(U) = 
%	\RR\left(
%	\begin{bmatrix} 
%	U_1 & 0 \cr 0 & I
%	\end{bmatrix}
%	\right)
%	\cap
%	\RR\left(
%	\begin{bmatrix} 
%	I & 0 \cr 0 & U_2
%	\end{bmatrix}
%	\right),
%\]
%the matrices $U_1$ and $U_2$ will not necessarily have the same number of columns.

%
%\section{Equations}

%\begin{equation}
%	\KK(Y) = \diag(Y)e^T + e\diag(Y)^T - 2Y
%\end{equation}

%
%\begin{equation}
%	\KK(Y)_{ij} = Y_{ii} + Y_{jj} - 2Y_{ij}
%\end{equation}

%
%\begin{equation}
%	\KK^*(D) = 2(\Diag(De)-D)
%\end{equation}

%\begin{equation}
%	\KK^*(D)_{ij} = 
%	\begin{cases}
%		2( \sum_{k=1}^n D_{ik} - D_{ii} ) & \mbox{if $i=j$}
%		\cr
%		-2D_{ij} & \mbox{if $i \neq j$} 
%	\end{cases}
%\end{equation}

 











\addcontentsline{toc}{section}{Bibliography}
\bibliography{.master,.edm,.psd,.bjorBOOK}

\cleardoublepage
\addcontentsline{toc}{section}{Index}
\label{ind:index}
\printindex




\end{document}
