%  Solution for I.1.6
\documentclass[11pt]{article}
\begin{document}
\begin{center}
{\bf Solution for Chap I prob 1.6}
\end{center}
\begin{enumerate}
\item
{\bf A Global Minimum Exists}\\
Let 
\[
f(x) 
 = \sum_{i=1}^m f_i(x) 
= \sum_{i=1}^m w_i ||x-y^i||.
\]
The function $||x-y^i||$ diverges to $+\infty$ as $||x||$ goes to
$\infty$, i.e. this function (and so $f$) is coercive. Moreover it is
bounded below by 0 and is continuous.
Therefore the function has a global minimum which is attained.

\item
{\bf The Global Minimum Can be Realized by the Mechanical Model}\\
From the triangle inequality for the norm, we conclude that
\[  || \lambda x +(1-\lambda)x^\prime ||
 \leq  \lambda || x||  +(1-\lambda)|| x^\prime ||, ~\forall~ 0 \leq
\lambda \leq 1,
\]
i.e. $ ||x-y^i||$ is a convex function. Therefore $f$ is a positive
weighted sum of convex functions and is convex. Therefore local minima
are global minima and are characterized by stationary points.

First suppose that $x \neq y^i,~ \forall ~ i.$ Then $x$ optimal implies
that it is a stationary point, i.e.
\[
  0=\nabla f(x) = 
  \sum_{i=1}^m \nabla f_i(x) 
 =  \sum_{i=1}^m w_i \frac 1{||x-y^i||} (x-y^i).
\]
This means that the sum of the forces in the normalized directions is 0.

If the optimum $x$ occurs at one of the points $y^i$, say $x=y^1$, then
$f$ is not differentiable there. Then we can use the subgradient
of $f$, denoted $\partial f$ (see text for
definitions). Without loss of generality, we can assume (after
translation) that $y^1=0.$
Since $f$ is a sum of finite valued functions (i.e.
defined for all $x$), the subgradient of $f$ is the sum of subgradients
of the functions. Therefore, $x$ is optimal if and only if
\[
  0 \in \partial f(x) = 
   \partial f_1(x) +
  \sum_{i=2}^m \nabla f_i(x).
\]
From the definition of subgradient we see
\[ 
\partial f_1(x) =  w_1 \left\{ \phi : ||\phi || \leq 1 \right\}.
\]
Therefore $x$ is optimal if and only if
\[
  0 = \phi + \sum_{i=2}^m w_i \frac 1{||x-y^i||} (x-y^i),
\]
for some $\phi$ with $|| \phi || \leq w_1.$

\item
{\bf Uniqueness}\\
Again, first suppose that $x \neq y^i,~ \forall ~ i.$ Then we can
evaluate the Hessian
\[
  \nabla^2 f(x)
 =  \sum_{i=1}^m w_i \frac 1{||x-y^i||^3}
\left( ||x-y^i||^2  I - (x-y^i)(x-y^i)^t  \right).
\]
But the matrix
\[
 ||x-y^i||^2  I - (x-y^i)(x-y^i)^t
\]
has $(n-1)$ eigenvalues valued $||x-y^i||^2$ and one 0 eigenvalue
corresponding to the eigenvector $x-y^i$, i.e.
it is positive semidefinite. Therefore the Hessian is positive
semidefinite. (We knew this already since the function is convex.)
The Hessian fails to be positive definite if and only if all the
eigenvectors corresponding to the 0 eigenvalue coincide (up to sign),
i.e. the vectors $x-y^i$ are collinear for all $i$.
Thus the optimum is unique except in the collinear case.
(An example where it is not unique is easily seen for points which are
collinear - even two points.)

This argument covers the case where $x=y^i$ as well: if the optimum
were not unique, the set of minimizers would be a segment, which
necessarily contains a point of differentiability of $f$.


\end{enumerate}

\end{document}

