\documentclass[12pt,a4paper]{article}
\usepackage{amsmath,amsxtra,amsthm,amssymb,makeidx,graphics}
\theoremstyle{plain}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{exercise}[theorem]{Exercise}
\newtheorem{example}[theorem]{Example}
\newtheorem{definition}[theorem]{Definition}
\newtheoremstyle{citing}{3pt}{3pt}{\itshape}{0pt}{\bfseries}%
{.}{ }{\thmnote{#3}}\theoremstyle{citing}\newtheorem*{varthm}{}
\DeclareMathOperator{\var}{var}
\DeclareMathOperator{\Cl}{Cl}
\DeclareMathOperator{\Int}{Int}
\renewcommand{\emptyset}{\varnothing}
\theoremstyle{remark}
\newtheorem{question}[theorem]{Exercise}
\begin{document}
\title{Topics in Analysis}
\author{T.~W.~K\"{o}rner}
\maketitle
\begin{footnotesize}
{\bf Small print} This course is not intended as a foundation
for further courses, so the lecturer is allowed substantial
flexibility. Exam questions will be set on the course as given
(so, if I do not have time to cover everything in these notes,
the topics examined will be those actually lectured)
and will only cover topics in these notes. Unless there
is more time to spare than I expect, I will not talk about
topological spaces but confine myself to ordinary Euclidean
spaces ${\mathbb R}^{n}$, ${\mathbb C}$ and complete
metric spaces.
These notes lay out the results of the course. There is
a separate set of notes giving the associated proofs,
but those notes are intended for home use and not
as a means of following the live lectures.
I can provide some notes on the exercises
for supervisors by e-mail. The exercises themselves
form sections~\ref{S;sheet one} to~\ref{S;sheet four}
of these notes.
These notes are written in
\LaTeXe\ and should be available
in tex, ps, pdf and dvi format
from my home page
\begin{center}
{\bf http://www.dpmms.cam.ac.uk/\~{}twk/}
\end{center}
\end{footnotesize}
\tableofcontents
\section{Metric spaces} This section is devoted to
fairly formal preliminaries. Things get more interesting
in the next section and the course gets fully under way
in the third.
Both those students who find the early material worryingly
familiar and those who find it worryingly unfamiliar are asked
to suspend judgement until then.
Most Part~II students will
be familiar with the notion of a metric space.
\begin{definition}\label{D;metric}
Suppose that
$X$ is a non-empty set
and $d:X^{2}\rightarrow{\mathbb R}$ is a function
such that
(i) $d(x,y)\geq 0$ for all $x,\,y\in X$.
(ii) $d(x,y)=0$ if and only if $x=y$.
(iii) $d(x,y)=d(y,x)$ for all $x,\,y\in X$.
(iv) $d(x,y)+d(y,z)\geq d(x,z)$ for all $x,\,y,\,z\in X$.
Then we say that $d$ is a metric on $X$ and that $(X,d)$
is a metric space.
\end{definition}
For most of the course we shall be concerned with
metrics which you already know well.
\begin{lemma}\label{L;some metrics}
(i) Consider ${\mathbb R}^{n}$.
If we take $d$ to be ordinary Euclidean distance
\[d({\mathbf x},{\mathbf y})=\|{\mathbf x}-{\mathbf y}\|=
\left(\sum_{j=1}^{n}|x_{j}-y_{j}|^{2}\right)^{1/2},\]
then $({\mathbb R}^{n},d)$ is a metric space.
We refer to this space as \emph{Euclidean space}.
(ii) Consider ${\mathbb C}$. If we take
$d(z,w)=|z-w|$, then $({\mathbb C},d)$ is a metric space.
\end{lemma}
\begin{proof} Proved in previous courses
(and set as Exercise~\ref{Q;some metrics}).
\end{proof}
The next definitions work in any metric
space, but you
can concentrate on what they mean for ordinary Euclidean space.
\begin{definition} If $(X,d)$ is a metric space
$x_{n}\in X$, $x\in X$ and $d(x_{n},x)\rightarrow 0$, then
we say that $x_{n}\xrightarrow[d]{}x$ as $n\rightarrow\infty$.
\end{definition}
\begin{definition}
If $(X,d)$ is a metric space
$x\in X$ and $r>0$, then we write
\[B(x,r)=\{y\in X\,:\,d(x,y)<r\}\]
and call $B(x,r)$ the open ball with centre $x$ and radius $r$.
\end{definition}
\begin{definition}\label{Def;Open}
If $(X,d)$ is a metric space, we say that a subset $U$ of $X$ is
open if, whenever $u\in U$, we can find a $\delta>0$ such that
$B(u,\delta)\subseteq U$.
\end{definition}
\begin{exercise}\label{E;Balls}
(i) Show that, if $x\in X$ and $r>0$,
then $B(x,r)$ is open in the sense of Definition~\ref{Def;Open}.
(ii) Show that, if $x\in X$ and $r>0$, then the set
\[\bar{B}(x,r)=\{y\in X\,:\,d(x,y)\leq r\}\]
is closed. (Naturally enough, we call $\bar{B}(x,r)$
a closed ball.)
(iii) If $E$ is closed then $X\setminus E$ is open.
(iv) If $E$ is open then $X\setminus E$ is closed.
\end{exercise}
We recall (without proof) the following important
results from~1A.
\begin{theorem}{\bf[Cauchy criterion]}\label{T;junior Cauchy}
A sequence
$a_{n}\in{\mathbb R}$ converges if and only if,
given $\epsilon>0$, we can find an $N(\epsilon)$
such that $|a_{n}-a_{m}|<\epsilon$ for all
$n,\,m\geq N(\epsilon)$.
\end{theorem}
Generalisation
leads us to the following definitions.
\begin{definition} If $(X,d)$ is a metric space, then a sequence
$(a_{n})$ with $a_{n}\in X$ is called a \emph{Cauchy sequence}
if, given $\epsilon>0$, we can find an $N(\epsilon)$
such that $d(a_{n},a_{m})<\epsilon$ for all
$n,\,m\geq N(\epsilon)$.
\end{definition}
\begin{definition} We say that a metric space $(X,d)$ is
\emph{complete} if every Cauchy sequence converges.
\end{definition}
\begin{exercise}\label{E;convergent Cauchy}
Show that if $(X,d)$ is a metric space
(complete or not), then every convergent sequence is Cauchy.
\end{exercise}
We note the following useful remarks.
\begin{lemma}\label{L;fast Cauchy}
Let $(X,d)$ be a metric space.
(i) If a Cauchy sequence $x_{n}$ in $(X,d)$ has a convergent
subsequence with limit $x$ then $x_{n}\rightarrow x$.
(ii) Let $\epsilon_{n}>0$
and $\epsilon_{n}\rightarrow 0$ as $n\rightarrow\infty$.
If $(X,d)$ has the property that, whenever
$d(x_{n},x_{n+1})<\epsilon_{n}$ for $n\geq 1$,
it follows that the sequence
$x_{n}$ converges, then $(X,d)$ is complete.
\end{lemma}
Lemma~\ref{L;fast Cauchy}~(ii) is most useful when we have
$\sum_{n=1}^{\infty}\epsilon_{n}$ convergent,
for example if $\epsilon_{n}=2^{-n}$.
The next exercise is simply a matter of disentangling notation.
\begin{exercise}\label{E;closed Cauchy}
Suppose that $(X,d)$ is a metric space
and $Y$ is a non-empty subset of $X$.
(i) Show that, if $d_{Y}(a,b)=d(a,b)$ for all $a,\,b\in Y$,
then $(Y,d_{Y})$ is a metric space.
(ii) Show that, if $(X,d)$ is complete and $Y$ is closed in $(X,d)$,
then $(Y,d_{Y})$ is complete.
(iii) Show that, if $(Y,d_{Y})$ is complete, then
(whether $(X,d)$ is complete or not) $Y$ is closed
in $(X,d)$.
\end{exercise}
We now come to our first real theorem.
\begin{theorem}\label{T;Euclid}
The Euclidean space ${\mathbb R}^{n}$
with the usual metric is complete.
\end{theorem}
We shall usually prove such theorems in the case $n=2$
and remark that the general case is similar.
\section{Compact sets in Euclidean Space}
In Part~1A we showed that any bounded sequence had a convergent
subsequence. This result generalises to $n$ dimensions.
\begin{theorem}\label{T;Bolzano}
If $K>0$ and ${\mathbf x}_{r}\in {\mathbb R}^{m}$
satisfies $\|{\mathbf x}_{r}\|\leq K$ for all $r$, then we can find
an ${\mathbf x}\in {\mathbb R}^{m}$ and $r(k)\rightarrow\infty$
such that ${\mathbf x}_{r(k)}\rightarrow{\mathbf x}$
as $k\rightarrow\infty$.
\end{theorem}
\begin{exercise}\label{E;Bolzano product}
Prove Theorem~\ref{T;Bolzano} in the case $m=2$.
\end{exercise}
We now prove a very useful theorem.
\begin{theorem}\label{T;closed bounded}
(i) If $E$ is a closed bounded subset of ${\mathbb R}^{m}$,
then any sequence ${\mathbf x}_{r}\in E$ has a subsequence with a limit
in $E$.
(ii) Conversely, if $E$ is a subset of ${\mathbb R}^{m}$
with the property that
any sequence ${\mathbf x}_{r}\in E$ has a subsequence with a limit
in $E$, then $E$ is closed and bounded.
\end{theorem}
We shall refer to the property described in (i) as the
Bolzano--Weierstrass property.
If you cannot see how to prove a result in ${\mathbb R}^{m}$
using the Bolzano--Weierstrass property then the 1A proof
for ${\mathbb R}$ will often provide a hint.
We often refer to closed bounded subsets of ${\mathbb R}^{m}$
as compact sets. (The reader is warned that, in general metric spaces,
`closed and bounded' does not mean the same thing as `compact'
(see, for example, Exercise~\ref{Q;unit ball}). If
we deal with the even more general case of topological spaces we have
to distinguish between compactness and sequential
compactness\footnote{If you
intend to climb Everest you need your own oxygen supply. If you intend
to climb the Gog Magogs you do not.}.
We shall only talk about compact sets in ${\mathbb R}^{m}$.)
The reader will be familiar with definitions of the following
type.
\begin{definition} If $(X,d)$ and $(Y,\rho)$ are metric spaces
and $E\subseteq X$, we
say that a function $f:E\rightarrow Y$ is continuous if, given
$\epsilon>0$ and $x\in E$, we can find a $\delta(x,\epsilon)>0$
such that, whenever $z\in E$ and $d(z,x)<\delta(x,\epsilon)$,
we have $\rho(f(z),f(x))<\epsilon$.
\end{definition}
Even if the reader has not met the general metric space definition,
she will probably have a good idea of the properties of
continuous functions $f:E\rightarrow{\mathbb R}^{n}$
when $E$ is a subset of ${\mathbb R}^{m}$.
We now prove that the continuous image of a compact
set is compact.
\begin{theorem}\label{T;Compact image}
If $E$ is a compact subset of ${\mathbb R}^{m}$
and $f:E\rightarrow{\mathbb R}^{n}$ is continuous, then $f(E)$
is a compact subset of ${\mathbb R}^{n}$.
\end{theorem}
At first sight Theorem~\ref{T;Compact image} seems too abstract to be
useful but it has an immediate corollary.
\begin{theorem}\label{T;compact bound}
If $E$ is a non-empty compact subset of ${\mathbb R}^{m}$
and $f:E\rightarrow{\mathbb R}$ is continuous, then there exist
${\mathbf a},\,{\mathbf b}\in E$ such that
\[f({\mathbf a})\geq f({\mathbf x})\geq f({\mathbf b})\]
for all ${\mathbf x}\in E$.
\end{theorem}
Thus a continuous real valued function on a compact set
is bounded and attains its bounds.
\begin{exercise}\label{E;1A bounds}
Deduce the theorem in~1A
which states that if $f:[c,d]\rightarrow{\mathbb R}$ is continuous,
then $f$ is bounded and attains its bounds.
\end{exercise}
Theorem~\ref{T;compact bound} gives a neat proof of the fundamental
theorem of algebra (which, contrary to its name, is a theorem of analysis).
\begin{theorem}{\bf [Fundamental Theorem of Algebra]}%
\label{T;Fundamental Algebra}
If we work
in the complex numbers, every non-constant polynomial has a root.
\end{theorem}
The reader will probably have seen, but may well have forgotten,
the contents of the next exercise.
\begin{exercise}\label{E;roots}
We work in the complex numbers.
(i) Use induction on $n$ to show that,
if $P$ is a polynomial of degree $n$ and $a\in{\mathbb C}$,
then there exists a polynomial $Q$ of degree $n-1$ and an
$r\in{\mathbb C}$ such that
\[P(z)=(z-a)Q(z)+r.\]
(ii) Deduce that, if $P$ is a polynomial of degree $n$
with root $a$, then there exists a polynomial $Q$ of degree $n-1$
such that
\[P(z)=(z-a)Q(z).\]
(iii) Use induction and the fundamental theorem of algebra
to show that every polynomial $P$ of degree $n$ can be written in the form
\[A\prod_{j=1}^{n}(z-a_{j})\]
with $A,\,a_{1},\,a_{2},\,\dots,\,a_{n}\in{\mathbb C}$
and $A\neq 0$.
(iv) Show that, if $P$ is a polynomial of degree at most $n$ which vanishes
at $n+1$ points, then $P$ is the zero polynomial.
\end{exercise}
\section{Laplace's equation}
We need a preliminary definition.
\begin{definition} Let $(X,d)$ be a metric space and $E$
a subset of $X$.
(i) The interior of $E$ is the set of all points
$x$ such that there
exists a $\delta>0$ (depending on $x$) such that
$B(x,\delta)\subseteq E$. We write $\Int E$ for the interior of $E$.
(ii) The closure of $E$ is the set of points $x$ in $X$ such that we can find
$e_{n}\in E$ with $e_{n}\xrightarrow[d]{}x$.
We write $\Cl E$ for the closure of $E$.
(iii) The boundary of $E$ is the set $\partial E=\Cl E\setminus \Int E$.
\end{definition}
\begin{exercise}\label{E;boundaries}
(i) Show that $\Int E$ is
open. Show also that, if $V$ is open and $V\subseteq E$, then
$V\subseteq \Int E$.
(ii) Show that $\Cl E$ is
closed. Show also that, if $F$ is closed and $F\supseteq E$, then
$F\supseteq \Cl E$.
(Thus $\Int E$ is the largest open set contained in $E$
and $\Cl E$ is the smallest closed set containing $E$.)
(iii) Show that $\partial E$ is closed.
(iv) Suppose that we work in ${\mathbb R}^{m}$ with the usual metric.
Show that if $E$ is bounded, then so is $\Cl E$.
\end{exercise}
Recall that, if $\phi$ is a real valued function in ${\mathbb R}^{m}$ with
sufficiently many derivatives, then we write
\[\triangledown^{2}\phi=\sum_{j=1}^{m}
\frac{\partial^{2}\phi}{\partial x_{j}^{2}}.\]
The next few results concern solutions of Laplace's
equation
\[\triangledown^{2}\phi=0.\]
\begin{lemma}\label{L;up Laplace} Let $\Omega$ be a bounded
open subset of ${\mathbb R}^{m}$
and $\epsilon>0$.
Suppose $\phi:\Cl \Omega\rightarrow {\mathbb R}$ is continuous
and satisfies
\[\triangledown^{2}\phi>\epsilon\]
on $\Omega$. Then $\phi$ cannot attain its maximum on $\Omega$.
\end{lemma}
\begin{theorem}\label{T;down Laplace}
Let $\Omega$ be a bounded open subset of ${\mathbb R}^{m}$.
Suppose $\phi:\Cl \Omega\rightarrow {\mathbb R}$ is continuous
on $\Cl \Omega$
and satisfies
\[\triangledown^{2}\phi=0\]
on $\Omega$. Then $\phi$ attains its maximum on $\partial \Omega$.
\end{theorem}
\begin{exercise}\label{E;analytic maximum}
Let $\Omega$ be a
bounded open subset of ${\mathbb C}$.
Suppose that
\[f:\Cl \Omega\rightarrow{\mathbb C}\]
is continuous
and $f$ is analytic on $\Omega$. Recall that the real
and imaginary parts of $f$ satisfy Laplace's equation
on $\Int \Omega$. Show that $|f|$ attains its maximum on $\partial \Omega$.
\noindent$[$Hint: Why can we assume that $\Re f(z_{0})=|f(z_{0})|$
at any particular point $z_{0}$?$]$
\end{exercise}
\begin{theorem}\label{T;unique Laplace}
Let $\Omega$ be a bounded open subset of ${\mathbb R}^{m}$.
Suppose that the functions
$\phi,\psi:\Cl \Omega\rightarrow {\mathbb R}$ are continuous
and satisfy
\[\triangledown^{2}\phi=0,\ \triangledown^{2}\psi=0\]
on $\Omega$. Then, if $\phi=\psi$ on $\partial \Omega$,
it follows that $\phi=\psi$ on $\Cl \Omega$.
\end{theorem}
You proved a version of Theorem~\ref{T;unique Laplace}
in Part~1A but only for `nice boundaries' and functions that
behaved `nicely' near the boundary.
You have met the kind of arguments used above when you proved
Rolle's theorem in 1A. Another use of this argument is given
in Exercise~\ref{Q;Schwarz} which provides a nice
revision for this section.
In Part~1A you assumed that you could always solve Laplace's equation.
The next exercise (which is part of the course)
shows that this is not the case.
\begin{exercise}\label{E;no Laplace}
(i) Let
\[\Omega=\{{\mathbf x}\in{\mathbb R}^{2}\,:\,0<\|{\mathbf x}\|<1\}.\]
Show that $\Omega$ is open, that
\[\Cl \Omega=\{{\mathbf x}\in{\mathbb R}^{2}\,:\,\|{\mathbf x}\|\leq 1\}\]
and
\[\partial\Omega=\{{\mathbf x}\in{\mathbb R}^{2}\,:\,\|{\mathbf x}\|=1\}
\cup\{{\boldsymbol 0}\}.\]
(ii) Suppose that
$\phi:\Cl \Omega\rightarrow {\mathbb R}$ is continuous,
that $\phi$ is twice differentiable on $\Omega$
and satisfies
\[\triangledown^{2}\phi=0\]
on $\Omega$ together with the boundary conditions
\[\phi({\mathbf x})
=\begin{cases}
0&\text{if $\|{\mathbf x}\|=1$},\\
1&\text{if ${\mathbf x}={\boldsymbol 0}$}.
\end{cases}
\]
Use the uniqueness of solutions of Laplace's equation to show that
$\phi$ must be radially symmetric in the sense that
\[\phi({\mathbf x})=f(\|{\mathbf x}\|)\]
for some function $f:[0,1]\rightarrow {\mathbb R}$.
(iii) Show that
\[\frac{d\ }{dr}\big(rf(r)\big)=0\]
for $0<r<1$.
% NOTE(review): some source text appears to be missing here (the end of
% this exercise and the start of the Nash bargaining discussion); the
% fragment below presupposes the definitions of `best point' and $E'$.
If ${\mathbf x}^{*}$ is a best point for $E$ and $a_{j}>0$,
then ${\mathbf y}^{*}$ with $y_{j}^{*}=a_{j}x_{j}^{*}+b_{j}$
is a best point for $E'$.
\begin{exercise}\label{E;Convex transform}
Show that the $E'$ defined above is closed
bounded and convex.
\end{exercise}
There is no difficulty in remembering these conditions
since they each play a particular role in the proof.
If the reader prefers only to deal with the case $m=2$, she will
lose nothing of the argument\footnote{And no marks in the
exam}.
We need a preliminary lemma.
\begin{lemma}\label{L;push to one side}
If $K$ is a convex set in ${\mathbb R}^{n}$
such that $(1,1,\ldots,1)\in K$
and $\prod_{j=1}^{n}x_{j}\leq 1$ for all ${\mathbf x}\in K$
with $x_{j}\geq 0$ $[1\leq j\leq n]$, then
\[K\subseteq\{{\mathbf x}\,:\,x_{1}+x_{2}+\ldots x_{n}\leq n\}.\]
\end{lemma}
\begin{theorem}\label{T;happy Nash}
Suppose that we agree to the Nash conditions.
If $E$ is a closed bounded convex set in ${\mathbb R}^{m}$,
${\mathbf s}$ is the
status quo point and the function
$f:E\rightarrow{\mathbb R}$ given by
\[f({\mathbf x})=\prod_{j=1}^{m}(x_{j}-s_{j})\]
has a maximum (in $E$) with $x_{j}-s_{j}>0$
at ${\mathbf x}^{*}$, then
${\mathbf x}^{*}$ is the unique best point.
\end{theorem}
We complete our discussion by observing that the following result
is a consequence of Lemma~\ref{L;push to one side}.
\begin{lemma}\label{L;pushier}
If $E$ is a closed bounded convex set in ${\mathbb R}^{m}$,
${\mathbf s}\in \Int E$
and the function
$f:E\rightarrow{\mathbb R}$ is given by
\[f({\mathbf x})=\prod_{j=1}^{m}(x_{j}-s_{j}),\]
then there is a unique point in $E$ with $x_{j}-s_{j}\geq 0$
where $f$ attains a maximum.
\end{lemma}
`There is no patent for immortality under the moon'
but I suspect that Nash's results will be remembered
long after the last celluloid copy of \emph{A Beautiful Mind}
has crumbled to dust.
The book \emph{Games, Theory and Applications}~\cite{Thomas} by L.~C.~Thomas
maintains a reasonable balance between the technical and non-technical
and would make a good port of first call if you wish to learn more
along these lines.
\section{Approximation by polynomials} It is a guiding
idea of both the calculus and of numerical analysis that
`well behaved functions look like polynomials'.
Like most guiding principles, it needs to be used judiciously.
If asked to justify it, we might mutter something about
Taylor's Theorem, but Cauchy produced the following example
to show that this is not sufficient.
\begin{exercise}\label{E;Cauchy example}
Let $E:{\mathbb R}\rightarrow{\mathbb R}$
be defined by
\[E(t)=
\begin{cases}
\exp(-1/t^{2})&\text{if $t\neq 0$,}\\
0&\text{if $t= 0$.}
\end{cases}
\]
(i) $E$ is infinitely differentiable, except, possibly, at $0$,
with
\[E^{(n)}(t)=P_{n}(1/t)E(t)\]
for all $t\neq 0$ for some polynomial $P_{n}$.
(ii) $E$ is infinitely differentiable everywhere
with
\[E^{(n)}(0)=0.\]
(iii) We have
\[E(t)\neq\sum_{n=0}^{\infty}\frac{E^{(n)}(0)}{n!}t^{n}\]
for all $t\neq 0$.
\end{exercise}
(It is very unlikely that you have not seen this exercise
before, but if you have not you should do it.)
We could also mutter something like `interpolation'.
The reader probably knows all the facts given in the next lemma.
\begin{lemma}\label{L;interpolate polynomial}
Let $x_{0}$, $x_{1}$,
\dots, $x_{n}$ be distinct points of $[a,b]$.
(i) If $f:[a,b]\rightarrow{\mathbb R}$ then there is at most
one polynomial of degree no greater than $n$ with $P(x_{j})=f(x_{j})$
for $0\leq j\leq n$.
(ii) Write
\[e_{j}(t)=\prod_{k\neq j}\frac{t-x_{k}}{x_{j}-x_{k}}.\]
If $f:[a,b]\rightarrow{\mathbb R}$ then
\[P=\sum_{j=0}^{n}f(x_{j})e_{j}\]
is a polynomial of degree at most $n$ with
$P(x_{i})=f(x_{i})$ for $0\leq i\leq n$.
(Thus we can replace `at most' by `exactly'
in (i).)
(iii) In the language of vector spaces, the $e_{j}$
form a basis for the vector space of polynomials ${\mathcal P}_{n}$
of degree $n$ or less. We can replace `at most' by `exactly'
in (i).
\end{lemma}
However polynomials can behave in rather odd ways.
\begin{theorem}\label{T;Chebychev polynomials}
There exist polynomials $T_{n}$ of degree
$n$ and $U_{n-1}$ of degree $n-1$ such that
\[T_{n}(\cos\theta)=\cos n\theta\]
for all $\theta$ and
\[U_{n-1}(\cos\theta)=\frac{\sin n\theta}{\sin\theta}\]
for $\sin\theta\neq 0$. The value of $U_{n-1}(\cos\theta)$
when $\sin\theta=0$ is given by continuity and will be $\pm n$.
The roots of $U_{n-1}$ are $\cos(r\pi/n)$ with $1\leq r\leq n-1$
and the roots of $T_{n}$ are $\cos\big((r+\tfrac{1}{2})\pi/n\big)$ with
$0\leq r\leq n-1$.
The coefficient of $t^{n}$ in $T_{n}$ is $2^{n-1}$ for $n\geq 1$.
\end{theorem}
We call $T_{n}$ the Chebychev\footnote{Or Tchebychev, hence the $T$.}
polynomial of degree $n$.
The $U_{n}$ are called Chebychev polynomials of the second kind.
Looking at the Chebychev polynomials of the second kind, we see
that we can choose a well behaved function $f$ which is
well behaved at $n+1$ reasonably well spaced points but
whose $n$th degree interpolating polynomial is very large
at some other point. It can be shown (though this is harder to prove)
that this kind of thing can happen however we choose
our points of interpolation.
A little thought shows that we are not even sure what
it means for one function to look like another. It is natural
to interpret $f$ looks like $g$
as saying that $f$ and $g$ are close in
some metric. However there are a number of `obvious' metrics.
The next exercise will be familiar to almost all my audience.
\begin{exercise}\label{E;distinct metrics}
Show that the following define metrics
on the space $C([0,1])$ of continuous functions
$f:[0,1]\rightarrow{\mathbb R}$.
(i) $\|f\|_{1}=\int_{0}^{1}|f(t)|\,dt$ defines
a norm with associated distance
\[d_{1}(f,g)=\|f-g\|_{1}=\int_{0}^{1}|f(t)-g(t)|\,dt.\]
(ii) The equation
\[\langle f,g \rangle=\int_{a}^{b}f(t)g(t)\,dt\]
defines an inner product on $C([a,b])$
with associated norm $\|\ \|_{2}$ and so a distance
\[d_{2}(f,g)=\|f-g\|_{2}=\left(\int_{a}^{b}(f(t)-g(t))^{2}\,dt\right)^{1/2}\]
for the derived norm.
(iii) The equation $\|f\|_{\infty}=\sup_{t\in[0,1]}|f(t)|$
defines a norm and so a distance
\[d_{3}(f,g)=\|f-g\|_{\infty}=\sup_{t\in[0,1]}|f(t)-g(t)|.\]
Show that
\[\|f-g\|_{1}\leq \|f-g\|_{2}\leq \|f-g\|_{\infty}.\]
Let
\[f_{n}(t)=
\begin{cases}
(1-nt)&\text{for $0\leq t\leq 1/n$},\\
0&\text{otherwise}.
\end{cases}
\]
Compute $\|f_{n}\|_{\infty}/\|f_{n}\|_{1}$
and $\|f_{n}\|_{1}/\|f_{n}\|_{2}$. Comment.
\end{exercise}
Each of these metrics has its advantages and all are used
in practice. We shall concentrate on the metric $d_{3}$.
We quote the following result from a previous course
(where it is known as the General Principle of Uniform Convergence).
\begin{theorem} If $[a,b]$ is a closed interval and
$C([a,b])$ is the space of continuous functions on $[a,b]$,
then $C([a,b])$ is complete under the uniform metric
\[d(f,g)=\|f-g\|_{\infty}.\]
\end{theorem}
We have now obtained a precise question. If $f$ is a continuous function
can we find polynomials which are
arbitrarily close in the uniform norm?
This question was answered in the affirmative
by Weierstrass in a paper published
when he was 70 years old.
Since then, several different proofs have been discovered.
We present one due to Bernstein based on probability
theory\footnote{Some other proofs
are given in Exercises~\ref{E;long Bernstein},
\ref{E;kernel Weierstrass} and~\ref{E;Lebesgue}.}.
Before that, we need a definition and theorem which the
reader will have met in a simpler form earlier.
\begin{definition} Let $(X,d)$ and $(Y,\rho)$ be metric spaces.
We say that a function $f:X\rightarrow Y$
is uniformly continuous if, given $\epsilon>0$,
we can find a $\delta>0$ such that $\rho\big(f(a),f(b)\big)<\epsilon$
whenever $d(a,b)<\delta$.
\end{definition}
\begin{theorem}\label{T;uniform continuity}
If $E$ is a bounded closed set in ${\mathbb R}^{m}$
and $f:E\rightarrow{\mathbb R}^{p}$ is continuous, then
$f$ is uniformly continuous.
\end{theorem}
Although we require probability theory we only need deal
with the simplest case of a random variable taking a finite number
of values and, if the reader wishes, she need only prove
the next result in that case.
\begin{theorem}{\bf [Chebychev's inequality]}%
\label{T;Chebychev}
If $X$ is a real valued bounded random variable, then,
writing
\[\sigma^{2}=\var X={\mathbb E}(X-{\mathbb E}X)^{2},\]
we have
\[\Pr(|X-{\mathbb E}X|\geq a)\leq \frac{\sigma^{2}}{a^{2}}\]
for all $a>0$.
\end{theorem}
\begin{theorem}{\bf [Bernstein]}\label{T;Bernstein}
Suppose $f:[0,1]\rightarrow{\mathbb R}$
is continuous.
Let $X_{1}$, $X_{2}$,
\dots $X_{n}$ be independent
identically distributed random variables
with $\Pr(X_{r}=0)=1-t$ and $\Pr(X_{r}=1)=t$ (think of
tossing a biased coin). Let
\[Y_{n}(t)=\frac{X_{1}+X_{2}+\dots+X_{n}}{n}\]
and let
\[p_{n}(t)={\mathbb E}f(Y_{n}(t)).\]
Then
(i) $p_{n}$ is a polynomial of degree $n$. Indeed,
\[p_{n}(t)=\sum_{j=0}^{n}\binom{n}{j}f(j/n)t^{j}(1-t)^{n-j}.\]
(ii) $\|p_{n}-f\|_{\infty}\rightarrow 0$ as $n\rightarrow\infty$.
\end{theorem}
Bernstein's result differs from many proofs of Weierstrass's
theorem in giving an elegant \emph{explicit} approximating
polynomial.
\section{Best approximation by polynomials}
Bernstein's theorem gives an explicit approximating polynomial
but, except in very special circumstances,
not the best approximating polynomial.
(Indeed, we have not yet shown that such a polynomial exists.)
Chebychev was very interested
in this problem and gave a way of telling when we do
have a best approximation.
\begin{theorem}{\bf [The Chebychev equiripple criterion]}%
\label{T;Chebychev equiripple}
Let $f:[a,b]\rightarrow{\mathbb R}$ be a continuous
function and $P$ a polynomial of degree at most $n-1$.
Suppose that we can find $a\leq a_{0}< a_{1}< \dots< a_{n}\leq b$
such that, writing $\sigma=\|f-P\|_{\infty}$ we have
either
\begin{align*}
f(a_{j})-P(a_{j})=(-1)^{j}\sigma&\ \text{for all $0\leq j\leq n$}\\
\intertext{or}
f(a_{j})-P(a_{j})=(-1)^{j+1}\sigma&\ \text{for all $0\leq j\leq n$.}
\end{align*}
Then $\|P-f\|_{\infty}\leq\|Q-f\|_{\infty}$ for all polynomials $Q$
of degree
$n-1$ or less.
\end{theorem}
We apply this to find the polynomial of degree $n-1$
which gives the best approximation to $t^{n}$ on $[-1,1]$.
\begin{theorem}\label{T;nice Chebychev}
Write $S_{n}(t)=t^{n}-2^{1-n}T_{n}(t)$,
where $T_{n}$ is the Chebychev polynomial of degree $n$.
Then (if $n\geq 1$)
\[\sup_{t\in[-1,1]}|t^{n}-Q(t)|\geq \sup_{t\in[-1,1]}|t^{n}-S_{n}(t)|
= 2^{1-n}\]
for all polynomials $Q$ of degree $n-1$.
\end{theorem}
\begin{corollary}\label{C;naught for naught}
We work on $[-1,1]$.
(i) If $P(t)=\sum_{j=0}^{n}a_{j}t^{j}$
is a polynomial of degree $n$
with $|a_{n}|\geq 1$ then $\|P\|_{\infty}\geq 2^{-n+1}$.
(ii) We can find $\epsilon(n)>0$ with the following property.
If $P(t)=\sum_{j=0}^{n}a_{j}t^{j}$
is a polynomial of degree at most $n$ and $|a_{k}|\geq 1$ for some
$n\geq k\geq 0$ then $\|P\|_{\infty}\geq \epsilon(n)$.
\end{corollary}
We can now use a compactness argument to prove that
there does exist a best approximation.
\begin{theorem}\label{T;best approximation exists}
If $f:[a,b]\rightarrow{\mathbb R}$ is a continuous
function, then
there exists a polynomial $P$ of degree at most $n$
such that $\|P-f\|_{\infty}\leq\|Q-f\|_{\infty}$ for all polynomials
$Q$ of degree
$n$ or less.
\end{theorem}
We could also have proved Corollary~\ref{C;naught for naught}~(ii)
directly by a compactness argument without using Chebychev's result.
We have only shown that the Chebychev criterion
is a sufficient condition.
However, it can be shown that it is also a necessary one.
The proof is given in Exercise~\ref{Q;ripple necessary}
but is not part of the course.
\section{Gaussian integration} How should we attempt to
estimate $\int_{a}^{b}f(x)\,dx$ if we only know $f$ at
certain points? One, rather naive, approach is to find
the interpolating polynomial for those points and integrate that.
This leads rapidly, via Lemma~\ref{L;interpolate polynomial},
to the following result.
\begin{lemma}\label{L;numerical}
Let $x_{0}$, $x_{1}$,
\dots, $x_{n}$ be distinct points of $[a,b]$.
Then there are unique real numbers $A_{0}$, $A_{1}$,
\dots, $A_{n}$ with the property that
\[\int_{a}^{b}P(x)\,dx=\sum_{j=0}^{n}A_{j}P(x_{j})\]
for all polynomials of degree $n$ or less.
\end{lemma}
However our previous remarks about interpolating
polynomials suggest, and experience confirms,
that it may not always be wise to use the approximation
\[\int_{a}^{b}f(x)\,dx\approx\sum_{j=0}^{n}A_{j}f(x_{j})\]
even when $f$ is well behaved.
In the particular case when the interpolation points
are equally spaced, computation suggests that as the number of points
used increases the $A_{j}$ begin to vary in sign and become large in
absolute value. It can be shown that this
is actually the case
and that this means that the approximation can actually get
worse as the number of points increase.
It is rather surprising that there is a choice of points
which avoids this problem. Earlier
(in Exercise~\ref{E;distinct metrics}~(ii))
we observed that
the definition
\[\langle f,g\rangle=\int_{-1}^{1}f(t)g(t)\,dt\]
gives an inner product on the vector space $C([-1,1])$.
Let us recall some results
from vector space theory.
\begin{lemma}\label{L;Gramm}{\bf [Gram--Schmidt]}
Let $V$ be a vector space
with an inner product. Suppose that
${\mathbf e}_{1}$, ${\mathbf e}_{2}$,
\dots, ${\mathbf e}_{n}$ are orthonormal and ${\mathbf f}$
is not in their linear span. If we set
\[{\mathbf v}={\mathbf f}-\sum_{j=1}^{n}
\langle{\mathbf f},{\mathbf e}_{j}\rangle{\mathbf e}_{j}\]
we know that ${\mathbf v}\neq {\boldsymbol 0}$ and
that, setting ${\mathbf e}_{n+1}=\|{\mathbf v}\|^{-1}{\mathbf v}$
the vectors ${\mathbf e}_{1}$, ${\mathbf e}_{2}$,
\dots, ${\mathbf e}_{n+1}$ are orthonormal.
\end{lemma}
Lemma~\ref{L;Gramm}
enables us to make the following definition.
\begin{definition} The Legendre polynomials $p_{n}$
are the polynomials given by the following
conditions\footnote{There are various other definitions
but they all give polynomials of the form
$b_{n}p_{n}$. The only difference is in the choice of $b_{n}$.
As may be seen from Exercise~\ref{E;compute Legendre}
our choice is not very convenient.}.
(i) $p_{n}$ is a polynomial of degree $n$ with positive leading
coefficient.
(ii) ${\displaystyle \int_{-1}^{1}p_{n}(t)p_{m}(t)\,dt=\delta_{nm}
=\begin{cases}1\qquad\text{if $n=m$,}
\\0\qquad\text{otherwise.}\end{cases}}$
\end{definition}
\begin{lemma}\label{L;Legendre roots}
The $n$th Legendre polynomial $p_{n}$ has $n$ distinct
roots all lying in $(-1,1)$.
\end{lemma}
Gauss had the happy idea of choosing the evaluation
points to be the roots of a Legendre polynomial.
\begin{theorem}\label{T;Gauss} (i) If $\alpha_{1}$, $\alpha_{2}$,
\dots, $\alpha_{n}$ are the $n$ roots of the
$n$th Legendre polynomial $p_{n}$ and
the $A_{j}$ are chosen so that
\[\int_{-1}^{1}P(x)\,dx=\sum_{j=1}^{n}A_{j}P(\alpha_{j})\]
for every polynomial $P$ of degree $n-1$ or less, then,
in fact
\[\int_{-1}^{1}Q(x)\,dx=\sum_{j=1}^{n}A_{j}Q(\alpha_{j})\]
for every polynomial $Q$ of degree $2n-1$ or less.
(ii) If $\beta_{j}\in[-1,1]$ and $B_{j}$ are such that
\[\int_{-1}^{1}Q(x)\,dx=\sum_{j=1}^{n}B_{j}Q(\beta_{j})\]
for every polynomial $Q$ of degree $2n-1$ or less,
then the $\beta_{j}$ are the roots of the $n$th Legendre polynomial.
\end{theorem}
Theorem~\ref{T;Gauss} looks impressive but it is the next result
which really shows how good Gauss' idea is.
\begin{theorem}\label{T;nice quadrature}
We continue with the notation of Theorem~\ref{T;Gauss}.
(i) $A_{j}\geq 0$ for each $1\leq j\leq n$.
(ii) $\sum_{j=1}^{n}A_{j}=2$.
(iii) If $f:[-1,1]\rightarrow{\mathbb R}$ is continuous and
$P$ is any polynomial of degree at most $2n-1$, then
\[\left|\int_{-1}^{1}f(x)\,dx-
\sum_{j=1}^{n}A_{j}f(\alpha_{j})\right|\leq 4\|f-P\|_{\infty}.\]
(iv) Write $G_{n}f$ for the estimate of $\int_{-1}^{1}f(t)\,dt$
obtained using Gauss' idea with the $n$th Legendre polynomial.
Then, if $f$ is continuous on $[-1,1]$,
\[G_{n}f\rightarrow \int_{-1}^{1}f(t)\,dt\]
as $n\rightarrow\infty$.
\end{theorem}
\section{Distance and compact sets} This section could
come almost anywhere in the notes, but provides some helpful
background to the section on Runge's theorem.
\begin{lemma}\label{L,from compact}
If $E$ is a non-empty compact set in ${\mathbb R}^{m}$
and ${\mathbf a}\in {\mathbb R}^{m}$, then there is
a point ${\mathbf e}\in E$ such that
\[\|{\mathbf a}-{\mathbf e}\|=
\inf_{{\mathbf x}\in E}\|{\mathbf a}-{\mathbf x}\|.\]
\end{lemma}
We write $d({\mathbf a},E)=
\inf_{{\mathbf x}\in E}\|{\mathbf a}-{\mathbf x}\|$.
\begin{exercise}\label{E;unique convex 1}
(i) Give an example to show that
the point ${\mathbf e}$ in Lemma~\ref{L,from compact}
need not be unique.
(ii) Show that, if $E$ is convex, ${\mathbf e}$
is unique.
\end{exercise}
\begin{lemma}\label{L;compact compact}
(i) If $E$ and $F$ are non-empty compact sets in ${\mathbb R}^{m}$,
then there exist points ${\mathbf e}\in E$ and ${\mathbf f}\in F$
such that
\[\|{\mathbf e}-{\mathbf f}\|=\inf_{{\mathbf y}\in E}d({\mathbf y},F).\]
(ii) The result in~(i) remains true when $F$ is compact and non-empty
and $E$ is closed and non-empty.
(iii) The result in~(i) may fail when $E$
and $F$ are closed and non-empty.
\end{lemma}
Let us write $\tau(E,F)=\inf_{{\mathbf y}\in E}d({\mathbf y},F).$
\begin{exercise}\label{E;unique convex 2}
Give an example to show that
the points ${\mathbf e}$ and ${\mathbf f}$
in Lemma~\ref{L;compact compact}
need not be unique.
\end{exercise}
The statement of the next exercise requires us to recall
the definition of a metric.
\begin{varthm}[Definition~\ref{D;metric}]
Suppose that
$X$ is a non-empty set
and $d:X^{2}\rightarrow{\mathbb R}$ is a function
such that
(i) $d(x,y)\geq 0$ for all $x,\,y\in X$.
(ii) $d(x,y)=0$ if and only if $x=y$.
(iii) $d(x,y)=d(y,x)$ for all $x,\,y\in X$.
(iv) $d(x,y)+d(y,z)\geq d(x,z)$ for all $x,\,y,\,z\in X$.
Then we say that $d$ is a metric on $X$ and that $(X,d)$
is a metric space.
\end{varthm}
\begin{exercise}\label{E;first try Hausdorff}
Show that, if we consider the space
${\mathcal K}$ of non-empty
compact sets in ${\mathbb R}^{m}$, then $\tau$ obeys
conditions~(i) and~(iii) for a metric but not conditions~(ii)
and~(iv).
Show that, if $E,F\in{\mathcal K}$, then $\tau(E,F)=0$ if and
only if $E\cap F\neq\emptyset$.
\end{exercise}
Since $\tau$ does not provide a satisfactory metric on
${\mathcal K}$, we try something else. If $E$ and $F$ are
compact sets in ${\mathbb R}^{m}$, let us set
$\sigma(E,F)=\sup_{{\mathbf y}\in E}d({\mathbf y},F)$.
\begin{exercise}\label{E;Hausdorff try 2}
Suppose that $E$ and $F$ are non-empty
compact sets. Show that there exists an ${\mathbf e}\in E$
such that $d({\mathbf e},F)=\sigma(E,F)$.
\end{exercise}
\begin{exercise}\label{E;Hausdorff try 3} Show that,
if we consider the space
${\mathcal K}$ of non-empty
compact sets in ${\mathbb R}^{m}$, then $\sigma$ obeys
condition~(i) for a metric but not conditions~(ii)
and~(iii).
Show that $\sigma(E,F)=0$ if and only if $E\subseteq F$.
\end{exercise}
However, $\sigma$ does obey the triangle inequality.
\begin{lemma}\label{L;start Hausdorff}
If $E$, $F$ and $G$ are non-empty compact sets
then
\[\sigma(E,G)\leq \sigma(E,F)+\sigma(F,G).\]
\end{lemma}
This enables us to define the Hausdorff metric $\rho$.
\begin{definition} If $E$ and $F$ are non-empty compact
subsets of ${\mathbb R}^{m}$, we set
\[\rho(E,F)=\sigma(E,F)+\sigma(F,E),\]
that is to say,
\[\rho(E,F)=\sup_{{\mathbf e}\in E}\inf_{{\mathbf f}\in F}
\|{\mathbf e}-{\mathbf f}\|
+\sup_{{\mathbf f}\in F}\inf_{{\mathbf e}\in E}
\|{\mathbf e}-{\mathbf f}\|.
\]
\end{definition}
\begin{theorem}\label{T;Hausdorff hurrah}
The Hausdorff metric $\rho$ is
indeed a metric on the space ${\mathcal K}$ of non-empty
compact subsets of ${\mathbb R}^{m}$.
\end{theorem}
Indeed, we can say something even stronger which will come in
useful when we give examples of the use of Baire's theorem.
\begin{theorem}\label{T;Hausdorff complete}
The Hausdorff metric $\rho$ is
a complete metric on the space ${\mathcal K}$ of non-empty
compact subsets of ${\mathbb R}^{m}$.
\end{theorem}
Our proof of Theorem~\ref{T;Hausdorff complete} makes use
of two observations.
\begin{theorem}\label{T;decreasing compact} (i) Suppose
that we have a sequence of non-empty compact sets
in ${\mathbb R}^{m}$ such that
\[K_{1}\supseteq K_{2}\supseteq K_{3}\supseteq\ldots.\]
Then $K=\bigcap_{p=1}^{\infty}K_{p}$ is a non-empty compact set.
(ii) Further, $K_{p}\underset{\rho}{\rightarrow} K$
as $p\rightarrow\infty$.
\end{theorem}
\begin{lemma}\label{L;extend compact}
If $K$ is compact in ${\mathbb R}^{m}$ so
is
\[K+\bar{B}(0,r)=\{{\mathbf x}+{\mathbf y}\,:\,
{\mathbf x}\in K,\ \|{\mathbf y}\|\leq r\}.\]
\end{lemma}
\section{Runge's theorem} Weierstrass' theorem tells us that
every continuous real valued function on $[a,b]$ can be
uniformly approximated by polynomials. Does a similar theorem
hold for complex variable?
Cauchy's theorem enables us to answer with a resounding no.
\begin{example}\label{E;conjugate}
Let $\bar{D}=\{z\,:\,|z|\leq 1\}$ and define
$f:\bar{D}\rightarrow{\mathbb C}$ by
\[f(z)=\bar{z}.\]
If $P$ is any polynomial, then
\[\sup_{z\in\bar{D}}|f(z)-P(z)|\geq 1.\]
\end{example}
After looking at this example the reader may recall
the following theorem (whose proof does not form part
of this course).
\begin{theorem}\label{T;uniform limit}
If $\Omega$ is an open subset of
${\mathbb C}$ and $f_{n}:\Omega\rightarrow{\mathbb C}$ is analytic,
then if $f_{n}\rightarrow f$ uniformly on $\Omega$
(or, more generally, if $f_{n}\rightarrow f$ uniformly on
each compact subset of $\Omega$) then $f$ is analytic.
\end{theorem}
We might now conjecture that every analytic function
on a well behaved set
can be uniformly approximated by polynomials.
Cauchy's theorem again shows that the matter is not straightforward.
\begin{example}\label{E;big hole}
Let $\bar{T}=\{z\,:\,1/2\leq |z|\leq 2\}$ and define
$f:\bar{T}\rightarrow{\mathbb C}$ by
\[f(z)=\frac{1}{z}.\]
If $P$ is any polynomial then
\[\sup_{z\in\bar{T}}|f(z)-P(z)|\geq 1.\]
\end{example}
Thus the best we can hope for is a theorem that tells
us that every analytic function on a suitable set
`without holes' can be uniformly approximated by polynomials.
We shall see that the following definition
gives a suitable `no holes' condition.
\begin{definition}\label{D;connected}
An open set $U\subseteq {\mathbb C}$ is
\emph{path connected} if, given $z_{0},\,z_{1}\in U$, we
can find a continuous map $\gamma:[0,1]\rightarrow U$
with $\gamma(0)=z_{0}$ and $\gamma(1)=z_{1}$.
\end{definition}
We will obtain results for sets whose
\emph{complement} is path connected.
The reader may ask why we could not simply use Taylor's
theorem in complex variables. To see that this
would not work
we recall various earlier results. (The proofs are not part of the
course but you are expected to know the results.)
\begin{lemma} If $a_{j}\in{\mathbb C}$, then there exists
an $R\in[0,\infty]$ (with suitable conventions when $R=\infty$)
such that $\sum_{j=0}^{\infty}a_{j}z^{j}$ converges
for $|z|<R$ and diverges for $|z|>R$.
\end{lemma}
We call $R$ the radius of convergence of
$\sum_{j=0}^{\infty}a_{j}z^{j}$.
\begin{lemma} If $\sum_{j=0}^{\infty}a_{j}z^{j}$
has radius of convergence $R$ and $R'<R$, then
$\sum_{j=0}^{\infty}a_{j}z^{j}$ converges uniformly
on $\{z\,:\,|z|\leq R'\}$.
\end{lemma}
% NOTE(review): some text appears to be missing from the source here;
% the statement of Runge's theorem (referenced below as T;Runge) has been
% reconstructed minimally -- please check against the original notes.
We can now state Runge's theorem.
\begin{theorem}{\bf [Runge]}\label{T;Runge}
Suppose that $K$ is a compact subset of ${\mathbb C}$
such that ${\mathbb C}\setminus K$ is path connected,
and that $\Omega$ is an open set with $K\subseteq\Omega$.
Then, given any analytic function $f:\Omega\rightarrow{\mathbb C}$
and any $\epsilon>0$, we can find a polynomial $P$ with
\[\sup_{z\in K}|f(z)-P(z)|<\epsilon.\]
\end{theorem}
I shall make a number of remarks before moving
on to the proof. The first is that (as might be expected)
Theorem~\ref{T;Runge} is the simplest of a family of
results which go by the name of Runge's theorem.
However, I think that it is fair to say that, once the proof
of this simplest case is understood, both the proofs
and the meanings of the more general theorems
are not hard to grasp.
The second remark is that the reader will
lose very little understanding\footnote{And,
provided she does not twist the examiner's nose, few
marks in the exam.}
if she concentrates
on the example of Runge's theorem for geometrically simple
$K$ and $\Omega$ (like rectangles and triangles).
Our proof of Runge's theorem splits into several steps.
\begin{lemma}\label{L;Runge contour} Suppose that $K$
is a compact set with $K\subseteq \Omega$.
Then we can find a finite set
of piecewise linear contours $C_{m}$
lying entirely within $\Omega\setminus K$ such that
\[f(z)=\sum_{m=1}^{M}\int_{C_{m}}\frac{f(w)}{w-z}\,dw\]
whenever $z\in K$ and $f:\Omega\rightarrow{\mathbb C}$
is analytic.
\end{lemma}
It is worth making the following observation explicit.
\begin{lemma}\label{L;note Runge}
With the notation and conditions of
Lemma~\ref{L;Runge contour}, we can find a $\delta>0$
such that $|z-w|\geq\delta$ whenever $z\in K$ and
$w$ is a point of one of the contours $C_{m}$.
\end{lemma}
We use Lemma~\ref{L;Runge contour} to prove the following
result which takes us closer to our goal.
\begin{lemma}\label{L:Runge to simple}
Suppose that $K$
is a compact set with $K\subseteq \Omega$.
Then given any analytic $f:\Omega\rightarrow{\mathbb C}$
and any $\epsilon>0$ we can find an integer $N$,
complex numbers $A_{1},\,A_{2},\, \dots,\,A_{N}$
and $\alpha_{1},\,\alpha_{2},\,\dots,\,\alpha_{N}\in\Omega\setminus K$
such that
\[\left|f(z)-\sum_{n=1}^{N}\frac{A_{n}}{z-\alpha_{n}}\right|
<\epsilon\]
for all $z\in K$.
\end{lemma}
Thus Runge's theorem follows at once from the following
special case.
\begin{lemma}\label{L;simple Runge 1}
Suppose that $K$ is a compact set
and ${\mathbb C}\setminus K$ path connected. Then, given
any $\alpha\notin K$ and
any $\epsilon>0$, we can find a polynomial $P$ with
\[\left|P(z)-\frac{1}{z-\alpha}\right|
<\epsilon\]
for all $z\in K$.
\end{lemma}
Let us make a temporary definition.
\begin{definition} Let $K$ be a compact set in ${\mathbb C}$.
We write $\Lambda(K)$ for the set of points $\alpha\notin K$
such that, given any $\epsilon>0$, we can find a polynomial $P$ with
\[\left|P(z)-\frac{1}{z-\alpha}\right|
<\epsilon\]
for all $z\in K$.
\end{definition}
A series of observations about $\Lambda(K)$ brings the proof
of Runge's theorem to a close.
\begin{lemma}\label{L;far Runge}
Let $K$ be a compact set in ${\mathbb C}$.
Then there exists an $R$ such that $|\alpha|>R$ implies
$\alpha\in \Lambda(K)$.
\end{lemma}
\begin{lemma}\label{L;simple Runge 2}
Let $K$ be a compact set in ${\mathbb C}$.
If $\alpha\in\Lambda(K)$ and $|\alpha-\beta|<d(\alpha,K)/2$,
then $\beta\in\Lambda(K)$.
\end{lemma}
% NOTE(review): some text appears to be missing from the source here
% (the end of the Runge material and the start of the section on
% transcendental numbers); Liouville's theorem has been reconstructed
% minimally -- please check against the original notes.
\begin{theorem}{\bf [Liouville]}
If $\alpha$ is an irrational root of a polynomial
of degree $n$ with integer coefficients $a_{j}$,
then there exists a $c>0$ (depending on the $a_{j}$)
such that
\[\left|\alpha-\frac{p}{q}\right|\geq \frac{c}{q^{n}}\]
for all $p,\,q\in{\mathbb Z}$ with $q\neq 0$.
\end{theorem}
We say that `irrational algebraic numbers are not
well approximated by rationals'.
\begin{theorem}\label{T;Liouville number} The number
\[\sum_{j=0}^{\infty}\frac{1}{10^{j!}}\]
is transcendental.
\end{theorem}
\begin{exercise}\label{E;uncountable Liouville}
By considering
\[\sum_{n=0}^{\infty}\frac{b_{n}}{10^{n!}}\]
with $b_{j}\in\{1,\,2\}$, give another proof that the
set of transcendental numbers is uncountable.
\end{exercise}
As might be expected, it turned out to be very hard to
show that particular numbers are transcendental.
Hermite proved that $e$ is transcendental and
Lindemann adapted Hermite's method to show that
$\pi$ is transcendental (and so the circle can not be squared).
Alan Baker has contributed greatly to this field,
and his book \emph{Transcendental number theory}~\cite{Baker}
contains accessible\footnote{But miraculous.} proofs
of the transcendence of $e$ and $\pi$.
\section{The Baire category theorem} The following theorem turns
out to be much more useful than its somewhat abstract
formulation makes it appear.
\begin{theorem} {\bf [The Baire category theorem]}%
\label{T;Baire}
If $(X,d)$ is a complete non-empty metric space and $U_{1}$, $U_{2}$,
\dots are open sets whose complements have empty interior,
then
\[\bigcap_{j=1}^{\infty}U_{j}\neq\emptyset.\]
\end{theorem}
Taking complements gives the following equivalent form.
\begin{theorem}\label{T;complement Baire}
If $(X,d)$ is a complete non-empty metric space and $F_{1}$, $F_{2}$,
\dots are closed sets with empty interior,
then
\[\bigcup_{j=1}^{\infty}F_{j}\neq X.\]
\end{theorem}
I think of Baire's theorem in yet another equivalent form.
\begin{theorem}\label{T;informal} Let
$(X,d)$ be a non-empty complete metric space.
Suppose that $P_{j}$ is a property such that:-
(i) The property of being $P_{j}$
is \emph{stable} in the sense that, given
$x\in X$ which has property $P_{j}$,
we can find an $\epsilon>0$ such that whenever
$d(x,y)<\epsilon$ the point $y$ has the property $P_{j}$.
(ii) The property of not being $P_{j}$
is \emph{unstable} in the sense that, given
$x\in X$ and $\epsilon>0$, we can find a $y\in X$
with $d(x,y)<\epsilon$ which has the property $P_{j}$.
Then there is an $x_{0}\in X$ which has all
of the properties $P_{1}$, $P_{2}$, \ldots.
\end{theorem}
Baire's theorem has given rise to the following
standard definitions\footnote{Your lecturer thinks
that, whilst the concepts defined are very useful, the nomenclature
is particularly unfortunate.}.
\begin{definition} A set in a metric space is said to be
\emph{nowhere dense} if its closure has empty interior.
A set in a metric space is said to be of \emph{first category} if it
is a subset of a countable union of nowhere dense closed sets. Any set which is
not of first category is said to be of \emph{second category}.
\end{definition}
Your lecturer will try never to use the words second category
but always to talk about `not first category'.
\begin{lemma}\label{L;Baire obvious}
(i) If $(X,d)$ is a non-empty complete metric space
and $E$ is a subset of first category, then $E\neq X$.
(ii) The countable union of sets of first category is of first category.
\end{lemma}
We need one more definition.
\begin{definition} (i) If $(X,d)$ is a metric space,
we say that a point $x\in X$ is \emph{isolated} if
we can find a $\delta>0$ such that $B(x,\delta)=\{x\}$.
(ii) If $(X,d)$ is a metric space, we say that a subset
$E$ of $X$ contains no isolated points if, whenever $x\in E$
and $\delta>0$, we have $B(x,\delta)\cap E\neq\{x\}$.
\end{definition}
\begin{theorem}\label{T;no isolated}
A non-empty complete metric space without isolated
points is uncountable.
\end{theorem}
\begin{corollary}\label{C;Cantor} The real numbers are uncountable.
\end{corollary}
The proof we have given here is much closer to Cantor's original
proof than that given in~1A.
It avoids the use of extraneous concepts like decimal representation.
Banach was a master of using the Baire category theorem.
Here is one of his results.
\begin{theorem}\label{T;Banach}\label{T;nowhere}
Consider $C([0,1])$
with the uniform norm. The set of functions which are
differentiable at even one point is a set of the first category. Thus continuous nowhere
differentiable functions exist.
\end{theorem}
Here is another corollary of Theorem~\ref{T;no isolated}.
\begin{corollary}\label{C;perfect} A non-empty closed subset
of ${\mathbb R}$ without isolated
points is uncountable.
\end{corollary}
Do there exist nowhere dense closed subsets of ${\mathbb R}$ with
no isolated points\footnote{Such sets are called perfect.
If they make pretty pictures they are called fractals.
You do not have to remember either name.}?
We shall answer this question by applying Baire's theorem in
the context of the Hausdorff metric.
\begin{lemma}\label{L;splendid isolation}
Consider the space $\mathcal K$ of
non-empty compact subsets of $[0,1]$
with the Hausdorff metric $\rho$. Let
${\mathcal E}_{k}$ be the collection
of compact sets $E$ such that
there exists an $x\in E$ with $B(x,1/k)\cap E=\{x\}$.
(i) The
set ${\mathcal E}_{k}$ is closed in the Hausdorff metric.
(ii) The
set ${\mathcal E}_{k}$ is nowhere dense in the Hausdorff metric.
(iii) The set ${\mathcal E}$ of compact sets
with an isolated point is of first category
with respect to the Hausdorff metric.
\end{lemma}
\begin{lemma}\label{L;empty interior} Consider the space ${\mathcal K}$
of non-empty compact subsets of $[0,1]$
with the Hausdorff metric $\rho$.
Let ${\mathcal F}_{j,k}$ be the collection
of compact sets $F$ such that
\[F\supseteq [j/k,(j+1)/k]
\qquad[0\leq j\leq k-1,\ 1\leq k].\]
(i) The
set ${\mathcal F}_{j,k}$ is closed in the Hausdorff metric.
(ii) The
set ${\mathcal F}_{j,k}$ is nowhere dense in the Hausdorff metric.
(iii) The set ${\mathcal F}$ of compact sets
with non-empty interior is of first category.
\end{lemma}
\begin{theorem}\label{T;many Cantor}
The set ${\mathcal C}$ of non-empty
compact sets with empty interior
and no isolated points is the complement of a set of first category
in the space ${\mathcal K}$
of non-empty compact subsets of $[0,1]$
with the Hausdorff metric $\rho$.
\end{theorem}
Since ${\mathcal K}$ with the Hausdorff metric is complete
it follows that non-empty compact sets with empty interior
and no isolated points exist.
The following example provides a background to
our next use of Baire category.
\begin{exercise}\label{E;many witches}
(i) Show that we can find continuous functions
$g_{n}:[0,1]\rightarrow{\mathbb R}$ such that
$g_{n}(x)\rightarrow 0$ for each $x\in[0,1]$
but
\[\sup_{t\in[0,1]}g_{n}(t)\rightarrow\infty\]
as $n\rightarrow\infty$.
\noindent[Hint: Witch's hat.]
(ii)
Show that we can find continuous functions
$f_{n}:[0,1]\rightarrow{\mathbb R}$ such that
$f_{n}(x)\rightarrow 0$ for each $x\in[0,1]$
but
\[\sup_{t\in[2^{-r-1},2^{-r}]}f_{n}(t)\rightarrow\infty\]
as $n\rightarrow\infty$
for each integer $r\geq 0$.
\end{exercise}
In spite of the previous example we have the following
remarkable theorem.
\begin{theorem}\label{T;bound on interval} Suppose that we have
a sequence of continuous functions
$f_{n}:[0,1]\rightarrow{\mathbb R}$ such that
$f_{n}(x)\rightarrow 0$ for each $x\in[0,1]$
as $n\rightarrow\infty$. Then we can find
a non-empty interval $(a,b)\subseteq[0,1]$
and an $M>0$ such that
\[|f_{n}(t)|\leq M\]
for all $t\in(a,b)$ and all $n\geq 1$.
\end{theorem}
A slightly stronger version of the result is given
as Exercise~\ref{E;small bound}.
\section{Continued fractions}\label{S;continued}
We are used to writing real numbers as decimals, but there are other
ways of specifying real numbers which may be more convenient.
The oldest of these is the method of continued fractions.
Suppose that $x$ is irrational and $1\geq x>0$.
We know that there is a strictly positive integer $N(x)$
such that
\[\frac{1}{N(x)}\geq x>\frac{1}{N(x)+1}\]
so we can write
\[x=\frac{1}{N(x)+T(x)}\]
where $T(x)$ is irrational and $1> T(x)\geq 0$.
We can do the same things to $T(x)$ as we did to $x$,
obtaining
\[T(x)=\frac{1}{N(T(x))+T(T(x))}\]
and so, using the standard notation for composition of functions,
\[x=\cfrac{1}{N(x)+\cfrac{1}{NT(x)+T^{2}(x)}}\ .\]
The reader\footnote{As opposed to typesetters; this sort
of thing turned their hair prematurely grey.}
will have no difficulty in proceeding to the next step
and obtaining
\[x=\cfrac{1}{N(x)+\cfrac{1}{NT(x)+\cfrac{1}{NT^{2}(x)+T^{3}(x)}}}\ ,\]
and so on indefinitely. We write
\[x=\cfrac{1}{N(x)+\cfrac{1}{NT(x)+\cfrac{1}{NT^{2}(x)+
\cfrac{1}{NT^{3}(x)+\dots}}}}\ .\]
If $y$ is a general irrational number, we can take $x=y-[y]$
and write
\[y=[y]+
\cfrac{1}{N(x)+\cfrac{1}{NT(x)+\cfrac{1}{NT^{2}(x)+
\cfrac{1}{NT^{3}(x)+\dots}}}}\ .\]
We can do the same thing if $y$ is rational, but
we must allow for the possibility that the process does not continue
indefinitely. It is instructive to carry out the process
in a particular case.
\begin{exercise}\label{E;100/37} Carry out the process outlined above
for $100/37$. Carry out the process for the rational
of your choice.
\end{exercise}
Once we have done a couple of examples it is clear that
we are simply echoing Euclid's algorithm\footnote{I think
historians would reverse the order and say that continued fractions
gave rise to Euclid's algorithm}.
\begin{lemma}\label{L;continued Euclid}
(i) Suppose that $0<x<1$. Then $x$ is rational if and only if
the process described above terminates.
\end{lemma}
% NOTE(review): some text appears to be missing from the source here;
% the exercise below has been reconstructed minimally (the constant
% $\sigma$ is the golden ratio, as the later discussion of approximation
% by rationals indicates) -- please check against the original notes.
Let $\sigma=(\sqrt{5}+1)/2$.
\begin{exercise}
Show that there exists an $m>0$ such that
\[\left|\frac{p}{q}-\sigma\right|>\frac{m}{q^{2}}\]
whenever $p$ and $q$ are integers with $q\geq 1$.
Exercise~\ref{Q;bounded entry} gives a more general
version of this idea.
\end{exercise}
\begin{exercise}\label{E;Carroll}
In one of Lewis Carroll's favourite
puzzles an $8\times 8$ square is reassembled
to form a $13\times 5$ rectangle as shown in Figure~\ref{puzzle}.
\begin{figure}[h]
\vspace{10cm}
\caption{Carroll's puzzle}\label{puzzle}
\end{figure}
What is the connection with Exercise~\ref{E;Lewis}?
Can you design the next puzzle in the sequence?
\end{exercise}
Hardy and Wright's
\emph{An Introduction to the Theory of Numbers}~\cite{Hardy} contains
a chapter on approximation by rationals in which they
show, among other things, that $\sigma$ is indeed
particularly resistant to being so approximated by rationals.
If I was asked to nominate a book to be taken away
by some one leaving \emph{professional} mathematics,
but wishing to keep up an interest in the subject,
this book would be my first choice.
\section{A nice formula}
This section is non-examinable
and will only be done if time allows.
The notion of a continued fraction can be extended
in many ways.
We are used to the idea of approximating functions $f$
by polynomials $P$. Sometimes it may be more useful
to approximate $f$ by a rational function $P/Q$ where
$P$ and $Q$ are polynomials. If we approximate by
polynomials we are led to look at Taylor series.
If we approximate by
rational functions it might be worth
looking at some generalisation of continued
fractions.
Here is a very pretty formula along these lines.
\[\tan x=\cfrac{x}
{1-\cfrac{x^{2}}
{3-\cfrac{x^{2}}
{5-\cfrac{x^{2}}
{7-\dots}}}}.\]
The following theorem makes the statement precise.
\begin{theorem}\label{T;Lambert}
If we write
\[R_{n}(x)=\cfrac{x}
{1-\cfrac{x^{2}}
{3-\cfrac{x^{2}}
{\ddots-\cfrac{x^{2}}
{2n-3 -\cfrac{x^{2}}
{2n-1}}}}},\]
then $R_{n}(x)\rightarrow\tan x$
as $n\rightarrow\infty$ for all
real $x$ with $|x|\leq 1$.
\end{theorem}
In order to attack this we start by generalising an earlier result.
\begin{exercise}\label{E;generalise continued matrices}
Suppose that $a_{j}$
and $b_{j}$ $[j=0,1,2,\dots]$ are chosen so that we never divide by zero
(for example all strictly positive).
Show that if
\[\begin{pmatrix}p_{n}&b_{n}p_{n-1}\\q_{n}&b_{n}q_{n-1}\end{pmatrix}
=\begin{pmatrix}a_{0}&b_{0}\\1&0\end{pmatrix}
\begin{pmatrix}a_{1}&b_{1}\\1&0\end{pmatrix}
\dots
\begin{pmatrix}a_{n}&b_{n}\\1&0\end{pmatrix},
\]
then
\[\frac{p_{n}}{q_{n}}
=a_{0}+\cfrac{b_{0}}
{a_{1}+\cfrac{b_{1}}
{a_{2}+\cfrac{b_{2}}
{a_{3}+\cfrac{b_{3}}
{a_{4}+\cfrac{b_{4}}
{\ddots\cfrac{\ }
{a_{n-1}+\cfrac{b_{n-1}}
{a_{n}}}}}}}}.\]
Show that
\begin{align*}
p_{n}&=a_{n}p_{n-1}+b_{n-1}p_{n-2}\\
q_{n}&=a_{n}q_{n-1}+b_{n-1}q_{n-2}.
\end{align*}
\end{exercise}
We now use the following result which is clearly related
to the manipulations used in Lemma~\ref{L;onto pi}.
\begin{lemma}\label{L;more Lambert}
Let us write
\[S_{n}(x)=\frac{1}{2^{n}n!}\int_{0}^{x}(x^{2}-t^{2})^{n}\cos t\,dt.\]
Then $S_{n}(x)=q_{n}(x)\sin x-p_{n}(x)\cos x$
where $p_{n}$ and $q_{n}$ satisfy the recurrence relations
\begin{align*}
p_{n}(x)&=(2n-1)p_{n-1}(x)-x^{2}p_{n-2}(x),\\
q_{n}(x)&=(2n-1)q_{n-1}(x)-x^{2}q_{n-2}(x)
\end{align*}
for $n\geq 2$
and $p_{0}(x)=0$, $q_{0}(x)=1$, $p_{1}(x)=x$, $q_{1}(x)=1$.
\end{lemma}
The results of this section and other interesting
topics are discussed in a book~\cite{Ball}
which is a model of how a high level
recreational mathematics text should be put together.
\section{Winding numbers} We all know that complex analysis
has a lot to say about `the number of times a curve
goes round a point'. In this final section we make the notion precise.
\begin{theorem}\label{T;track argument} Let
\[{\mathbb T}=\{z\in{\mathbb C}\,:\,|z|=1\}.\]
If $g:[0,1]\rightarrow{\mathbb T}$ is continuous
with $g(0)=e^{i\theta_{0}}$, then there is a unique
continuous function $\theta:[0,1]\rightarrow{\mathbb R}$
with $\theta(0)=\theta_{0}$
such that $g(t)=e^{i\theta(t)}$ for all $t\in[0,1]$.
\end{theorem}
The uniqueness part of Theorem~\ref{T;track argument}
follows from the next exercise.
\begin{exercise}\label{E;start argument}
Suppose $\psi,\,\phi:[0,1]\rightarrow{\mathbb R}$
are continuous with $e^{i\psi(t)}=e^{i\phi(t)}$ for all
$t\in[0,1]$. Show that there exists an integer $n$
such that $\psi(t)=\phi(t)+2n\pi$ for all
$t\in[0,1]$.
\end{exercise}
\begin{corollary}\label{C;unwind}
If $\gamma:[0,1]\rightarrow{\mathbb C}\setminus\{0\}$ is continuous
with $\gamma(0)=|\gamma(0)|e^{i\theta_{0}}$,
then there is a unique
continuous function $\theta:[0,1]\rightarrow{\mathbb R}$
with $\theta(0)=\theta_{0}$
such that $\gamma(t)=|\gamma(t)|e^{i\theta(t)}$.
\end{corollary}
\begin{definition}\label{D;winding}
If $\gamma:[0,1]\rightarrow{\mathbb C}\setminus\{0\}$
and $\theta:[0,1]\rightarrow{\mathbb R}$ are continuous
with $\gamma(t)=|\gamma(t)|e^{i\theta(t)}$ then we define
\[w(\gamma,0)=\frac{\theta(1)-\theta(0)}{2\pi}.\]
\end{definition}
Exercise~\ref{E;start argument} shows that $w(\gamma,0)$
does not depend on the choice of $\theta$.
\begin{exercise}\label{E;closed integer} (i)
If $\gamma:[0,1]\rightarrow{\mathbb C}\setminus\{0\}$
is continuous and $\gamma(0)=\gamma(1)$ (that is to say,
the path is \emph{closed}) show that $w(\gamma,0)$ is an integer.
(ii) Give an example to show that, under the conditions of~(i),
$w(\gamma,0)$ can take any integer value.
\end{exercise}
We are only interested in the winding number of \emph{closed}
curves.
If $a\in{\mathbb C}$, it is natural to define the winding number
round $a$ of a curve given by a continuous map
\[\gamma:[0,1]\rightarrow{\mathbb C}\setminus\{a\}\]
to be
\[w(\gamma,a)=w(\gamma-a,0)\]
but we shall not use this slight extension.
\begin{lemma}\label{L;dogmatic} If
$\gamma_{1},\,\gamma_{2}:[0,1]\rightarrow{\mathbb C}\setminus\{0\}$
are continuous, then the product $\gamma_{1}\gamma_{2}$ satisfies
\[w(\gamma_{1}\gamma_{2},0)=w(\gamma_{1},0)+w(\gamma_{2},0).\]
\end{lemma}
\begin{lemma} {\bf[Dog walking lemma]}\label{L;dog walk}
If
$\gamma_{1},\,\gamma_{2}:[0,1]\rightarrow{\mathbb C}\setminus\{0\}$
are continuous, $\gamma_{1}(0)=\gamma_{1}(1)$,
$\gamma_{2}(0)=\gamma_{2}(1)$ and
\[|\gamma_{2}(t)|<|\gamma_{1}(t)|\]
for all $t\in[0,1]$, then $\gamma_{1}+\gamma_{2}$
never takes the value $0$ and $w(\gamma_{1}+\gamma_{2},0)=w(\gamma_{1},0)$.
\end{lemma}
Many interesting results in `applied complex analysis'
are obtained by `deforming contours'. The idea of
`continuously deforming curves'
can be made precise in a rather clever manner.
\begin{definition}\label{D;homotopy}
Suppose that $\gamma_{0}$, $\gamma_{1}$ are closed paths
not passing through $0$
(so we have, $\gamma_{j}:[0,1]\rightarrow{\mathbb C}\setminus\{0\}$).
Then we say that $\gamma_{0}$ is homotopic to $\gamma_{1}$
by closed curves not passing through zero if
we can find a continuous function
$\Gamma:[0,1]^{2}\rightarrow{\mathbb C}\setminus\{0\}$
such that
\begin{alignat*}{2}
\Gamma(s,0)&=\Gamma(s,1)&&\qquad\text{for all $s\in[0,1]$},\\
\Gamma(0,t)&=\gamma_{0}(t)&&\qquad\text{for all $t\in[0,1]$},\\
\Gamma(1,t)&=\gamma_{1}(t)&&\qquad\text{for all $t\in[0,1]$}.
\end{alignat*}
\end{definition}
We often write $\gamma_{s}(t)=\Gamma(s,t)$.
\begin{exercise}\label{E;homotopy equivalence}
If $\gamma_{0}$ and $\gamma_{1}$
satisfy the conditions of Definition~\ref{D;homotopy},
we write $\gamma_{0}\simeq\gamma_{1}$. Show that $\simeq$
is an equivalence relation on closed curves not passing through zero.
\end{exercise}
The proof of the next theorem illustrates
the utility of Definition~\ref{D;homotopy}.
The proof itself is sometimes referred to
as `dog walking along a canal'.
\begin{theorem}\label{T;canal} If $\gamma_{0}$ and $\gamma_{1}$
satisfy the conditions of Definition~\ref{D;homotopy},
then $w(\gamma_{0},0)=w(\gamma_{1},0)$.
\end{theorem}
As before, let us write
\begin{gather*}
{\bar D}=\{z\in{\mathbb C}\,:\,|z|\leq 1\},\\
D=\{z\in{\mathbb C}\,:\,|z|<1\},\\
\partial D=\{z\in{\mathbb C}\,:\,|z|= 1\}.
\end{gather*}
\begin{corollary}\label{C;dizy dog} Suppose
$f:{\bar D}\rightarrow{\mathbb C}$
is continuous, $f(z)\neq 0$ for $z\in\partial D$,
and we define $\gamma:[0,1]\rightarrow{\mathbb C}$
by
\[\gamma(t)=f(e^{2\pi it})\]
for all $t\in[0,1]$. If $w(\gamma,0)\neq 0$, then
there
must exist a $z\in D$ with $f(z)=0$.
\end{corollary}
This gives us another proof of the Fundamental Theorem of Algebra
(Theorem~\ref{T;Fundamental Algebra}).
\begin{corollary}\label{C;money}
If we work
in the complex numbers, every non-trivial polynomial has a root.
\end{corollary}
We also obtain a second proof of Brouwer's theorem
in two dimensions in the `no retraction' form
of Theorem~\ref{T;Brouwer retracts}.
\begin{corollary}\label{C;second Brouwer}
There does not exist a continuous function
$f:{\bar D}\rightarrow\partial D$
with $f(z)=z$ for all $z\in \partial D$.
\end{corollary}
The earlier combinatorial
proof that we gave requires less technology to extend
to higher dimensions.
The contents of this section show that parts of complex analysis
are really just special cases of general `topological theorems'.
On the other hand, other parts (such as Taylor's theorem
and Cauchy's theorem itself) depend crucially on the
fact that we are dealing with the very restricted class
of functions which satisfy the Cauchy-Riemann equations.
In traditional courses on complex analysis, this fact appears,
if it appears at all, rather late in the day. Beardon's
\emph{Complex Analysis}~\cite{Beardon} shows that
it is possible to do things differently and is well worth
a look\footnote{Though, like other good texts, it will not please
those `who want from books, plain cooking made still plainer
by plain cooks.'}.
\begin{thebibliography}{9}
\bibitem{Adams} C. Adams, \emph{Zombies and Calculus},
Princeton University Press, 2014. (I have not read this,
but according to one reviewer,
the book
`shows how calculus can be used to understand
many different real-world phenomena.')
\bibitem{Baker} A. Baker, \emph{Transcendental number theory},
CUP, 1975.
\bibitem{Ball} K. Ball,
\emph{Strange Curves, Counting Rabbits
and Other Mathematical Explorations},
Princeton University Press, 2003.
\bibitem{Beardon} A. F. Beardon \emph{Complex Analysis},
Wiley, 1979.
\bibitem{Hardy} G.~H.~Hardy and E.~M.~Wright
\emph{An Introduction to the Theory of Numbers}
OUP, 1937.
\bibitem{Thomas} L.~C.~Thomas \emph{Games, Theory and Applications},
Dover reprint of a book first published by Wiley in 1984.
\end{thebibliography}
\clearpage
\section{Question sheet 1}\label{S;sheet one}
\noindent{\bf Note} The Pro-Vice-Chancellor for Education has
determined that the amount of work required by a student to
understand a course should be the same as that required by
any other student. Traditionally, teachers and students in the Faculty
of Mathematics have interpreted this as meaning that each example
sheet should have exactly twelve questions. The number of questions in the
example sheets for this course may be reduced to twelve by
omitting any marked with a $\bigstar$.
\begin{question}\label{Q;some metrics}\label{A1.1}
(i) Consider ${\mathbb R}^{n}$.
If we take $d$ to be ordinary Euclidean distance
\[d({\mathbf x},{\mathbf y})=\|{\mathbf x}-{\mathbf y}\|=
\left(\sum_{j=1}^{n}|x_{j}-y_{j}|^{2}\right)^{1/2},\]
show that $({\mathbb R}^{n},d)$ is a metric space.
\noindent[Hint: Use inner products.]
(ii) Consider ${\mathbb C}$. If we take
$d(z,w)=|z-w|$, show that $({\mathbb C},d)$ is a metric space.
(iii) Let $X$ be a non-empty set. If we write
\[d(x,y)=
\begin{cases}
0&\text{if $x=y$},\\
1&\text{if $x\neq y$},
\end{cases}
\]
show that $(X,d)$ is a metric space ($d$ is called the discrete metric).
(iv) Consider $X=\{0,1\}^{n}$. If we take
\[d({\mathbf x},{\mathbf y})=\sum_{j=1}^{n}|x_{j}-y_{j}|\]
then $(X,d)$ is a metric space.
\end{question}
\begin{question}\label{A1.2}
(i) Give an example of a continuous bijection
$f:{\mathbb R}\rightarrow{\mathbb R}$ with no fixed points.
(ii) If $A$ is an infinite countable set, show that
there exists a bijection $f:A\rightarrow A$
with no fixed points. What can you say if $A$ is a finite non-empty set?
(Be careful to cover all possible cases.)
$[$Remark: We can replace `infinite countable' by `infinite'
provided we accept the appropriate set theoretic axioms.$]$
\end{question}
\begin{question}\label{A1.3} (i) Show that a subset $E$ of ${\mathbb R}^{m}$
(with the usual metric) is compact if
every continuous function $f:E\rightarrow{\mathbb R}$
is bounded.
(ii) Show that a subset $E$ of ${\mathbb R}^{m}$
(with the usual metric) is compact if
every bounded continuous function $f:E\rightarrow{\mathbb R}$
attains its bounds.
\end{question}
\begin{question}\label{A1.4}
Suppose that $d$ is the usual metric on ${\mathbb R}^{m}$,
$X$ is a compact set in ${\mathbb R}^{m}$
and $f:X\rightarrow X$ is a continuous
\emph{distance increasing} map. In other words,
\[d(f(x),f(y))\geq d(x,y)\]
for all $x,\,y\in X$. The object of this question
is to show that $f$ must be a surjection.
(i) Let $f^{0}(x)=x$ and
$f^{n}(x)=f(f^{n-1}(x))$ for $n\geq 1$. Explain why
$X_{\infty}=\bigcap_{n=0}^{\infty}f^{n}(X)$
is compact and why the map $g:X_{\infty}\rightarrow X_{\infty}$
is well defined by $f\big(g(y)\big)=y$.
(ii) If $z\in X$, consider the sequence $f^{n}(z)$.
By using compactness
and part~(i) show that given $\epsilon>0$
we can find a $w\in X_{\infty}$ such that $d(w,z)<\epsilon$.
Deduce that $f$ is surjective.
(iii) Let $X=\{x\in{\mathbb R}\,:\,x\geq 0\}$.
Find a continuous function $f:X\rightarrow X$
such that $|f(x)-f(y)|\geq 2|x-y|$ for all $x,\,y\in X$
but $f(X)\neq X$. Why does this not contradict (ii)?
(iv) We work in ${\mathbb C}$.
Let $\alpha$ be irrational and let $\omega=\exp(2\pi\alpha i)$.
If
\[X=\{\omega^{n}\,:\,n\geq 1\},\]
find a continuous function $f:X\rightarrow X$
such that $|f(w)-f(z)|=|w-z|$ for all $w,\,z\in X$
but $f(X)\neq X$. Why does this not contradict (ii)?
\end{question}
\begin{question}\label{A1.5} A function $f:{\mathbb R}^{n}
\rightarrow{\mathbb R}$ is called upper semi-continuous
if, given ${\mathbf x}\in {\mathbb R}^{n}$ and $\epsilon>0$,
we can find a $\delta>0$ such that
\[\|{\mathbf x}-{\mathbf y}\|<\delta
\Rightarrow f({\mathbf y})\leq f({\mathbf x})+\epsilon.\]
(i) If $E$ is a subset of ${\mathbb R}^{n}$ and
we define the indicator function ${\mathbb I}_{E}$
by
\[{\mathbb I}_{E}({\mathbf x})=
\begin{cases}1&\text{if ${\mathbf x}\in E$,}\\
0&\text{otherwise,}
\end{cases}
\]
show that ${\mathbb I}_{E}$ is upper semi-continuous
if and only if $E$ is closed.
(ii) State and prove necessary and sufficient conditions
for $-{\mathbb I}_{E}$ to be upper semi-continuous.
(iii) If $f:{\mathbb R}^{n}
\rightarrow{\mathbb R}$ is upper semi-continuous
and $K$ is compact show that there exists a ${\mathbf z}\in K$
such that $f({\mathbf z})\geq f({\mathbf k})$
for all ${\mathbf k}\in K$. (In other words $f$ attains a
maximum on $K$.)
(iv) If $g:{\mathbb R}\rightarrow{\mathbb R}$ is defined by
\[g(x)=
\begin{cases}-1/|x|&\text{if $x\neq 0$,}\\
0&\text{if $x=0$,}
\end{cases}
\]
show that $g$ is upper semi-continuous but
that $g$ is unbounded on $[-1,1]$.
\end{question}
\begin{question}\label{A1.6} Let
\begin{align*}
{\mathbb H}&=\{z\in{\mathbb C}\,:\,\Re z>0\},\\
\bar{\mathbb H}&=\{z\in{\mathbb C}\,:\,\Re z\geq 0\},\\
{\mathbb D}&=\{z\in{\mathbb C}\,:\,|z|<1\},\\
\bar{\mathbb D}&=\{z\in{\mathbb C}\,:\, |z|\leq 1\}.
\end{align*}
(i) Does every bijective continuous map $f:{\mathbb C}\rightarrow{\mathbb C}$
have a fixed point?
(ii) Does every bijective continuous map $f:{\mathbb H}\rightarrow{\mathbb H}$
have a fixed point?
(iii) Does every bijective continuous map
$f:\bar{\mathbb H}\rightarrow\bar{\mathbb H}$
have a fixed point?
(iv) Does every bijective continuous map $f:{\mathbb D}\rightarrow{\mathbb D}$
have a fixed point?
(v) Does every bijective continuous map
$f:\bar{\mathbb D}\rightarrow\bar{\mathbb D}$
have a fixed point?
Give reasons for your answers.
\end{question}
\begin{question}(Exercise~\ref{E;two Brouwer})\label{A1.7}
Show that the following four statements are equivalent.
(i) If $f:[0,1]\rightarrow [0,1]$ is continuous, then we can find
a $w\in[0,1]$ such that $f(w)=w$.
(ii) There does not exist a continuous function $g:[0,1]\rightarrow\{0,1\}$
with $g(0)=0$ and $g(1)=1$.
(In topology courses we say that $[0,1]$ is connected.)
(iii) If $A$ and $B$ are closed subsets of $[0,1]$ with
$0\in A$, $1\in B$ and $A\cup B=[0,1]$
then $A\cap B\neq\emptyset$.
(iv) If $h:[0,1]\rightarrow {\mathbb R}$ is continuous
and $h(0)\leq c\leq h(1)$, then we can find
a $y\in[0,1]$ such that $h(y)=c$.
\end{question}
\begin{question}(Exercise~\ref{E;two Sperner})\label{A1.8}
Suppose
that we colour the points $r/n$
red or blue $[r=0,\,1,\,\dots,\,n]$ with $0$ red
and $1$ blue. Show that there are a pair of neighbouring points
$u/n$, $(u+1)/n$ of different colours. Use this result
to prove statement~(iii) of Exercise~\ref{E;two Brouwer}.
\end{question}
\begin{question}\label{Q;surjection}\label{A1.9}
Suppose that $g:{\mathbb R}^{2}\rightarrow{\mathbb R}^{2}$
is a continuous function such that there exists a $K>0$ with
$\|g({\mathbf x})-{\mathbf x}\|\leq K$ for all ${\mathbf x}\in{\mathbb R}^{2}$.
(i) By constructing a function $f:{\mathbb R}^{2}\rightarrow{\mathbb R}^{2}$,
taking a disc into itself, and such that
\[f({\mathbf t})={\mathbf t}\Rightarrow g({\mathbf t})={\boldsymbol 0}\]
show that ${\boldsymbol 0}$ lies in the image of $g$.
(ii) Show that, in fact, $g$ is surjective.
(iii) Is it necessarily true that $g$ has a fixed point? Give reasons.
(iv) Is $g$ necessarily injective? Give reasons.
\end{question}
\begin{question}\label{A1.10}
Use the Brouwer fixed point theorem to show that there
is a complex number $z$ with $|z|\leq 1$ and
\[z^{4}-z^{3}+8z^{2}+11z+1=0.\]
\end{question}
\begin{question}\label{Q;Brouwer intersection}\label{A1.11}
Consider the square $S=[-1,1]^{2}$. Suppose
that $\beta,\,\gamma:[-1,1]\rightarrow S$ are continuous
with $\beta(-1)=(-1,-1)$, $\beta(1)=(1,1)$,
$\gamma(-1)=(-1,1)$, $\gamma(1)=(1,-1)$.
The object of this question is to show that
there exist $(s_{0},t_{0})\in [-1,1]^{2}$
such that $\beta(s_{0})=\gamma(t_{0})$.
(Note that this is just a formal version
of Exercise~\ref{E;informal intersection}.)
Our proof will be by contradiction, so assume that no
such $(s_{0},t_{0})$ exists. We write
\[\beta(s)=\big(\beta_{1}(s),\beta_{2}(s)\big),
\ \gamma(t)=\big(\gamma_{1}(t),\gamma_{2}(t)\big)\]
(i) Show carefully that the function $F:S\rightarrow S$
given by
\[F(s,t)=\frac{-1}{\max\{|\beta_{1}(s)-\gamma_{1}(t)|,
|\beta_{2}(s)-\gamma_{2}(t)|\}}
\big(\beta_{1}(s)-\gamma_{1}(t),\beta_{2}(s)-\gamma_{2}(t)\big)\]
is well defined and continuous.
(ii) Show by considering
the possible values of the fixed points of $F$ or otherwise
that $F$ has no fixed points. Brouwer's fixed point theorem now gives a
contradiction.
\end{question}
\begin{question}\label{A1.12}
Here is a variation on Lemma~\ref{L;colour me}~(ii).
It can be proved in the same way.
Suppose that $T$, $I$, $J$, $K$ are as in Lemma~\ref{L;colour me}
and that $A$, $B$ and $C$ are closed subsets of $T$
with
\begin{gather*}A\cup B\cup C=T,\\
A\cup B\supseteq I,\ B\cup C\supseteq J,
\ C\cup A\supseteq K,\\
A\supseteq K\cap I,\ B\supseteq I\cap J,\ C\supseteq J\cap K.
\end{gather*}
Show that $A\cap B\cap C\neq\emptyset$.
\end{question}
\begin{question}\label{Q;Schwarz}$\bigstar$\label{A1.13}
Cantor started the researches which led him to
his studies of infinite sets by looking at work of Riemann on
trigonometric series. He needed to show that if
$F:[a,b]\rightarrow{\mathbb R}$ is continuous and
\[\frac{F(x+h)-2F(x)+F(x-h)}{h^{2}}\rightarrow 0\]
as $h\rightarrow 0$ for all $x\in (a,b)$ then $F$ is linear.
(Note that there are no differentiability conditions on $F$.)
Schwarz was able to supply a proof.
(i) Suppose that $F:[a,b]\rightarrow{\mathbb R}$ is continuous,
$F(a)=F(b)$ and there exists an $\epsilon>0$ such that
\[\limsup_{h\rightarrow 0}\frac{F(x+h)-2F(x)+F(x-h)}{h^{2}}\geq \epsilon\]
for all $x\in (a,b)$. Show that $F$ cannot attain a maximum at
any $x\in(a,b)$. Deduce that
\[F(x)\leq F(a)\]
for all $x\in [a,b]$.
(ii) Suppose that $F:[a,b]\rightarrow{\mathbb R}$ is continuous,
$F(a)=F(b)$ and
\[\limsup_{h\rightarrow 0}\frac{F(x+h)-2F(x)+F(x-h)}{h^{2}}\geq 0\]
for all $x\in (a,b)$. Let $c=(a+b)/2$.
By considering $G(x)=F(x)+\epsilon (x-c)^{2}/4$ or otherwise,
show that
\[F(x)\leq F(a)\]
for all $x\in [a,b]$.
(iii) Suppose that $F:[a,b]\rightarrow{\mathbb R}$ is continuous,
$F(a)=F(b)$ and
\[\lim_{h\rightarrow 0}\frac{F(x+h)-2F(x)+F(x-h)}{h^{2}}=0\]
for all $x\in (a,b)$. By considering $F$ and $-F$ show
that
\[F(x)=F(a)\]
for all $x\in [a,b]$.
(iv) Show that if
$F:[a,b]\rightarrow{\mathbb R}$ is continuous and
\[\frac{F(x+h)-2F(x)+F(x-h)}{h^{2}}\rightarrow 0\]
as $h\rightarrow 0$ for all $x\in (a,b)$ then $F$ is linear.
\end{question}
\begin{question}$\bigstar$\label{A1.14}
As usual $\bar{D}$ is the closed unit disc in ${\mathbb R}^{2}$
and $\partial D$ its boundary. Let us write
\[\Delta=\{({\mathbf x},{\mathbf y})\in\bar{D}^{2}\,:\,
{\mathbf x}\neq{\mathbf y}\}\]
and consider $\Delta$ as a subset of ${\mathbb R}^{4}$ with the
usual metric.
We define $F:\Delta\rightarrow\partial D$ as follows.
Given $({\mathbf x},{\mathbf y})\in\Delta$, take the line
from ${\mathbf x}$ to ${\mathbf y}$ and extend it
(in the ${\mathbf x}$ to ${\mathbf y}$ direction) until it first
hits the boundary at ${\mathbf z}$. We write
$F({\mathbf x},{\mathbf y})={\mathbf z}$.
The lecturer claimed that it was obvious that $F$ was continuous.
Suppose, if possible, that $g:\bar{D}\rightarrow\bar{D}$ is a continuous
map with $g({\mathbf x})\neq{\mathbf x}$ for all ${\mathbf x}\in\bar{D}$.
Explain why it follows from the lecturer's claim that the map
\[{\mathbf x}\mapsto F\big({\mathbf x},g({\mathbf x})\big)\]
is a continuous map.
The claimed result is obvious (in some sense) and you may take
it as obvious in the exam. However, if we cannot prove the
obvious it ceases to be obvious. This question
outlines one method of proof, but, frankly, the
reader may find it easier to find their own method.
Any correct method counts as a solution.
% NOTE(review): this line was garbled in extraction; reconstructed minimally.
(i) Suppose that $x_{0}>0$ and $x_{0}^{2}+y_{0}^{2}=1$.
Show that, given $\epsilon>0$
we can find an $\eta>0$ such that, if $x^{2}+y^{2}=1$,
$x>0$ and $|y-y_{0}|<\eta$, then $\|(x,y)-(x_{0},y_{0})\|<\epsilon$.
(ii) Suppose that $(x_{1},y_{0}),(x_{2},y_{0})\in{\bar D}$,
$y_{0}\geq 0$ and
$x_{1}\neq x_{2}$. By using~(i), or otherwise, show
that, given any $\epsilon>0$, we can find a $\delta>0$
such that, whenever
\[\|(x_{1}',y_{1}')-(x_{1},y)\|,\,\|(x_{2}',y_{2}')-(x_{2},y)\|<\delta\]
and $(x_{1}',y_{1}'),\,(x_{2}',y_{2}')\in\bar{D}$, we have
$(x_{1}',y_{1}')\neq (x_{2}',y_{2}')$ and
\[F\big((x_{1}',y_{1}'),(x_{2}',y_{2}')\big)=(u,v)
\ \text{with}\ |v-y|<\epsilon.\]
(iii) Hence show that $F:\Delta\rightarrow\bar{D}$ is continuous.
\end{question}
\clearpage
\section{Question sheet 2}
\begin{question}\label{A2.1}
In the two player game of Hawks and Doves,
player~$i$ chooses a probability $p_{i}$ which they
announce publicly. Players may change their mind
before the game begins but must stick to their last
announced decision.
Once the game begins, player~$i$ becomes a hawk with probability
$p_{i}$ and a dove with probability $1-p_{i}$. Two doves
divide food so that each gets $V/2$. A hawk scares
off a dove, so the hawk gets $V$ and the dove $0$.
Two hawks fight, the winner gets $V-D$ and the loser
$-D$ ($D$ is the damage). The probability of winning such
an encounter is $1/2$ for each bird.
If $V>2D$ show that there is only one Nash equilibrium point.
Give a simple explanation of this fact.
If $V<2D$ show that there are three equilibrium points
and identify them.
What happens if $V=2D$?
\end{question}
\begin{question}\label{A2.2}
(i) Suppose that $E$ is a compact convex set
in ${\mathbb R}^{n}$, that
$\alpha:{\mathbb R}^{n}\rightarrow {\mathbb R}^{m}$ is linear
and ${\mathbf b}\in{\mathbb R}^{m}$. Show that
\[\{{\mathbf b}+\alpha{\mathbf x}\,:\,{\mathbf x}\in E\}\]
is compact and convex.
(ii) Suppose that $E$ is a compact convex set
in ${\mathbb R}^{n}$, that
$f:{\mathbb R}^{n}\rightarrow {\mathbb R}^{m}$ is continuous
and ${\mathbf b}\in{\mathbb R}^{m}$. Set
\[E'=\{{\mathbf b}+f({\mathbf x})\,:\,{\mathbf x}\in E\}\]
\ (a) Is $E'$ necessarily convex if $n=1$?
\ (b) Is $E'$ necessarily convex if $m=1$?
\ (c) Is $E'$ necessarily convex for general $m$ and $n$?
Give reasons.
\end{question}
\begin{question}\label{A2.3}
(If you have done the 1B optimisation course.)
We use the notation of Theorem~\ref{T;Nash stable}.
Suppose that $a_{ij}=-b_{ij}$, that is to say that
Albert's gain is Bertha's loss.
Explain why the 1B
game theoretic solution will always be a Nash equilibrium
point.
\end{question}
\begin{question}\label{A2.4}(This is Exercise~\ref{E;Advertising})
Consider
two rival firms $A$ and $B$ engaged in an advertising war.
So long as the war continues, the additional costs
of advertising mean that the larger firm $A$ loses $3$ million
pounds a year and the smaller firm $B$ loses $1$ million pounds a year.
If they can agree to cease hostilities then $A$ will make
$8$ million a year and $B$ will make $1$ million a year.
How much does Nash say
$A$ should pay $B$ per year to achieve this end?
\end{question}
\begin{question}\label{Q;unit ball}\label{A2.5}
Consider the continuous functions on $[0,1]$ with the
uniform norm.
Show that the unit ball
\[\{f\in C([0,1])\,:\,\|f\|_{\infty}\leq 1\}\]
is a closed bounded subset of the complete space
$(C([0,1]),\|\ \|_{\infty})$,
but is not compact.
\end{question}
\begin{question}\label{A2.6}
(i) Let $f:[0,1]\rightarrow{\mathbb R}$
be a continuous function which is not a polynomial.
If $p_{n}$ is a polynomial of degree $d_{n}$
and $p_{n}\rightarrow f$ uniformly on $[0,1]$, show
that $d_{n}\rightarrow \infty$.
$[$Hint. Look at Corollary~\ref{C;naught for naught}.$]$
(ii) If $q_{n}$ is a polynomial of degree $e_{n}$
with $e_{n}\rightarrow\infty$
and $q_{n}\rightarrow g$ uniformly on $[0,1]$,
does it follow that $g$ is not a polynomial?
Give reasons.
\end{question}
\begin{question}\label{A2.7}
Show that no formula of the form
\[\int_{-1}^{1}f(t)\,dt=\sum_{j=1}^{n}A_{j}f(x_{j})\]
(with $x_{j}, A_{j}\in{\mathbb R}$) can hold
for polynomials $f$ of degree $2n$.
\end{question}
\begin{question}\label{A2.8} Let $f:[0,1]\rightarrow{\mathbb R}$
and $g:[-1,1]\rightarrow{\mathbb R}$ be continuous.
(i) By using the Weierstrass approximation theorem,
show that
\[\int_{0}^{1}x^{n}f(x)\,dx=0\ \text{for all $n\geq 0$}
\Rightarrow f\ \text{is the zero function}.\]
(ii) Show that
\[\int_{0}^{1}x^{2n}f(x)\,dx=0\ \text{for all $n\geq 0$}
\Rightarrow f\ \text{is the zero function}.\]
(iii) Is it true that if $\int_{0}^{1}x^{2n+1}f(x)\,dx=0$
for all $n\geq 0$, then $f$ must be the zero-function?
Give reasons.
(iv) Is it true that, if $\int_{-1}^{1}x^{2n}g(x)\,dx=0$
for all $n\geq 0$, then $g$ must be the zero-function?
Give reasons.
\end{question}
\begin{question}\label{A2.9}
(i) (This is just to remind you that
discontinuous functions come in many shapes and sizes.)
Let $f:{\mathbb R}\rightarrow{\mathbb R}$
be given by $f(x)=\sin 1/x$ for $x\neq 0$ and $f(0)=a$. Show that,
whatever the choice of $a$, $f$ is discontinuous.
(ii) Does there exist a discontinuous function
$g:[0,1]\rightarrow{\mathbb R}$ which can be
approximated uniformly by polynomials? Why?
(iii) Does there exist a smooth function
$h:{\mathbb R}\rightarrow{\mathbb R}$ which cannot be
approximated uniformly by polynomials? Prove
your answer.
\end{question}
\begin{question}\label{A2.10}
We say that a function
$f:{\mathbb R}\rightarrow{\mathbb R}$
has the intermediate value property if whenever $a,b\in{\mathbb R}$
and $f(a)\geq c\geq f(b)$ we can find a $t$ in the closed
interval with end points $a$ and $b$ such that $f(t)=c$.
(i) Give an example of a function satisfying the intermediate value
property which is not continuous.
(ii) Show that if $f$ has the intermediate value property
and in addition $f^{-1}(\alpha)$ is closed for every $\alpha$ in a dense
subset of ${\mathbb R}$ then $f$ is continuous.
\end{question}
\begin{question}$\bigstar$\label{A2.11}
Are the following statements true or false?
Give reasons.
(i) If $f:(0,1)\rightarrow{\mathbb R}$ is continuous,
we can find a sequence of polynomials $P_{n}$ converging
uniformly to $f$ on every compact subset of $(0,1)$.
(ii) If $g:(0,1)\rightarrow{\mathbb R}$ is continuously differentiable
we can find a sequence of polynomials $Q_{n}$ with $Q'_{n}$
converging uniformly to $g'$ and $Q_{n}$
converging uniformly to $g$
on every compact subset of $(0,1)$.
(iii) If $h:(0,1)\rightarrow{\mathbb R}$ is continuous
and bounded we can find a sequence of polynomials $R_{n}$
with
\[\sup_{t\in (0,1)}|R_{n}(t)|\leq \sup_{t\in (0,1)}|h(t)|\]
converging
uniformly to $h$ on every compact subset of $(0,1)$.
\end{question}
\begin{question}$\bigstar$\label{A2.12} Compute
the Chebychev polynomials $T_{n}$ of the first kind
for $n=0,\,1,\,2,\,\dots,\,4$
and the Chebychev polynomials $U_{n-1}$ of the second
kind for $n=1,\,2,\,\dots,\,4$.
Recall that
we say that a function $f:[-1,1]\rightarrow{\mathbb R}$
is \emph{even} if $f(x)=f(-x)$ for all $x$ and
\emph{odd} if $f(x)=-f(-x)$ for all $x$.
Explain why we know, without calculation,
that the Chebychev polynomials $T_{n}$
are even when $n$ is even and odd when $n$ is odd.
What can you say about the Chebychev polynomials $U_{n}$
of the second kind?
\end{question}
\begin{question}\label{A2.13}
The Chebychev polynomials are orthogonal
with respect to a certain non-zero positive weight function $w$.
In other words,
\[\int_{-1}^{1}T_{m}(x)T_{n}(x)w(x)\,dx=0\]
for all $m\neq n$. Use a change of variables
to find a suitable $w$.
\end{question}
\begin{question}\label{E;compute Legendre}$\bigstar$\label{A2.14}
(i) Use the Gramm--Schmidt method
(see Lemma~\ref{L;Gramm}) to compute the Legendre
polynomials $p_{n}$ for $n=0,\,1,\,2,\,3,\,4$.
You may leave your answers in the form $A_{n}p_{n}$
(i.e. ignore normalisation).
(ii) Explain why we know, without calculation,
that the Legendre polynomials $p_{n}$
are even when $n$ is even and odd when $n$ is odd.
(iii) Explain why
\[\frac{d^{m}\ }{dx^{m}}(1-x)^{n}(1+x)^{n}\]
vanishes when $x=1$ or $x=-1$ whenever $m<n$.
% NOTE(review): a question boundary was lost here in extraction;
% reconstructed minimally from the surviving text.
\end{question}
\begin{question}
(i) Let $f:[a,b]\rightarrow{\mathbb R}$ be continuous and let $\epsilon>0$.
Why can we find
an infinitely differentiable function $g:[a,b]\rightarrow{\mathbb R}$
such that $\|f-g\|_{\infty}<\epsilon$?
(ii) By using Chebychev polynomials
and Weierstrass's approximation theorem, show that
given any continuous $f:[0,\pi]\rightarrow{\mathbb R}$
and any $\epsilon>0$
we can find $N$ and $a_{j}\in{\mathbb R}$ $0\leq j\leq N$
such that
\[\left|f(s)-\sum_{j=0}^{N}a_{j}\cos js\right|<\epsilon\]
for all $s\in[0,\pi]$.
(iii) Let $\epsilon>0$. If $f:[0,\pi]\rightarrow{\mathbb R}$
is continuous with $f(0)=0$ show that
we can find $N$ and $b_{j}\in{\mathbb R}$ $0\leq j\leq N$
such that
\[\left|f(s)-b_{0}s-\sum_{j=0}^{N}b_{j}\sin js\right|<\epsilon\]
for all $s\in[0,\pi]$.
(iv) Let $\epsilon>0$. If $f:[0,\pi]\rightarrow{\mathbb R}$
is continuous with $f(0)=f(\pi)=0$ show that
we can find $N$ and $b_{j}\in{\mathbb R}$ $0\leq j\leq N$
such that
\[\left|f(s)-\sum_{j=0}^{N}b_{j}\sin js\right|<\epsilon\]
for all $s\in[0,\pi]$.
(v) Hence show that
given any continuous $f:[-\pi,\pi]\rightarrow{\mathbb R}$
with $f(-\pi)=f(\pi)$
and any $\epsilon>0$
we can find $N$ and $b_{j},c_{j}\in{\mathbb R}$
such that
\[\left|f(t)-\sum_{j=0}^{N}b_{j}\cos jt
-\sum_{j=1}^{N}c_{j}\sin jt\right|<\epsilon\]
for all $t\in[-\pi,\pi]$.
\end{question}
\clearpage
\section{Question sheet 3}
\begin{question}\label{A3.1} Let $f:[-1,1]\rightarrow{\mathbb R}$ be
a function and let $M>0$. Show that there
exists at most one polynomial of degree $n$ such
that
\[|f(x)-P(x)|\leq M|x|^{n+1}\]
for all $x\in [-1,1]$.
Must there always exist such a $P$ if $f$ is everywhere infinitely
differentiable and we choose $M$ sufficiently large?
\end{question}
\begin{question}\label{E;slow Tchebychev}\label{A3.2}
Let $T_{j}$ be the $j$th Chebychev
polynomial. Suppose that $\gamma_{j}$ is a sequence
of non-negative numbers such that $\sum_{j=1}^{\infty}\gamma_{j}$
converges. Explain why $\sum_{j=1}^{\infty}\gamma_{j}T_{3^{j}}$
converges uniformly on $[-1,1]$ to a continuous function $f$.
Let us write $P_{n}=\sum_{j=1}^{n}\gamma_{j}T_{3^{j}}$.
Show that we can find points
\[-1\leq x_{0}<x_{1}<\dots<x_{n}\leq 1.\]
% NOTE(review): material was lost here in extraction (the end of this
% question and at least one intervening question); reconstructed minimally.
\end{question}
\begin{question}
Let $f:[0,1]^{2}\rightarrow{\mathbb R}$ be continuous. Show that, given
$\epsilon>0$,
we can find a polynomial $P$ in
two variables such that
\[|f(x,y)-P(x,y)|<\epsilon\]
for all $x,\,y\in[0,1]$.
\end{question}
\begin{question}\label{A3.5}
(Not very much to do with the course
but a nice question which you should have
met at least once in your life.)
Suppose $f:[-1,1]^{2}\rightarrow{\mathbb R}$
is a bounded function such that the map $x\mapsto f(x,y)$ is continuous for
each fixed $y$ and the map $y\mapsto f(x,y)$ is continuous for
each fixed $x$. By means of a proof or counterexample
establish whether $f$ is necessarily continuous.
\end{question}
The next three questions give alternative proofs of
Weierstrass's theorem. Each involves some heavy lifting
but each introduces ideas which are very useful in a variety
of circumstances. If you are finding the course heavy going
or your busy social schedule limits the time you can spend
thinking to an absolute minimum you can skip them.
If you want to do any sort of analysis in the future
they are highly recommended.
\begin{question}\label{A3.6}\label{E;long Bernstein}
Here is an alternative
proof of Bernstein's theorem using a different set of ideas.
(i) Let $f\in C([0,1])$. Show that given $\epsilon>0$ we can find an $A>0$
such that
\[f(x)+A(t-x)^{2}+\epsilon/2\geq f(t)\geq f(x)-A(t-x)^{2}-\epsilon/2\]
for all $t,\,x\in [0,1]$.
(ii) Now show that we can find an $N$ such that, writing
\[h_{r}(t)=f(r/N)+A(t-r/N)^{2},\ g_{r}(t)
=f(r/N)-A(t-r/N)^{2},\]
we have
\[g_{r}(t)+\epsilon\geq f(t)\geq h_{r}(t)-\epsilon\]
for $|t-r/N|\leq 1/N$. (You may find it helpful
to draw diagrams here and in~(iii).)
(iii) We say that a linear map $S:C([0,1])\rightarrow C([0,1])$ is positive
if $F(t)\geq 0$ for all $t\in[0,1]$ implies $SF(t)\geq 0$ for all $t\in [0,1]$.
Suppose that $S$ is such a positive linear operator.
Show that if $F(t)\geq G(t)$ for all $t\in[0,1]$,
then $(SF)(t)\geq (SG)(t)$ for all $t\in [0,1]$
$[F,\,G\in C([0,1])]$.
Show also that if, $F\in C([0,1])$,
then $\|SF\|_{\infty}\leq\|S1\|_{\infty}\|F\|_{\infty}$.
(iv) Write $e_{r}(t)=t^{r}$. Suppose that $S_{n}$
is a sequence of positive linear
functions such that $\|S_{n}e_{r}-e_{r}\|_{\infty}\rightarrow 0$
as $n\rightarrow\infty$
for $r=0$, $r=1$ and $r=2$. Show, using~(ii), or otherwise, that
$\|S_{n}f-f\|_{\infty}\rightarrow 0$ as $n\rightarrow\infty$ for
all $f\in C([0,1])$.
(v) Let
\[(S_{n}f)(t)=\sum_{j=0}^{n}\binom{n}{j}f(j/n)t^{j}(1-t)^{n-j}.\]
Verify that $S_{n}$ satisfies the hypotheses of part~(iv) and
deduce Bernstein's theorem.
\end{question}
\begin{question}$\bigstar$\label{E;kernel Weierstrass}\label{A3.7}
Here is another proof of Weierstrass's
theorem which is closer to his original
proof. We wish to show that any continuous
function $f:[-1/2,1/2]\rightarrow{\mathbb R}$ can
be uniformly approximated by polynomials on $[-1/2,1/2]$.
To do this we
show that any continuous function
$g:{\mathbb R}\rightarrow{\mathbb R}$ with $g(x)=0$
for $|x|\geq 1$ can
be uniformly approximated by polynomials on $[-1/2,1/2]$.
Why does this give the desired result?
Let
\[L_{n}(x)=\begin{cases}(4-x^{2})^{n}&\text{for $|x|\leq 2$,}\\
0&\text{otherwise,}
\end{cases}
\]
let
\[A_{n}=\int_{-\infty}^{\infty}L_{n}(x)\,dx\]
and let $K_{n}(x)=A_{n}^{-1}L_{n}(x)$.
(i) Show that
\[P_{n}(x)=K_{n}*g(x)=\int_{-\infty}^{\infty}K_{n}(x-t)g(t)\,dt\]
is a polynomial in $x$ on the interval $[-1/2,1/2]$.
(It may be helpful to recall that
$f*g=g*f$.)
(ii) Let $\delta>0$ be fixed. Show that
$K_{n}(x)\rightarrow 0$ uniformly for $|x|\geq\delta$
and
\[\int_{-\delta}^{\delta}K_{n}(x)\,dx\rightarrow 1\]
as $n\rightarrow\infty$.
(iii) Use the fact that $g$ is bounded and uniformly
continuous together with the formula
\[P_{n}(x)=\int_{-\delta}^{\delta}K_{n}(t)g(x-t)\,dt
+\int_{t\notin(-\delta,\delta)}K_{n}(t)g(x-t)\,dt\]
to show that $P_{n}(x)\rightarrow g(x)$ uniformly
on $[-1/2,1/2]$.
\end{question}
\begin{question}\label{E;Lebesgue}\label{A3.8}
Here is another proof
of Weierstrass's theorem, this time due to Lebesgue.
% NOTE(review): this part was garbled in extraction; reconstructed
% minimally, consistent with parts~(iii) and~(iv) below.
(i) Show that any $f\in C([0,1])$ can be uniformly approximated on $[0,1]$
by continuous piecewise linear functions.
(ii) Deduce that, given $\epsilon>0$, we can find $n\geq 1$,
$\lambda_{j}\in{\mathbb R}$ and $a_{j}\in[0,1]$
such that
\[\left|f(t)-\lambda_{0}-\sum_{j=1}^{n}\lambda_{j}|t-a_{j}|\right|
<\epsilon.\]
(iii) Let
\[u_{n}(t)=3\sqrt{\left(1+\frac{1}{n}\right)-
\left(1-\frac{t^{2}}{9}\right)}.\]
Explain using results on the general binomial expansion
(which you need not prove) why $u_{n}$ can be uniformly
approximated by polynomials on $[-2,2]$.
Explain why $u_{n}(t)\rightarrow |t|$ uniformly on $[-2,2]$
as $n\rightarrow\infty$. Deduce that there exist polynomials $q_{r}$
with $q_{r}(t)\rightarrow|t|$ uniformly on $[-1,1]$ as
$r\rightarrow\infty$.
(iv) Use (ii) and (iii) to prove the Weierstrass approximation
theorem.
\noindent$[$Lebesgue's idea provides the basis for the proof
of the more general Stone--Weierstrass theorem.$]$
\end{question}
\begin{question}\label{A3.9}(This will look less odd if you have done the
previous exercise.)
(i) Let the sequence of distinct
$x_{n}$ form a dense subset of $[0,1]$
with $x_{0}=0$, $x_{1}=1$. If $f\in C([0,1])$, define
$f_{n}:[0,1]\rightarrow{\mathbb R}$ to be the simplest
piecewise linear function with $f_{n}(x_{j})=f(x_{j})$
for $0\leq j\leq n$. Show that $f_{n}\rightarrow f$
uniformly.
(ii) Use (i) to show that there exists a sequence of
continuous functions $\phi_{n}$ such that, for each
$f\in C([0,1])$ there exists a unique sequence $a_{n}$
such that
\[\sum_{j=0}^{n}a_{j}\phi_{j}\rightarrow f\]
uniformly on $[0,1]$.
\noindent$[$In practice the sequence $x_{j}$ is
usually taken to be $0$, $1$, $1/2$, $1/4$, $3/4$,
$1/8$, $3/8$, $5/8$, $7/8$, $1/16$, $3/16$, \ldots.$]$
\end{question}
\begin{question}$\bigstar$\label{Q;ripple necessary}
In Theorem~\ref{T;best approximation exists}\label{A3.10}
we saw that, if $f:[a,b]\rightarrow{\mathbb R}$ is a continuous
function, there exists a polynomial $P$, of degree at most $n-1$,
such that $\|P-f\|_{\infty}\leq\|Q-f\|_{\infty}$ for all polynomials
$Q$ of degree $n-1$ or less.
The object of this question is to
show that the polynomial $P$ satisfies the equiripple
criterion.
We claim that we can find
$a\leq a_{0}\leq a_{1}\leq \dots\leq a_{n}\leq b$
such that, writing $\sigma=\|f-P\|_{\infty}$ we have
either
\begin{align*}
f(a_{j})-P(a_{j})=(-1)^{j}\sigma&\ \text{for all $0\leq j\leq n$}\\
\intertext{or}
f(a_{j})-P(a_{j})=(-1)^{j+1}\sigma&\ \text{for all $0\leq j\leq n$.}
\end{align*}
Our proof will be by reductio ad absurdum.
We assume without loss of generality that
$[a,b]=[0,1]$ and $\sigma=1$.
(i) Write $g=f-P$.
Explain why we can find an integer $N\geq 1$ such that,
if $1\leq r\leq N$, at least one of the following statements
must be true:
\begin{align*}
g(x)\geq 1/2&\ \text{for all $x\in[(r-1)/N,r/N]$},\\
\intertext{or}
g(x)\leq -1/2&\ \text{for all $x\in[(r-1)/N,r/N]$},\\
\intertext{or}
|g(x)|\leq 3/4&\ \text{for all $x\in[(r-1)/N,r/N]$}.
\end{align*}
(ii) Using the result of~(i) show that, if our putative
theorem is false, we can find an integer $q\leq n$,
an integer $w\in\{0,1\}$ and integers
\[0=u(1)\leq v(1)<u(2)\leq v(2)<\dots<u(q)\leq v(q)=N\]
such that
\begin{align*}
(-1)^{j+w}g(x)>-1&\ \text{for all $x\in[u(j)/N,v(j)/N]$}\\
|g(x)|<1&\ \text{for all $x\in[v(j)/N,u(j+1)/N]$}.
\end{align*}
Without loss of generality, we take $w=0$.
(iii) Explain why we can find an $\eta>0$ with
\begin{align*}
(-1)^{j}g(x)>-1+\eta&\ \text{for all $x\in[u(j)/N,v(j)/N]$}\\
|g(x)|<1-\eta&\ \text{for all $x\in[v(j)/N,u(j+1)/N]$},
\end{align*}
for all $j$. We may take $\eta<1/8$ and will do so.
(iv) Explain how to find a polynomial $R$ of degree $n$ or
less with $\|R\|_{\infty}=1$ such
that
\[(-1)^{j}R(x)>0\ \text{for all $x\in[u(j)/N,v(j)/N]$}\]
and $j=1,2,\dots,q$.
(v) Show that
\[|g(x)-(\eta/2)R(x)|<1-\eta/2\]
for all $x\in[0,1]$.
Hence obtain a contradiction.
\end{question}
\clearpage
\begin{question}\label{E;small bound}\label{A3.11}
Suppose that we have
a sequence of continuous functions
$f_{n}:[0,1]\rightarrow{\mathbb R}$ such that
$f_{n}(x)\rightarrow 0$ for each $x\in[0,1]$
as $n\rightarrow\infty$. Then, given $\epsilon>0$, we can find
a non-empty interval $(a,b)\subseteq[0,1]$
and an $N(\epsilon)$ such that
\[|f_{n}(t)|\leq \epsilon\]
for all $t\in(a,b)$ and all $n\geq N(\epsilon)$.
\noindent\emph{Hint} Consider the sets
\[E_{N}=\{x\in[0,1]\,:\,|f_{n}(x)|<\epsilon, \text{for all}\ n\geq N\}.\]
\end{question}
\begin{question}\label{A3.12}
Suppose that $f:[1,\infty)\rightarrow{\mathbb R}$
is a continuous function and $f(nx)\rightarrow 0$ as $n\rightarrow\infty$
for each $x\in[1,\infty)$. Show that $f(x)\rightarrow 0$
as $x\rightarrow\infty$.
\end{question}
\begin{question}$\bigstar$\label{A3.13}
(i) Consider $C([0,1])$ with the uniform norm.
If $M$ is a strictly positive
integer, let $\mathcal{E}_{M}$ be the set of $f\in C([0,1])$ such that
whenever $N\geq 1$ and
\[0\leq x_{0}<x_{1}<\dots<x_{N}\leq 1,\]
we have
\[\sum_{j=1}^{N}|f(x_{j})-f(x_{j-1})|\leq M.\]
Show that $\mathcal{E}_{M}$ is closed and has empty interior.
(ii) Show that there is a set ${\mathcal H}$ which is the complement
of a set of first category
such that, given any $f\in{\mathcal H}$, any $a$ and $b$ with
$0\leq a<b\leq 1$ and any $M>0$, we can find $N\geq 1$ and
\[a\leq x_{0}<x_{1}<\dots<x_{N}\leq b\]
with
\[\sum_{j=1}^{N}|f(x_{j})-f(x_{j-1})|>M.\]
\end{question}
\begin{question}\label{A3.14}
Let $h:[0,1]\rightarrow{\mathbb R}$ be a
continuous
strictly increasing function with $h(0)=0$. We say that
a compact set $E$ is thin if, given $\epsilon>0$,
we can find a finite collection of intervals $I_{j}$
of length $l_{j}$ $(1\leq j\leq N)$ such that
\[E\subseteq\bigcup_{j=1}^{N}I_{j},
\ \text{but}\ \sum_{j=1}^{N}h(l_{j})<\epsilon.\]
Show that the set ${\mathcal C}$ of thin compact sets
is the complement of a set of first category
in the space ${\mathcal K}$
of compact subsets of $[0,1]$
with the Hausdorff metric $\rho$.
\end{question}
\begin{question}$\bigstar$\label{A3.15}
Let $A=\{z\in{\mathbb C}\,:\,1/2<|z|<1\}$
and let $D=\{z\in{\mathbb C}\,:\,|z|<1\}$.
Suppose that $f:A\rightarrow{\mathbb C}$ is analytic
and we can find polynomials $p_{n}$ with $p_{n}(z)\rightarrow f(z)$
uniformly on $A$. Show that we can find an analytic function
$g:D\rightarrow{\mathbb C}$ with $f(z)=g(z)$ for all $z\in A$.
\noindent[Hint: Use the maximum modulus principle
and the general principle of uniform convergence.]
\end{question}
\begin{question}\label{A3.16} (i) We work in ${\mathbb C}$.
Show that
there exists a sequence of polynomials
$P_{n}$ such that
\[
P_{n}(z)\rightarrow
\begin{cases}
1&\text{if $|z|<1$ and $\Re z\geq 0$}\\
0&\text{if $|z|<1$ and $\Re z< 0$}
\end{cases}
\]
as $n\rightarrow\infty$.
\noindent[Hint: Recall that, if $\Omega_{1}$ and $\Omega_{2}$
are disjoint open sets and $f(z)=0$ for $z\in\Omega_{1}$
and $f(z)=1$ for $z\in \Omega_{2}$, then $f$ is analytic
on $\Omega_{1}\cup\Omega_{2}$.]
(ii) Show that
there exists a sequence of polynomials
$Q_{n}$ such that
\[
Q_{n}(z)\rightarrow
\begin{cases}
1&\text{if $\Re z\geq 0$}\\
0&\text{if $\Re z< 0$}
\end{cases}
\]
as $n\rightarrow\infty$.
\end{question}
\clearpage
\section{Question sheet 4}\label{S;sheet four}
\begin{question}\label{A4.1} By quoting the appropriate theorems,
show that, if $\Omega$ is an open set in ${\mathbb C}$,
then $f:\Omega\rightarrow{\mathbb C}$ is analytic
if and only if, whenever $K$ is a compact subset of
$\Omega$ with path-connected complement
and $\epsilon>0$, we can find a polynomial
$P$ with $|f(z)-P(z)|<\epsilon$ for all $z\in K$.
\end{question}
\begin{question}\label{A4.1a} In this exercise we suppose
that $K$ is a bounded compact subset
of ${\mathbb C}$ and $E$ is a non-empty
bounded connected component
of ${\mathbb C}\setminus K$. Give a simple example
of such a $K$ and $E$. Our object is to show that
if $a\in E$ the function $f(z)=(z-a)^{-1}$
is not uniformly approximable on $K$
by polynomials.
Suppose $P$ is a polynomial with $|P(z)-(z-a)^{-1}|\leq\epsilon$
for all $z\in K$. By observing that the boundary $\partial E$
of $E$ lies in $K$ and using the maximum modulus principle,
deduce that $|P(w)(w-a)-1|\leq\epsilon\sup_{z\in K}|z-a|$ for all $w\in E$.
By choosing $w$ appropriately deduce that
$\epsilon\geq (\sup_{z\in K}|z-a|)^{-1}$.
\end{question}
\begin{question}\label{A4.2} Show that $\cos 1$ is irrational.
Show more generally that $\cos 1/n$ is irrational whenever
$n$ is a non-zero integer.
\end{question}
\begin{question}\label{A4.3} Use the idea of Liouville's theorem
to write down a continued fraction whose value
is transcendental. Justify your answer.
\end{question}
\begin{question}\label{Q;Chest of drawers}\label{A4.4}
Let us write $\langle y\rangle=y-[y]$ so that
$\langle y\rangle$ is the fractional part of $y$.
Suppose that $x$ is irrational.
If $m$ is a strictly positive integer, consider
the $m+1$ points
\[0=\langle 0x\rangle,\ \langle 1x\rangle,
\ \ldots,\ \langle kx\rangle,\ \ldots,
\ \langle mx\rangle\]
and explain why there must exist integers $r$
and $s$ with $0\leq s<r\leq m$ such that
\[|\langle rx\rangle-\langle sx\rangle|<\frac{1}{m}.\]
Deduce that there exist infinitely many pairs of integers
$p$ and $q$ with $q\neq 0$ such that
\[\left|x-\frac{p}{q}\right|<\frac{1}{q^{2}}.\]
Show, on the other hand, that if $x$ is a quadratic irrational
(for example $x=\sqrt{2}$), then there exists an $M>0$
such that
\[\left|x-\frac{p}{q}\right|>\frac{M}{q^{2}}\]
for all integers $p$ and $q$ with $q\neq 0$.
\end{question}
\begin{question}\label{A4.11}
The Fibonacci
sequence has many interesting aspects. (It is,
so far as I know, the only series with its own
fanzine --- \emph{The Fibonacci Quarterly}.)
(i) Find the general solution of the difference equation
\[u_{n+1}=u_{n}+u_{n-1}.\]
The Fibonacci series is the particular solution $F_{n}=u_{n}$
with $u_{0}=0$, $u_{1}=1$. Write $F_{n}$ in the appropriate form.
(ii) Show, by using (i), or otherwise, that if $n\geq 1$,
$F_{n}$ is the closest integer to
\[\frac{1}{\sqrt 5}\left(\frac{1+\sqrt 5}{2}\right)^{n}.\]
We call
\[\tau= \frac{1+\sqrt 5}{2}\]
the golden ratio.
(iii) Prove the two identities
\begin{gather*}
F_{2n+1}=F_{n}^{2}+F_{n+1}^{2}\\
F_{2n}=F_{n}(F_{n-1}+F_{n+1})
\end{gather*}
by using the result of (i).
(iv) Explain why
\[\begin{pmatrix}F_{n+1}&F_{n}\\F_{n}&F_{n-1}\end{pmatrix}=A^{n}\]
where
\[A=\begin{pmatrix}1&1\\1&0\end{pmatrix}.\]
Use the result $A^{n+m}=A^{n}A^{m}$ to deduce
that
\begin{gather*}
F_{n+m+1}=F_{n+1}F_{m+1}+F_{n}F_{m}\\
F_{n+m}=F_{n}F_{m+1}+F_{n-1}F_{m}.
\end{gather*}
Obtain~(iii) as a special case.
(v) Let $x_{n}=F_{n+1}/F_{n}$. Use~(iii) to express $x_{2n}$
as a rational function of $x_{n}$.
(vi) Suppose now we take $y_{k}=x_{2^{k}}$.
Write down $y_{n+1}$ as a rational function of $y_{n}$.
Use (i) to show
that $y_{k}$ converges very rapidly to $\tau$.
Can you link this with the Newton--Raphson
method for finding a root of a particular function?
\end{question}
\begin{question}\label{Q;special dog}\label{A4.12}
Let $p(z)=z^{2}-4z+3$ and let
$\gamma:[0,1]\rightarrow{\mathbb C}$ be given by $\gamma(t)=p(2e^{2\pi it})$.
Show that the closed path associated with $\gamma$ does not pass through $0$.
Compute $w(\gamma,0)$
(i) non-rigorously, directly from the definition, by
obtaining enough information about $\gamma$,
(You could write the real and imaginary parts
of $\gamma(t)$ in terms of $\cos t$ and $\sin t$.)
(ii) by factoring, and
(iii) by the dog walking lemma.
\end{question}
\begin{question}$\bigstar$\label{A4.13} Suppose that
$\gamma:[0,1]\rightarrow{\mathbb C}\setminus\{0\}$
is a continuously differentiable function
with $\gamma(0)=\gamma(1)$.
If we define $r:[0,1]\rightarrow {\mathbb C}$
by
\[r(t)=\exp\left(\int_{0}^{t}\frac{\gamma'(s)}{\gamma(s)}\,ds\right)\]
compute the derivative of $r(t)/\gamma(t)$ and deduce
that
\[w(\gamma,0)=\frac{1}{2\pi i}
\int_{0}^{1}\frac{\gamma'(s)}{\gamma(s)}\,ds.\]
Use this result and the residue theorem to compute
$w(\gamma,0)$ in Exercise~\ref{Q;special dog}.
\noindent$[$The example used in the last two questions has
been chosen to make things easy. However, if you are prepared
to work hard, it is possible to obtain enough information
about $\gamma$ to find the winding number
of closed curves even in quite
complicated cases. If many winding numbers are required
(as may be the case when studying stability in an engineering
context), then we can use numerical methods (this question
suggests a possibility, though not necessarily a good one)
together with
the knowledge that the winding number is an integer
to obtain winding numbers on an industrial scale.$]$
\end{question}
\begin{question}$\bigstar$\label{A4.10}
Take your electronic calculator out of
your old school satchel (or use the expensive piece of equipment
on which you play DVDs) and find the first few terms
of the continued fraction for $\pi$ (or, more strictly,
for the rational number that your calculator gives when
you ask it for $\pi$). Compute the first few associated convergents
(what we have called $3+p_{n}/q_{n}$).
Verify that $355/113$ is an extremely good approximation
for $\pi$ and explain why this is so. Apparently
the approximation
was first discovered by the astronomer Tsu Ch'ung-Chih
in the fifth century A.D.
The entries $a_{n}$ in the continued fraction expansion for
$\pi$ look, so far as anyone knows, just like those
you would expect from a random real number (in a sense made
precise in Corollary~\ref{C;continued random}).
I would be inclined to say that this was precisely what one should expect
if there was not a beautiful expansion (using a generalisation
of the kind of continued fraction discussed in the course)
found by Lord Brouncker in 1654.
\[\frac{\pi}{4}=\cfrac{1}{1+\cfrac{1^{2}}
{2+\cfrac{3^{2}}
{2+\cfrac{5^{2}}
{2+\dots}}}}.\]
You may easily verify that the first convergents
are
\[1,\quad 1-\frac{1}{3},\quad 1-\frac{1}{3}+\frac{1}{5},\quad\dots\]
and, if your name is Euler, that the $n$th convergent
is
\[\sum_{j=0}^{n}\frac{(-1)^{j}}{2j+1}\]
and then, if your name is Leibniz, you will prove
the result
\[\sum_{j=0}^{n}\frac{(-1)^{j}}{2j+1}\rightarrow \frac{\pi}{4}.\]
The convergence is, however, terribly slow and
it is no wonder that Huygens initially disbelieved
Brouncker's result.
\end{question}
\end{document}
**