\documentclass[12pt,a4paper]{article}
\usepackage{amsmath,amsxtra,amsthm,amssymb,makeidx,eurosym}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{example}[theorem]{Example}
\newtheorem{exercise}[theorem]{Exercise}
\newtheorem{problem}[theorem]{Problem}
\renewcommand{\qedsymbol}{$\blacksquare$}
\newcommand{\var}{\operatorname{var}}
\renewcommand{\emptyset}{\varnothing}
\newcommand{\lcm}{\operatorname{lcm}}
\newcommand{\Cl}{\operatorname{Cl}}
\newcommand{\Int}{\operatorname{Int}}
\newcommand{\card}{\operatorname{card}}
\newcommand{\supp}{\operatorname{supp}}
\newtheoremstyle{citing}{3pt}{3pt}{\itshape}{0pt}{\bfseries}%
{.}{ }{\thmnote{#3}}\theoremstyle{citing}\newtheorem*{varthm}{}
\begin{document}
\title{Baire Category, Probabilistic Constructions
and Convolution Squares}
\author{T.~W.~K\"{o}rner}
\maketitle
\tableofcontents
\section{Introduction} In the past few years I have
written a number of papers using simple Baire category
and probabilistic results. The object of this course
is to give examples of the main theorems and the
methods used to obtain them.
Although we shall obtain other results, our main
concern will be the question: knowing something
about the measure $\mu$, what can we say about
its convolution with itself (that is to say, the convolution
square) $\mu*\mu$?
This question goes back at least as far as
the paper of Wiener and Wintner~\cite{WW}
in which they show that the convolution square
of a singular measure need not be singular.
Those who already know about such things may find
it useful to see some of our main results. Those
who do not, should be reassured that we will provide
appropriate definitions and background in due course.
We give a new proof of the following theorem of Besicovitch.
\begin{varthm}[Theorem~\ref{T;short Besicovitch}]
There exists a closed
bounded set of Lebesgue measure zero containing lines of length
at least $1$ in every direction.
\end{varthm}
We prove a quantitative version of a theorem of Rudin.
\begin{varthm}[Theorem~\ref{T;fast Rudin}]
Suppose that $\phi:{\mathbb N}\rightarrow{\mathbb R}$
is a sequence of strictly positive numbers with
$r^{\alpha}\phi(r)\rightarrow \infty$ as $r\rightarrow\infty$
whenever $\alpha>0$.
Then there exists a probability measure $\mu$
such that $\phi(|r|)\geq |\hat{\mu}(r)|$ for all $r\neq 0$,
but $\supp\mu$ is independent.
\end{varthm}
We prove an extension (found independently by Matheron and Zelen{\'y})
of a celebrated theorem of Debs and Saint Reymond.
\begin{varthm}[Theorem~\ref{T;how}] Let $B$ be a set of first category
in ${\mathbb T}$. Then we can find a probability measure $\mu$
with $\hat{\mu}(r)\rightarrow 0$ as $|r|\rightarrow\infty$
such that $\supp\mu$ is independent and
the subgroup $G$ of ${\mathbb T}$
generated by $\supp \mu$
satisfies
\[G\cap B\subseteq\{0\}.\]
\end{varthm}
We produce two substantial extensions of the theorem
of Wiener and Wintner. The first is related to the theorem
of Debs and Saint Reymond.
\begin{varthm}[Theorem~\ref{T;Winter}] Let
$A$ be a set of first category in
${\mathbb T}$. Then we can find a
probability measure $\mu$ such that
$\supp\mu\cap A=\emptyset$ but
$d(\mu*\mu)(t)=f(t)\,dt$ where $f$ is a
Lebesgue $L^{1}$ function.
\end{varthm}
The second further extends a result of Saeki.
\begin{varthm}[Theorem~\ref{T;main Hausdorff}]
If $1>\alpha>1/2$, then there exists
a probability measure $\mu$ such that the Hausdorff dimension
of the support of $\mu$ is $\alpha$ and $d(\mu*\mu)(t)=f(t)\,dt$
where $f$ is Lipschitz $\alpha-\tfrac{1}{2}$.
\end{varthm}
We conclude with a result on Hausdorff dimension.
\begin{varthm}[Theorem~\ref{T;main sum}]
Given a sequence $\alpha_{j}$ with
$0\leq\alpha_{j}\leq\alpha_{j+1}<1$, we can find
a closed set $E$ such that
\[E_{[j]}=\underbrace{E+E+\ldots+E}_{j}\]
has
Hausdorff dimension $\alpha_{j}$ for each $j\geq 1$.
\end{varthm}
Any course like this is bound to be rather uneven
in difficulty and I have chosen to accentuate this
unevenness by spending a substantial amount of time
discussing well known and relatively easy results.
The beginner should concentrate on these discussions,
leaving the technical (and, I am afraid still
imperfectly digested) details of my own arguments
to a hypothetical interested expert.
\section{Baire's theorem}
The study of complete metric spaces enables us to
replace certain recurring arguments by general principles.
One of the most important of the principles is given
by Baire's category theorem.
\begin{theorem}\label{T;Baire}
{\bf [Baire's category theorem]} Let
$(X,d)$ be a non-empty complete metric space. If $E_{1}$, $E_{2}$, \ldots
are closed subsets of $X$ with dense complements, then
$X\setminus\bigcup_{j=1}^{\infty}E_{j}$ is non-empty.
\end{theorem}
Baire's theorem may be restated as follows.
\begin{theorem}\label{T;informal} Let
$(X,d)$ be a non-empty complete metric space.
Suppose that $P_{j}$ is a property such that:-
(i) The property of being $P_{j}$
is \emph{stable} in the sense that, given
$x\in X$ which has property $P_{j}$,
we can find an $\epsilon>0$ such that whenever
$d(x,y)<\epsilon$ the point $y$ has the property $P_{j}$.
(ii) The property of not being $P_{j}$
is \emph{unstable} in the sense that, given
$x\in X$ and $\epsilon>0$, we can find a $y\in X$
with $d(x,y)<\epsilon$ which has the property $P_{j}$.
Then there is an $x_{0}\in X$ which has all of the
properties $P_{1}$, $P_{2}$, \ldots.
\end{theorem}
\begin{proof}[Proof of equivalence of Theorems~\ref{T;Baire}
and~\ref{T;informal}]
Let $x$ have the property $P_{j}$ if
and only if $x\notin E_{j}$.
\end{proof}
It is unlikely that anyone who reads these notes is unfamiliar
with Theorem~\ref{T;Baire} or its proof, but I include a proof
for completeness.
We shall prove a slightly stronger version of Baire's theorem.
\begin{theorem}\label{T;Baire 2} Let $(X,d)$ be a complete metric space.
If $E_{1}$, $E_{2}$, \dots are closed sets with empty interiors,
then $X\setminus \bigcup_{j=1}^{\infty}E_{j}$ is dense in $X$.
\end{theorem}
\begin{proof} Suppose that $x_{0}\in X$ and $\delta_{0}>0$.
We shall show that there exists a $y\in B(x_{0},\delta_{0})$
such that $y\notin \bigcup_{j=1}^{\infty}E_{j}$.
To this end, we perform the following inductive construction.
Given $x_{n-1}\in X$ and $\delta_{n-1}>0$, we can find $x_{n}\in X$
such that $x_{n}\in B(x_{n-1},\delta_{n-1}/4)$, but
$x_{n}\notin E_{n}$. (For, if not, we would have
$B(x_{n-1},\delta_{n-1}/4)\subseteq E_{n}$ and $E_{n}$ would have
a non-empty interior.) Since $E_{n}$ is closed and $x_{n}\notin E_{n}$,
we can now find a $\delta_{n}$ with $\delta_{n-1}/2>\delta_{n}>0$
such that $B(x_{n},\delta_{n})\cap E_{n}=\emptyset$.
Now observe that
\[\delta_{m}\leq 2^{-1}\delta_{m-1}\leq 2^{-2}\delta_{m-2}\leq\ldots
\leq 2^{n-m}\delta_{n}\]
for all $m\geq n\geq 0$. It follows that, if $s\geq r$,
\[d(x_{r},x_{s})\leq\sum_{j=r}^{s-1}d(x_{j+1},x_{j})\leq
\sum_{j=r}^{s-1}\delta_{j}/4\leq 4^{-1}\delta_{0}\sum_{j=r}^{s-1}2^{-j}
\leq 2^{-r-1}\delta_{0}.\]
Thus the $x_{r}$ form a Cauchy sequence and converge in $(X,d)$
to some point $y$.
The same kind of calculation as in the last paragraph
gives
\[d(x_{r},x_{s})\leq\sum_{j=r}^{s-1}d(x_{j+1},x_{j})\leq
\sum_{j=r}^{s-1}\delta_{j}/4\leq 4^{-1}\delta_{r}\sum_{j=0}^{s-r-1}2^{-j}
\leq \delta_{r}/2,\]
whenever $s\geq r$ and so
\[d(x_{r},y)\leq d(x_{r},x_{s})+d(x_{s},y)\leq \delta_{r}/2+d(x_{s},y)
\rightarrow \delta_{r}/2\]
as $s\rightarrow\infty$. We thus have $d(x_{r},y)\leq \delta_{r}/2$
so $y\in B(x_{0},\delta_{0})$ and $y\notin E_{r}$ for each $r\geq 1$
as required.
\end{proof}
For historical reasons Baire's category theorem is associated
with some rather peculiar nomenclature.
\begin{definition} Let $(X,d)$ be a metric space. We say that
a subset $A$ of $X$ is \emph{of the first category} if
it is a subset of the union of a countable collection
of closed sets with empty interior\footnote{Some authors
say that a subset $A$ of $X$ is of the first category if
it is the union of a countable collection
of closed sets with empty interior. They have history
on their side, but not common usage.}. If $A$ is of the first
category, we say that \emph{quasi-all}
points of $X$ belong to the complement $X\setminus A$ of $A$.
\end{definition}
Theorem~\ref{T;Baire 2} thus states that the complement
of a set of the first category in a complete metric space
is dense in that space. The next exercise gives a simple
but very useful property of sets of first category.
\begin{exercise}\label{E;countable Baire}
Show that the countable union of sets of the
first category is itself a set of the first category.
\end{exercise}
The reader will have met the following theorem before.
\begin{theorem} ${\mathbb R}$ is uncountable.
\end{theorem}
\begin{proof} If we give ${\mathbb R}$ the usual metric,
then point sets $\{x\}$ are closed and have empty interior.
It follows that, if $E$ is a countable subset of ${\mathbb R}$,
then $E=\bigcup_{e\in E}\{e\}$ is of first category and so
$E\neq{\mathbb R}$.
\end{proof}
The standard undergraduate proof involves decimal expansions
but this proof avoids having to talk about the relation between
real numbers and decimals. It is also much closer to Cantor's
original proof.
\begin{exercise} If $(X,d)$ is a metric space we say that
a point $x\in X$ is \emph{isolated} if we can find a $\delta>0$
such that $B(x,\delta)=\{x\}$.
(i) Show that a point $x\in X$ is isolated if and only if
$\{x\}$ is open.
(ii) Show that any complete non-empty metric space without
isolated points is uncountable.
(iii) Give an example of a complete infinite metric space
which is countable.
(iv) Give an example of an uncountable complete metric space
with every point isolated.
\end{exercise}
There are several reasons for using the Baire category theorem
when we seek examples of particular types of behaviour.
The first is practical. Although any Baire category argument
can obviously be replaced by a direct argument, if there
are several properties involved, each of which involves
countably many conditions the direct argument may require
quite a lot of notation and careful interlocking of
several inductions. Such arguments are not hard to write
(and indeed may give the author some pleasure), but
may be hard to read.
The second argument is that a
property which holds quasi-always is, in some sense, generic.
The next exercise shows that we must not press
this argument too far.
\begin{exercise} The following is a well known procedure
for constructing `Cantor sets'. Let $E_{0}=[0,1]$
and let $\zeta_{1}$, $\zeta_{2}$, $\ldots$ be a sequence
of real numbers with $0<\zeta_{j}<1$. At the $n$th
stage $E_{n}$ is the union of $2^{n}$ disjoint closed intervals $I(r,n)$
all of the same length.
We define $E_{n+1}$ to be the union of
the $2^{n+1}$ disjoint closed intervals formed by removing
an open interval $J(r,n)$ of length $\zeta_{n}$ times the length
of the initial interval $I(r,n)$ from the centre of $I(r,n)$.
Thus, if $I(r,n)=[c_{r,n}-\delta_{n},c_{r,n}+\delta_{n}]$, we take
$J(r,n)=(c_{r,n}-\zeta_{n}\delta_{n},c_{r,n}+\zeta_{n}\delta_{n})$
and
\[E_{n+1}=\bigcup_{r=1}^{2^{n}}\bigl(I(r,n)\setminus J(r,n)\bigr).\]
(i) Explain why $\zeta=\prod_{n=1}^{\infty}(1-\zeta_{n})$ is well defined.
Show that $\zeta$ can take any value subject only to the condition
$1>\zeta\geq 0$.
(ii) Show that $E=\bigcap_{n=1}^{\infty}E_{n}$ is a closed nowhere dense
set without isolated points. Show that $E$ has
Lebesgue measure $\zeta$.
(iii) Construct a set $H\subseteq[0,1]$ of first Baire category
but of Lebesgue measure $1$. Points in $H$ are `generic
in the sense of measure theory' (almost all points in $[0,1]$
lie in $H$) but the points of $[0,1]\setminus H$ are `generic
in the sense of topology' (quasi-all points in $[0,1]$
lie in $[0,1]\setminus H$).
(iv) Construct a set $P\subseteq{\mathbb R}$ of first Baire category
such that ${\mathbb R}\setminus P$ has Lebesgue measure zero.
\end{exercise}
The third argument is that the act of seeking a Baire type
proof may, by itself, suggest new ways of looking at
your problem.
\section{The Hausdorff metric} The Hausdorff metric measures the difference
between compact sets.
\begin{definition}\label{D;Hausdorff} Let $(X,d)$ be a
metric space and let ${\mathcal E}$ be the collection of non-empty
compact subsets of $X$. We write
\[d_{\mathcal E}(E,F)=
\sup_{e\in E}\inf_{f\in F}d(e,f)+\sup_{f\in F}\inf_{e\in E}d(e,f)\]
for all $E,\,F\in{\mathcal E}$ and call $d_{\mathcal E}$ the
Hausdorff metric on ${\mathcal E}$.
\end{definition}
The proof that the Hausdorff metric is indeed a metric is easy, but not,
I think, trivial. We use the following subsidiary lemma.
\begin{lemma}\label{T;semi Hausdorff} Let $(X,d)$ be a
metric space and let ${\mathcal E}$ be the collection of non-empty
compact subsets of $X$. If we write
\[\Delta(E,F)=\sup_{e\in E}\inf_{f\in F}d(e,f)\]
for $E,\,F\in{\mathcal E}$ then
\[\Delta(E,G)\leq\Delta(E,F)+\Delta(F,G)\]
for all $E,\,F,\,G\in{\mathcal E}$.
\end{lemma}
\begin{proof} Write $d(e,F)=\inf_{f\in F}d(e,f)$
for $e\in X$ and $F\in{\mathcal E}$.
By the triangle inequality,
\[d(e,g)\leq d(e,f)+d(f,g)\]
so
\[d(e,G)\leq d(e,f)+d(f,g)\]
for all $g\in G$ whence
\[d(e,G)\leq d(e,f)+d(f,G)\leq d(e,f)+\Delta(F,G)\]
for all $f\in F$. Hence
\[d(e,G)\leq d(e,F)+\Delta(F,G)\]
for all $e\in E$ and
\[\Delta(E,G)\leq\Delta(E,F)+\Delta(F,G).\]
\end{proof}
\begin{exercise} Use Lemma~\ref{T;semi Hausdorff}
to show that the Hausdorff metric is indeed a metric.
\end{exercise}
\begin{lemma} (We use the notation of Definition~\ref{D;Hausdorff}.)
If $(X,d)$ is complete, then the Hausdorff metric is complete.
\end{lemma}
\begin{proof} It is sufficient to show that, if $E_{n}\in{\mathcal E}$
and $d_{\mathcal E}(E_{n},E_{n+1})\leq 2^{-n-1}$ for all $n\geq 1$,
then $E_{n}$ converges in the Hausdorff metric.
To this end, let $E$ be the set of $e\in X$ such that there
exist $e_{n}\in E_{n}$ with $d(e_{n},e)\rightarrow 0$
as $n\rightarrow\infty$.
We observe that, if $e\in E$ then, given any $m$,
we can find an $n\geq m+1$ such that $d(e,e_{n})<2^{-m}$.
Since
\[d_{\mathcal E}(E_{m},E_{n})\leq \sum_{j=m}^{n-1}d_{\mathcal E}(E_{j},E_{j+1})
\leq \sum_{j=m}^{n-1}2^{-(j+1)}<2^{-m}\]
we can find $x_{m}\in E_{m}$ such that $d(x_{m},e_{n})<2^{-m}$
and so $d(x_{m},e)<2^{-m+1}$. Thus
\[E\subseteq\{x\,:\,d(x,y)<2^{-m+1}\ \text{for some $y\in E_{m}$}\}.\]
We next show that $E$ is compact. Suppose that $y(j)\in E$
for $j\geq 1$. We construct infinite subsets $A_{n}$ of ${\mathbb N}$
as follows. Set $A_{0}={\mathbb N}$. If $A_{m-1}$ has been defined
we obtain $A_{m}$ as follows.
Since $E$ is covered by open balls $B(x,2^{-m+1})$
with $x\in E_{m}$ and $E$ is compact, $E$ is covered by a finite
set of such balls and one of those balls $B(x_{m},2^{-m+1})$
must contain $y(j)$ for infinitely many $j\in A_{m-1}$;
we take $A_{m}$ to be the set of such $j$. We observe
that $d(x_{m},x_{m+1})<2^{-m}$ so the $x_{m}$ converge
to some $y\in E$. Choose $n(j)\in A_{j}$ so
that $n(j)\rightarrow\infty$. Then $d\big(y(n(j)),y\big)\rightarrow 0$
as $j\rightarrow\infty$. Thus $E$ is compact.
The second paragraph of the proof shows that
\[\sup_{e\in E}\inf_{f\in E_{n}}d(e,f)\leq 2^{-n+1}.\]
If $x_{n}\in E_{n}$ then we can find $x_{j}\in E_{j}$
such that $d(x_{j},x_{j+1})<2^{-j+1}$. Since the $x_{j}$
are Cauchy, they converge to some $x$. We have $x\in E$
and
\[d(x,x_{n})\leq\sum_{j=n}^{\infty}d(x_{j},x_{j+1})\leq 2^{-n+2}\]
so
\[\sup_{f\in E_{n}}\inf_{e\in E}d(e,f)\leq 2^{-n+2}.\]
Thus $d_{\mathcal E}(E_{n},E)\rightarrow 0$ as $n\rightarrow\infty$.
\end{proof}
\begin{exercise} (We use the notation of Definition~\ref{D;Hausdorff}.)
Show that, if $({\mathcal E},d_{\mathcal E})$ is complete, then
$(X,d)$ is.
\end{exercise}
\begin{exercise} In these notes, we are not interested in metric spaces
in general but in spaces like $[0,1]^{n}$, ${\mathbb T}^{n}$ and
${\mathbb R}^{n}$ with the usual Euclidean metric. We can then give
a simpler proof of the completeness of the Hausdorff metric.
Let us work in ${\mathbb R}^{n}$ with the usual Euclidean norm.
Suppose that $E_{n}\in{\mathcal E}$
and $d_{\mathcal E}(E_{n},E_{m})\leq 2^{-n}$ for all $m,\,n\geq 1$.
Let
\[K_{n}=E_{n}+\overline{B(0,2^{-n+1})}=\{{\mathbf e}+{\mathbf x}\,:\,
{\mathbf e}\in E_{n},\ \|{\mathbf x}\|\leq 2^{-n+1}\}.\]
Show that $K_{n}\in{\mathcal E}$ and $K_{n}\supseteq K_{n+1}$.
Setting $E=\bigcap_{n=1}^{\infty} K_{n}$, show that $E\in{\mathcal E}$
and
$d_{\mathcal E}(E_{n},E)\rightarrow 0$ as $n\rightarrow\infty$.
\end{exercise}
As a first exercise let us show that if we work in
${\mathbb T}={\mathbb R}/{\mathbb Z}$ with the usual metric
quasi-all members of ${\mathcal E}$ are perfect (that is to
say totally disconnected with no isolated points).
\begin{lemma} Let us work in ${\mathbb T}$ with usual metric.
(i) Quasi-all members of ${\mathcal E}$ have no isolated points.
(ii) Quasi-all members of ${\mathcal E}$ are disconnected.
(iii) Quasi-all members of ${\mathcal E}$ are perfect.
\end{lemma}
\begin{proof} (i) Let ${\mathcal E}_{m}$ consist of
all those $E\in{\mathcal E}$ such that there exists an
$x\in E$ with $E\cap(x-1/m,x+1/m)=\{x\}$.
We claim that ${\mathcal E}_{m}$ is closed. Suppose
that $F_{n}\in {\mathcal E}_{m}$ and
$d_{\mathcal E}(F_{n},E)\rightarrow 0$. We can find
$x_{n}\in F_{n}$ with $F_{n}\cap(x_{n}-1/m,x_{n}+1/m)=\{x_{n}\}$.
By compactness there exists an $x\in{\mathbb T}$ and
$n(j)\rightarrow\infty$ such that $x_{n(j)}\rightarrow x$.
By extracting a subsequence we may suppose that $x_{n}\rightarrow x$.
Automatically $x\in E$. Suppose that $y\in E$ and $y\neq x$.
We can find $y_{n}\in F_{n}$ with $y_{n}\rightarrow y$.
When $n$ is sufficiently large $x_{n}\neq y_{n}$ and so
$|y_{n}-x_{n}|\geq 1/m$. Proceeding to the limit we obtain
$|y-x|\geq 1/m$. Thus $E\in{\mathcal E}_{m}$ and
we have shown that ${\mathcal E}_{m}$ is closed.
To show that ${\mathcal E}\setminus{\mathcal E}_{m}$ is dense,
let $E\in{\mathcal E}$ and $\epsilon>0$ be given. If we
choose an integer $N\geq \epsilon^{-1}+m+1$ and set
\[F=E\cup\{r/N\,:\,r\in{\mathbb Z}
\ \text{and there exists a $y\in E$ with $|y-r/N|\leq 1/N$}\},\]
then $F\notin{\mathcal E}_{m}$ and $d_{\mathcal E}(E,F)<\epsilon$.
We have shown that ${\mathcal E}\setminus{\mathcal E}_{m}$ is dense.
Thus $\bigcup_{m=1}^{\infty}{\mathcal E}_{m}$ is of first category in
$({\mathcal E},d_{\mathcal E})$. Since every compact set with
an isolated point lies in $\bigcup_{m=1}^{\infty}{\mathcal E}_{m}$ we are done.
(ii) We prove the stronger statement that quasi-all
members of ${\mathcal E}$ do not intersect ${\mathbb Q}$.
If $q$ is rational set
\[{\mathcal E}_{q}=\{E\in{\mathcal E}\,:\,q\in E\}.\]
It is clear that ${\mathcal E}_{q}$ is closed. To see
that ${\mathcal E}\setminus{\mathcal E}_{q}$ is dense,
suppose that $E\in{\mathcal E}$ and $1>\epsilon>0$ are given.
If we set
\[F=\{q+\epsilon/3\}\cup\big(E\setminus(q-\epsilon/3,q+\epsilon/3)\big),\]
then $F\notin{\mathcal E}_{q}$ and $d_{\mathcal E}(E,F)<\epsilon$.
Thus $\bigcup_{q\in{\mathbb Q}}{\mathcal E}_{q}$ is of first category in
$({\mathcal E},d_{\mathcal E})$ and we are done.
(iii) If ${\mathcal A}$ and ${\mathcal B}$ are of
first category so is ${\mathcal A}\cup{\mathcal B}$.
\end{proof}
\begin{exercise} If we work in ${\mathbb R}^{n}$ with usual metric
show that quasi-all members of ${\mathcal E}$ are perfect.
\end{exercise}
\section{Independence and Kronecker sets} If we do harmonic analysis
on the circle ${\mathbb T}$ we find that independence
plays an important role.
\begin{definition} We say that points $x_{1}$,
$x_{2}$, $\ldots$, $x_{n}\in{\mathbb T}$ are independent if the equation
\[\sum_{j=1}^{n}n_{j}x_{j}=0\]
has no non-trivial solution with $n_{j}\in{\mathbb Z}$.
\end{definition}
\begin{lemma}{\bf [Kronecker's lemma]} The points $x_{1}$,
$x_{2}$, $\ldots$, $x_{n}\in{\mathbb T}$ are independent
if and only if the following statement is true.
Given $y_{j}\in{\mathbb T}$ and $\epsilon>0$
we can find $N\in{\mathbb Z}$ with
\[|Nx_{j}-y_{j}|<\epsilon\]for $1\leq j\leq n$.
\end{lemma}
The `only if' part of Kronecker's lemma is immediate.
The next exercise gives a proof of a stronger version
of the `if part'.
\begin{exercise}
Show that the following statements about a point
${\mathbf x}\in{\mathbb T}^{n}$
are equivalent. (We write $\card A$ for the number of elements
in a finite set $A$.)
(A) If $I_{j}$ is a closed interval in ${\mathbb T}$
of length $|I_{j}|$ then
\[\frac{1}{M}\card
\left\{0\leq m\leq M-1\,:\,m{\mathbf x}\in\prod_{j=1}^{n}I_{j}\right\}
\rightarrow \prod_{j=1}^{n}|I_{j}|\]
as $M\rightarrow\infty$.
(B) If $f\in C({\mathbb T}^{n})$, then
\[\frac{1}{M}\sum_{m=0}^{M-1}f(m{\mathbf x})
\rightarrow\int_{{\mathbb T}^{n}}f({\mathbf t})\,d{\mathbf t}\]
as $M\rightarrow\infty$.
(C) If $P\in C({\mathbb T}^{n})$ is a trigonometric polynomial,
then
\[\frac{1}{M}\sum_{m=0}^{M-1}P(m{\mathbf x})
\rightarrow\int_{{\mathbb T}^{n}}P({\mathbf t})\,d{\mathbf t}\]
as $M\rightarrow\infty$.
(D) If
\[\chi_{\mathbf k}({\mathbf t})=
\exp\left(2\pi i\sum_{j=1}^{n}k_{j}t_{j}\right)\]
with ${\mathbf k}\in{\mathbb Z}^{n}$,
then
\[\frac{1}{M}\sum_{m=0}^{M-1}\chi_{\mathbf k}(m{\mathbf x})
\rightarrow\int_{{\mathbb T}^{n}}\chi_{\mathbf k}({\mathbf t})\,d{\mathbf t}\]
as $M\rightarrow\infty$.
(E) $x_{1}$,
$x_{2}$, $\ldots$, $x_{n}$ are independent.
The equivalence of (A) and (E) is Weyl's equidistribution
theorem. Show that Kronecker's lemma follows from
Weyl's equidistribution theorem.
\end{exercise}
Our discussion suggests that we investigate two types
of compact subsets of ${\mathbb T}$. We write
$\chi_{n}(t)=\exp(2\pi int)$.
\begin{definition} A compact subset $E$ of
${\mathbb T}$ is called independent if every finite subset
is independent.
\end{definition}
\begin{definition} A compact subset $E$ of
${\mathbb T}$ is called a Kronecker set if
given $f\in C({\mathbb T})$ and $\epsilon>0$
we can find $n$ such that
\[|\chi_{n}(t)-f(t)|<\epsilon\ \text{for all $t\in E$}.\]
\end{definition}
\begin{exercise} (i) Show that the complement
of an independent compact set is dense in ${\mathbb T}$.
(ii) Show that every Kronecker set is independent.
(We shall see that the converse is false.)
\end{exercise}
From the point of view of harmonic analysis Kronecker
sets are very `thin'. We shall show that quasi-all
compact sets are Kronecker. We need the following
result.
\begin{exercise}\label{E;dense modulus}
(i) Let $S({\mathbb T})$ be the subset
of $C({\mathbb T})$ consisting of those $f$ such that
$|f(t)|=1$ for all $t\in{\mathbb T}$. Show that $S({\mathbb T})$
has a countable dense subset. (This is easily proved by
all sorts of arguments. The reader should try to find
at least three.)
(ii) Let $A$ be a countable dense subset of $S({\mathbb T})$.
Show that a compact subset of ${\mathbb T}$ is Kronecker
if and only if
given $f\in A$ and $\epsilon>0$
we can find $n$ such that
\[|\chi_{n}(t)-f(t)|<\epsilon\ \text{for all $t\in E$}.\]
\end{exercise}
\begin{lemma}\label{L;quasi Kronecker}
If we work in ${\mathbb T}$ with usual metric
then quasi-all members of ${\mathcal E}$ are Kronecker.
\end{lemma}
\begin{proof} Let $f_{1}$, $f_{2}$, \ldots be a countable
dense subset of the set $S({\mathbb T})$ defined in
Exercise~\ref{E;dense modulus}. Let ${\mathcal U}_{n,m}$
be the set of $E\in{\mathcal E}$ such that there exists
an $N$ with
\[|f_{n}(t)-\chi_{N}(t)|<1/m
\ \text{for all $t\in E$}.\]
We shall show that ${\mathcal U}_{n,m}$ is open and dense
in ${\mathcal E}$.
Observe first that, if $E\in{\mathcal U}_{n,m}$ then, by definition,
we can find an $N$ with
\[|f_{n}(t)-\chi_{N}(t)|<1/m
\ \text{for all $t\in E$}.\]
Since $|f_{n}-\chi_{N}|$ is continuous and a continuous function
on a compact set attains its bounds we can find an $\eta>0$
such that
\[|f_{n}(t)-\chi_{N}(t)|<1/m-\eta
\ \text{for all $t\in E$}.\]
Since $f_{n}-\chi_{N}$ is uniformly continuous on ${\mathbb T}$
we can find a $\delta>0$ such that
\[\big|\big(f_{n}(t)-\chi_{N}(t)\big)-
\big(f_{n}(s)-\chi_{N}(s)\big)\big|<\eta
\ \text{whenever $|s-t|<\delta$}.\]
It follows that, if $F\in{\mathcal E}$ and
$d_{\mathcal E}(E,F)<\delta$, then $F\in {\mathcal U}_{n,m}$.
Thus ${\mathcal U}_{n,m}$ is open.
Now suppose that we are given $E\in{\mathcal E}$ and
$\epsilon>0$. Set
\[\tilde{E}=E+[-\epsilon/2,\epsilon/2]
=\{e+x\,:\,e\in E,\ |x|\leq\epsilon/2\}.\]
Then $\tilde{E}\in{\mathcal E}$,
$d_{\mathcal E}(E,\tilde{E})\leq\epsilon/2$
and given any $e\in\tilde{E}$ we can find an interval $I$
of length $\epsilon$ such that $e\in I\subseteq \tilde{E}$.
Now let
\[F=\tilde{E}\cap\{t\in{\mathbb T}\,:\,
f_{n}(t)-\chi_{M}(t)=0\}.\]
Automatically $F\in {\mathcal U}_{n,m}$ and
(since $f_{n}$ is uniformly continuous)
we have $d_{\mathcal E}(F,\tilde{E})<\epsilon/2$ and
so $d_{\mathcal E}(F,E)<\epsilon$, provided only that
$M$ is large enough. Thus ${\mathcal U}_{n,m}$ is dense.
Since every element of
$\bigcap_{n,m\geq 1}{\mathcal U}_{n,m}$
is Kronecker we have shown that quasi-all compact sets
are Kronecker.
\end{proof}
It is a useful slogan that quasi-all compact sets are
as `thin' as possible. (Note that a slogan does not have
to be true or even meaningful to be useful.) At first sight
this seems to rule out the study of `thick' sets but if
we consider only
`thick' sets then we may hope that
quasi-all such sets will be
as `thin' as possible with respect to an appropriate metric.
As an example let us show the existence of two Kronecker
sets $E_{1}$ and $E_{2}$ such that $E_{1}+E_{2}={\mathbb T}$.
Consider the space ${\mathcal E}^{2}$ of ordered pairs of compact
sets with the product metric
\[d_{2}\big((E_{1},E_{2}),(F_{1},F_{2})\big)
=d_{\mathcal E}(E_{1},F_{1})+d_{\mathcal E}(E_{2},F_{2}).\]
\begin{lemma}\label{L;Kronecker pairs}
The collection
\[{\mathcal G}=\{(E_{1},E_{2})\in{\mathcal E}^{2}
\,:\,E_{1}+E_{2}={\mathbb T}\}\]
is a non-empty closed subset of ${\mathcal E}^{2}$ and so
$({\mathcal G},d_{2})$
is a complete metric space.
\end{lemma}
\begin{proof} To see that ${\mathcal G}$
is non-empty, consider $({\mathbb T},{\mathbb T})$.
To see that ${\mathcal G}$ is closed, we argue as follows.
Suppose that $(F_{1},F_{2})\in{\mathcal E}^{2}$,
$\big(E_{1}(n),E_{2}(n)\big)\in{\mathcal G}$ and
\[\big(E_{1}(n),E_{2}(n)\big)\underset{d_{2}}{\rightarrow}
(F_{1},F_{2})\]
as $n\rightarrow\infty$. If $y\in{\mathbb T}$, then
we can find $(x_{1,n},x_{2,n})\in E_{1}(n)\times E_{2}(n)$
such that $x_{1,n}+x_{2,n}=y$. By the compactness of ${\mathbb T}$,
we can find $n(j)\rightarrow\infty$ and
$x_{1},\,x_{2}\in{\mathbb T}$ such that $x_{1,n(j)}\rightarrow x_{1}$
and $x_{2,n(j)}\rightarrow x_{2}$ as $j\rightarrow\infty$.
Automatically $(x_{1},x_{2})\in F_{1}\times F_{2}$
and $x_{1}+x_{2}=y$. Thus $F_{1}+F_{2}={\mathbb T}$ and
we are done.
\end{proof}
\begin{theorem} Let us work in the complete metric space
$({\mathcal G},d_{2})$ defined in Lemma~\ref{L;Kronecker pairs}.
Quasi-all elements
of $({\cal G},d_{2})$ are pairs of Kronecker sets.
\end{theorem}
\begin{proof}
Let $f_{1}$, $f_{2}$, \ldots be a countable
dense subset of the set $S({\mathbb T})$ defined in
Exercise~\ref{E;dense modulus}. Let ${\mathcal U}_{q,n,m}$
be the set of $(E_{1},E_{2})\in{\mathcal G}$ such that there exists
an $N$ with
\[|f_{n}(t)-\chi_{N}(t)|<1/m
\ \text{for all $t\in E_{q}$}\]
with $q=1,\,2$ and $n,\,m\geq 1$.
The proof that ${\mathcal U}_{q,n,m}$ is open
in ${\mathcal G}$ follows the similar proof in
Lemma~\ref{L;quasi Kronecker}. We now
show that ${\mathcal U}_{q,n,m}$ is dense.
By symmetry it suffices to look at ${\mathcal U}_{1,n,m}$.
Suppose, therefore, that we are given $(E_{1},E_{2})\in{\cal G}$
and $\epsilon>0$. Set
\[\tilde{E}_{j}=E_{j}+[-\epsilon/4,\epsilon/4]
=\{e+x\,:\,e\in E_{j},\ |x|\leq\epsilon/4\}.\]
Then $(\tilde{E}_{1},\tilde{E}_{2})\in{\mathcal G}$
and the following results hold.
(i) $d_{2}\big((E_{1},E_{2}),
(\tilde{E}_{1},\tilde{E}_{2})\big)\leq \epsilon/2$.
(ii) Each $e\in \tilde{E}_{1}$
belongs to some closed interval $I$ of length at least $\epsilon/4$
lying entirely within $\tilde{E}_{1}$.
(iii) If $F$ is a compact subset of ${\mathbb T}$ with Hausdorff
distance $d(F,E_{1})\leq\epsilon/4$, then
$F+\tilde{E}_{2}={\mathbb T}$.
By the uniform continuity of
$f_{n}$ and the intermediate value theorem, any sufficiently
large $M$ will have the property that the equation
$\chi_{M}(t)=f_{n}(t)$ has at least one solution in any closed
interval $I$ of length $\epsilon/4$. Choosing such an $M$ and
setting
\[F_{1}=\{t\in \tilde{E}_{1}:\chi_{M}(t)=f_{n}(t)\},
\ F_{2}=\tilde{E}_{2},\]
we see, using (iii),
that $(F_{1},F_{2})\in {\mathcal U}_{1,n,m}$ and
$d_{2}\big((\tilde{E}_{1},\tilde{E}_{2}),(F_{1},F_{2})\big)\leq \epsilon/4$
so that
$d_{2}\big((E_{1},E_{2}),(F_{1},F_{2})\big)\leq 3\epsilon/4$.
The rest of the proof runs on standard lines.
\end{proof}
\section{Besicovitch Sets} A Besicovitch set is a compact
subset $E$ of ${\mathbb R}^{2}$ of Lebesgue measure zero
containing line segments of length $1$
in every direction.
(Formally, if ${\mathbf u}$
is a unit vector, there exists an ${\mathbf x}$ such that
${\mathbf x}+\lambda{\mathbf u}\in E$ for all $0\leq \lambda\leq 1$.)
The first example of such a set was given by
Besicovitch~\cite{Besicovitch} and several
constructions appear in the literature.
(The construction we give here was inspired by
the one given in~\cite{Kahane 2}.)
Clearly we can construct Besicovitch sets from
compact sets of Lebesgue measure zero
containing line segments of length $1$
in each direction making an angle less than
$\pi/4$ with some fixed direction.
We shall show the existence of such sets by a category argument.
In what follows ${\mathcal E}$
will be the collection of compact subsets of
${\mathbb R}^{2}$ and
$d_{\mathcal E}$ the usual Hausdorff metric on
${\mathcal E}$.
\begin{definition}\label{D;Well}
We take ${\mathcal P}$ to be the collection
of all closed subsets $P$ of the rectangle
$[-2,2]\times[0,1]$ with the following properties
(i) $P$ is the union of line segments joining
points of the form $(x_{1},0)$ to points of the
form $(x_{2},1)$ with $x_{1},\, x_{2}\in [-2,2]$.
(ii) If $|v|\leq 1$, then we can find $x_{1},\, x_{2}\in [-2,2]$
with $x_{2}-x_{1}=v$ and
such that the line segment
joining $(x_{1},0)$ to $(x_{2},1)$ lies in $P$.
\end{definition}
\begin{lemma}\label{L;Besicovitch closed}
${\mathcal P}$ is a
non-empty closed subset of $({\mathcal E},d_{\mathcal E})$
and so $({\mathcal P},d_{\mathcal E})$ is complete
and non-empty.
\end{lemma}
\begin{proof}
Suppose $P_{n}\in{\mathcal P}$, $E\in{\mathcal E}$
and $d_{\mathcal E}(P_{n},E)\rightarrow 0$. We first show that
$E$ satisfies property~(i) in Definition~\ref{D;Well}.
To this end, suppose that ${\mathbf k}\in E$. By definition,
we can find ${\mathbf p}_{n}\in P_{n}$ with
$\|{\mathbf p}_{n}-{\mathbf k}\|\rightarrow 0$
as $n\rightarrow \infty$. Since $P_{n}$ has property~(i),
we can find $x_{1,n},\, x_{2,n}\in [-2,2]$ such that
the line segment $l_{n}$ joining
$(x_{1,n},0)$ to $(x_{2,n},1)$ contains ${\mathbf p}_{n}$.
By the compactness of $[-2,2]^{2}$, we can find an integer
sequence $n(j)\rightarrow\infty$ and
$x_{1},\, x_{2}\in [-2,2]$ such that $x_{1,n(j)}\rightarrow x_{1}$
and $x_{2,n(j)}\rightarrow x_{2}$ as $j\rightarrow\infty$.
If we denote the line segment joining
$(x_{1},0)$ to $(x_{2},1)$ by $l$, then
$d_{\mathcal E}(l_{n(j)},l)\rightarrow 0$
as $j\rightarrow \infty$. It follows that $l\subseteq E$
and ${\mathbf k}\in l$. We have established that $E$ has
property~(i).
To see that $E$ has property~(ii) choose $|v|\leq 1$.
Since $P_{n}$ has property~(ii),
we can find $x_{1,n},\, x_{2,n}\in [-2,2]$ such that
$x_{2,n}-x_{1,n}=v$ and
the line segment $l_{n}$ joining
$(x_{1,n},0)$ to $(x_{2,n},1)$ lies in $P_{n}$.
By the compactness of $[-2,2]^{2}$, we can find an integer
sequence $n(j)\rightarrow\infty$ and
$x_{1},\, x_{2}\in [-2,2]$ such that $x_{1,n(j)}\rightarrow x_{1}$
and $x_{2,n(j)}\rightarrow x_{2}$ as $j\rightarrow\infty$.
Automatically $x_{2}-x_{1}=v$.
If we denote the line segment joining
$(x_{1},0)$ to $(x_{2},1)$ by $l$, then
$d_{\mathcal E}(l_{n(j)},l)\rightarrow 0$
as $j\rightarrow \infty$. It follows that $l\subseteq E$.
To see that ${\mathcal P}$ is non-empty observe that
$[-2,2]\times[0,1]\in{\mathcal P}$.
\end{proof}
\begin{theorem}\label{T;Besicovitch} If we work in
the complete metric space $({\mathcal P},d_{\mathcal E})$,
then quasi-all $P\in{\mathcal P}$ have
Lebesgue measure zero.
\end{theorem}
The path from Theorem~\ref{T;Besicovitch} to
Theorem~\ref{T;short Besicovitch} is completely standard.
Baire's category theorem tells us that, if quasi-all
sets have a property, then at least one set does, so
there exists a set $P_{0}\in {\mathcal P}$
of Lebesgue measure zero. By part~(ii) of
Definition~\ref{D;Well},
$P_{0}$ contains line segments of length at least $1$ in every
direction making an angle of absolute value less than or equal
to $\pi/4$ with the $y$-axis. If we take the union
of $P_{0}$ with a copy of $P_{0}$ rotated through $\pi/2$,
the result will be a Besicovitch set.
\begin{theorem}\label{T;short Besicovitch} There exists a closed
bounded set of Lebesgue measure zero containing lines of length
at least $1$ in every direction.
\end{theorem}
The key to our proof of Theorem~\ref{T;Besicovitch} is
the following lemma.
\begin{lemma}\label{L;Simplest Besicovitch}
If $u\in [0,1]$ and $\epsilon>0$,
write ${\mathcal P}(u,\epsilon)$ for the set of
$P\in {\mathcal P}$ with the following property.
There exists an $N$ and $\kappa>0$
(both depending on $\epsilon$ and $u$)
such that whenever
$y\in[0,1]\cap[u-\epsilon,u+\epsilon]$,
we can find $N$ intervals of total length
less than $100\epsilon-\kappa$
covering the set
\[\{x\in[-1,1]\,:\,(x,y)\in P\}.\]
Then ${\mathcal P}(u,\epsilon)$
is open and dense
in $({\mathcal P},d_{\mathcal E})$.
\end{lemma}
\begin{proof}
It is easy to check that ${\mathcal P}(u,\epsilon)$
is open. Suppose that $P\in{\mathcal P}(u,\epsilon)$.
By definition, we can find $N$ and $\kappa>0$
(both depending on $\epsilon$ and $u$)
such that, whenever
$y\in[0,1]\cap[u-\epsilon,u+\epsilon]$,
we can find $N$ intervals of total length
less than $100\epsilon-\kappa$
covering the set
\[\{x\in[-1,1]\,:\,(x,y)\in P\}.\]
If we choose $\eta>0$ so that $2N\eta<\kappa/2$, then
writing $\kappa'=\kappa/2$ we see that, if
$P'\in{\mathcal P}$ and $d(P,P')<\eta$,
then, whenever
$y\in[0,1]\cap[u-\epsilon,u+\epsilon]$,
we can find $N$ intervals of total length
less than $100\epsilon-\kappa'$
covering the set
\[\{x\in[-1,1]\,:\,(x,y)\in P'\}.\]
(Informally, if we used intervals $[a_{r},b_{r}]$ for
the set $P$, we use intervals $[a_{r}-\eta,b_{r}+\eta]$ for
the set $P'$.) Thus $P'\in {\mathcal P}(u,\epsilon)$.
We need to show that ${\mathcal P}(u,\epsilon)$ is dense.
To this end, let us write $l(x,\theta)$ for the line segment
through $(x,u)$ which joins a point on the line
$y=0$ to a point on the line $y=1$ and
which is at angle $\theta$ to the $y$-axis.
We start with
a bit of technical tidying up. Observe
that, if $P\in {\mathcal P}$ and $1>\eta>0$, then writing
\begin{multline*}
P'=\bigcup\{l(x+\eta,\theta)\,:\,l(x,\theta)\subseteq P
\ \text{and}\ x\leq 0\}\\
\cup
\bigcup\{l(x-\eta,\theta)\,:\,l(x,\theta)\subseteq P
\ \text{and}\ x\geq 0\},
\end{multline*}
we have $P'\in{\mathcal P}$, $d(P,P')\leq\eta$
and $P'\subseteq[-1+\eta,1-\eta]\times[0,1]$.
Thus, to show that ${\mathcal P}(u,\epsilon)$ is dense,
it suffices to show that, given $\delta>0$, $\eta>0$ and
$P\in {\mathcal P}$ with
$P\subseteq[-1+\eta,1-\eta]\times[0,1]$,
we can find a $P'\in{\mathcal P}(u,\epsilon)$
with $d(P,P')<\delta$. To this end, note that
we can find a $\rho>0$ such that, writing
\[Q=\bigcup\{l(x,\phi)\,:\,|\phi-\theta|\leq\rho
\ \text{and}\ l(x,\theta)\subseteq P\},\]
we have $Q\in {\mathcal P}$ and $d(P,Q)<\delta/2$.
We observe that the set of open intervals $(\theta-\rho,\theta+\rho)$
with $l(x,\theta)\subseteq P$ is an open cover of $[-\pi/4,\pi/4]$
(by condition~(ii) of
Definition~\ref{D;Well}) and so, by compactness,
we can find $x_{1}$, $x_{2}$, \dots, $x_{M}$
and $\theta_{1}$, $\theta_{2}$, \dots, $\theta_{M}$
such that $l(x_{m},\theta_{m})\subseteq P$ for all $1\leq m\leq M$
and
\[\bigcup_{m=1}^{M}(\theta_{m}-\rho,\theta_{m}+\rho)
\supseteq [-\pi/4,\pi/4]\]
We can now find $\rho_{m}$ and $\rho'_{m}$ such that
$\rho\geq\rho_{m},\,\rho_{m}'>0$ for $1\leq m\leq M$,
\[\bigcup_{m=1}^{M}(\theta_{m}-\rho_{m}',\theta_{m}+\rho_{m}')
\supseteq [-\pi/4,\pi/4]
\ \text{and}\ \sum_{m=1}^{M}\rho_{m}+\rho_{m}'\leq \pi.\]
Setting
\[Q'=\bigcup_{m=1}^{M}\{l(x_{m},\phi)\,:\,
\phi\in(\theta_{m}-\rho_{m}',\theta_{m}+\rho_{m})\},\]
we observe that $Q'\subseteq Q$ and $Q'\in{\mathcal P}$.
A simple compactness argument shows that we can find
$\tilde{x}_{1}$, $\tilde{x}_{2}$, \dots, $\tilde{x}_{\widetilde{M}}$
and $\tilde{\theta}_{1}$,
$\tilde{\theta}_{2}$, \dots, $\tilde{\theta}_{\widetilde{M}}$
such that $l(\tilde{x}_{m},\tilde{\theta}_{m})\subseteq P$
for all $1\leq m\leq \widetilde{M}$ and, writing
\[Q''=\bigcup_{m=1}^{\widetilde{M}}l(\tilde{x}_{m},\tilde{\theta}_{m}),\]
we have $d_{\mathcal E}(P,Q'')\leq \delta/2$. If we now take
$P'=Q'\cup Q''$, then $P'\in{\mathcal P}$ and
$d_{\mathcal E}(P',P)<\delta$.
At this point it may be worth the reader's while
to sketch $P'$. If $1\geq u+\epsilon$, the set
\[P'\cap\{(x,v)\,:\,-1\leq x\leq 1,\ u\leq v\leq u+\epsilon\}\]
consists of a finite set of lines and a finite set of
triangles with vertices on the line $y=u$ and
bases on the line $y=u+\epsilon$ of total length
less than $4\pi\epsilon$ (it is not necessary to make best estimates
here). But it is trivial that a triangle of base $K\epsilon$
intersects any line parallel to the base in a segment
of length at most $K\epsilon$, so we have shown
that $P'\in{\mathcal P}(u,\epsilon)$.
\end{proof}
Lemma~\ref{L;Simplest Besicovitch} gives us a slightly stronger version of
Theorem~\ref{T;Besicovitch}.
\begin{theorem}\label{T;better Besicovitch}
If we work in
the complete metric space $({\mathcal P},d_{\mathcal E})$,
then quasi-all $P\in{\mathcal P}$ have the property that
\[\{x\,:\,(x,y)\in P\}\ \text{has Lebesgue measure zero}\]
for all $y$.
\end{theorem}
\begin{proof}
Set
${\mathcal P}_{n}=\bigcap_{r=0}^{n}{\mathcal P}(r/n,1/n)$.
By the defining property of ${\mathcal P}(r/n,1/n)$,
we know that, if $P\in {\mathcal P}_{n}$, then
\[\{x\,:\,(x,y)\in P\}\ \text{has Lebesgue measure strictly less than
$100/n$}\]
for all $y\in [0,1]$. By Lemma~\ref{L;Simplest Besicovitch}
quasi-all $P$ lie in ${\mathcal P}_{n}$.
It follows that quasi-all $P$ lie in $\bigcap_{n=1}^{\infty}{\mathcal P}_{n}$
and so have the property that
\[\{x\,:\,(x,y)\in P\}\ \text{has Lebesgue measure zero}\]
for all $y$.
\end{proof}
Fubini's theorem shows that Theorem~\ref{T;better Besicovitch}
implies Theorem~\ref{T;Besicovitch}.
The following easy exercise shows why we needed a little care in our proof.
\begin{exercise} We use the standard Hausdorff metric
associated with ${\mathbb T}$.
Show that we can find finite sets
$F_{n}$ such that $d_{\mathcal E}(F_{n},{\mathbb T})\rightarrow 0$.
What can we say about the Lebesgue measure of $F_{n}$ and ${\mathbb T}$?
\end{exercise}
I do not think the next exercise is very illuminating, but
the reader may wish to tackle it for completeness.
\begin{exercise} We work in ${\mathbb R}^{2}$.
Let ${\mathcal Q}$ be the collection
of all closed subsets $Q$ of the disc centre ${\mathbf 0}$
radius $2$ with the following properties.
(i) $Q$ is the union of line segments of length
at least $1$ joining points on the boundary of the disc.
(ii) We can find a line segment of the type described in (i) in every direction.
\noindent Show that, for an appropriate complete metric space, quasi-all
elements of ${\mathcal Q}$ have Lebesgue measure $0$.
\end{exercise}
\section{Measures} For the remainder of the lectures we
shall be interested in measures, their supports and their
Fourier transforms. This section is not intended to be
complete, but merely
intended to establish notation
and to jog the reader's memory. Later, I shall
use results on measures which include several not mentioned here.
We shall consider the space $M({\mathbb T})$
of Borel measures $\mu$ on ${\mathbb T}$.
From our point of view, the two
key properties of Borel measures are that,
if $\mu\in M({\mathbb T})$, then
$\int_{\mathbb T}f\,d\mu$
is defined for all $f\in C({\mathbb T})$ and, that, if
$\mu,\,\tau\in M({\mathbb T})$
satisfy
\[\int_{\mathbb T}f\,d\mu=\int_{\mathbb T}f\,d\tau\]
for all $f\in C({\mathbb T})$, then $\mu=\tau$.
We recall that
$M({\mathbb T})$ has a natural norm
\[\|\mu\|=\sup\left\{\left|\int_{\mathbb T}f\,d\mu\right|
\,:\,f\in C({\mathbb T}),\ \|f\|_{\infty}\leq 1\right\}.\]
Standard theorems tell us that the unit ball for this norm
is weakly compact, that is to say, that, given
$\mu_{n}\in M({\mathbb T})$ with $\|\mu_{n}\|\leq 1$,
we can find $n(j)\rightarrow\infty$ and $\mu\in M({\mathbb T})$
such that
\[\int_{\mathbb T}f\,d\mu_{n(j)}
\rightarrow \int_{\mathbb T}f\,d\mu\]
for all $f\in C({\mathbb T})$.
We say that a measure $\mu$ is positive if
$\int_{\mathbb T}f\,d\mu$ is real and positive
for all $f\in C({\mathbb T})$ with $f(t)$ real and positive
for all $t\in{\mathbb T}$. A probability measure is
a positive measure of norm $1$.
\begin{exercise} Show that the set of probability measures
is closed under the standard norm. Show that it is weakly compact.
\end{exercise}
Every measure $\mu$ has a support with the property
that it is the smallest
compact set $E$ such that
\[\int_{\mathbb T}f\,d\mu=0\]
for all $f\in C({\mathbb T})$ with $f(t)=0$ for all $t\in E$.
Recall that we write
$\chi_{n}(t)=\exp(2\pi int)$. We define the $n$th Fourier coefficient
$\hat{\mu}(n)$ in the natural way by
\[\hat{\mu}(n)= \int_{\mathbb T}\chi_{-n}\,d\mu.\]
\begin{exercise} By using the fact that the trigonometric polynomials
are uniformly dense, or otherwise, prove the following results.
(i) If $\mu,\,\tau\in M({\mathbb T})$ and
$\hat{\mu}(n)=\hat{\tau}(n)$ for all $n$, then
$\mu=\tau$.
(ii) Suppose $\mu_{j}$ is in the unit ball of $M({\mathbb T})$
for all $j\geq 1$ and $\mu\in M({\mathbb T})$. Then
$\mu_{j}\rightarrow\mu$ weakly if and only if
$\hat{\mu}_{j}(n)\rightarrow\hat{\mu}(n)$ for all $n$.
\end{exercise}
Any two measures $\mu,\,\tau\in M({\mathbb T})$ can be convolved
to produce $\mu*\tau\in M({\mathbb T})$. Whichever definition
the reader uses, she should find it easy to deduce the
key facts that $\|\mu*\tau\|\leq\|\mu\|\|\tau\|$,
$\supp(\mu*\tau)\subseteq\supp\mu+\supp\tau$
and $\widehat{\mu*\tau}(n)=\hat{\mu}(n)\hat{\tau}(n)$.
The next exercise gives
practice in the kind of ideas we use.
\begin{exercise}\label{E;convolution}
This exercise gives one way of defining convolution
from scratch.
Recall that $\delta_{a}$ is the Dirac measure
defined by
\[\int_{\mathbb T}f\,d\delta_{a}=f(a)\]
for $f\in C({\mathbb T})$.
(i) Verify
that $\delta_{a}$ is a probability measure. Observe
that $\hat{\delta}_{a}(n)=\chi_{n}(a)$.
(ii) Let $\lambda_{j}\in{\mathbb C}$ and suppose $a(1)$, $a(2)$,
\ldots, $a(n)$ are distinct points of ${\mathbb T}$.
If $\mu=\sum_{j=1}^{n}\lambda_{j}\delta_{a(j)}$, show that
\[\|\mu\|=\sum_{j=1}^{n}|\lambda_{j}|.\]
State with proof, necessary and sufficient conditions for
$\mu$ to be a positive measure and for $\mu$ to be a probability
measure.
(iii) Let $M_{F}({\mathbb T})$ be the set of measures
of the form given in~(ii). By considering
\[\sum_{r=0}^{n-1}\mu\big([r/n,(r+1)/n)\big)\delta_{r/n},\]
or otherwise, show that every $\mu\in M({\mathbb T})$
is the weak limit of a sequence of $\mu_{n}\in M_{F}({\mathbb T})$
with $d_{\mathcal E}(\supp\mu,\supp\mu_{n})\rightarrow 0$.
(iv) If $\mu=\sum_{j=1}^{n}\lambda_{j}\delta_{a(j)}$
and $\tau=\sum_{k=1}^{m}\mu_{k}\delta_{b(k)}$ we define
\[\mu*\tau=\sum_{j=1}^{n}\sum_{k=1}^{m}\lambda_{j}\mu_{k}\delta_{a(j)+b(k)}.\]
(A very cautious reader will check that different representations
of $\mu$ and $\tau$ give the same $\mu*\tau$.)
Show that
\[\|\mu*\tau\|\leq\|\mu\|\|\tau\|,
\ \supp(\mu*\tau)\subseteq\supp\mu+\supp\tau
\ \text{and}\ \widehat{\mu*\tau}(n)=\hat{\mu}(n)\hat{\tau}(n).\]
(v) Suppose that $\mu_{m},\,\tau_{m}\in M_{F}({\mathbb T})$,
$\mu,\,\tau\in M({\mathbb T})$ and $\mu_{m}\rightarrow\mu$,
$\tau_{m}\rightarrow\tau$ weakly. Show that we can find
$m(j)\rightarrow\infty$ and $\sigma\in M({\mathbb T})$
such that $\mu_{m(j)}*\tau_{m(j)}\rightarrow\sigma$ weakly.
Show now that $\mu_{m}*\tau_{m}\rightarrow\sigma$ weakly.
If $\mu_{m}',\,\tau_{m}'\in M_{F}({\mathbb T})$,
and $\mu_{m}'\rightarrow\mu$,
$\tau_{m}'\rightarrow\tau$ weakly, show that
$\mu_{m}'*\tau_{m}'\rightarrow\sigma$ weakly.
We can thus define $\mu*\tau=\sigma$ unambiguously.
(vi) Show that, if $\mu,\,\tau\in M({\mathbb T})$,
then
\[\|\mu*\tau\|\leq\|\mu\|\|\tau\|,
\ \supp(\mu*\tau)\subseteq\supp\mu+\supp\tau
\ \text{and}\ \widehat{\mu*\tau}(n)=\hat{\mu}(n)\hat{\tau}(n).\]
Show also that, if $\mu$ and $\tau$ are positive, so is
$\mu*\tau$. By considering $\widehat{\mu*\tau}(0)$,
or otherwise, show that, if $\mu$ and $\tau$ are probability
measures, so is
$\mu*\tau$.
\end{exercise}
The central objects of study in these notes are
the relations between convolution,
supports and Fourier series of measures.
\section{A theorem of Rudin} We start with a simple
result which establishes a link between the algebraic properties
of the support of a measure $\mu$ and the speed with which
its Fourier coefficients $\hat{\mu}(n)$ can tend to zero
as $|n|\rightarrow\infty$.
\begin{lemma}\label{L;large support}
Suppose that $\mu$ is a non-zero measure on ${\mathbb T}$
and $q$ is a positive integer such that we can find
an $\alpha>1/q$ and an $A>0$ with
\[|\hat{\mu}(r)|\leq A|r|^{-\alpha}\]
for all $r\neq 0$. Then we can find
distinct points $x_{1},\,x_{2},\,\dots,\,x_{q}\in \supp \mu$
and $m_{j}\in{\mathbb Z}$, not all zero, such that
\[\sum_{j=1}^{q}m_{j}x_{j}=0.\]
\end{lemma}
\begin{proof}
Let $\mu_{q}=\mu*\mu*\dots*\mu$, the convolution of $\mu$ with itself
$q$ times. Then
\[|\hat{\mu}_{q}(r)|=|\hat{\mu}(r)|^{q}\leq A^{q}|r|^{-q\alpha}\]
for all $r\neq 0$. It follows that $\hat{\mu}_{q}\in l^{1}$
and so $d{\mu}_{q}(t)=f(t)\,dt$ for some continuous function
$f$. Thus (since
$\mu_{q}$ is non-zero) $\supp{\mu}_{q}$ contains a non-trivial
interval and so contains a non-zero rational number $y$. Thus
we can find $y_{1},\,y_{2},\,\dots,\,y_{q}\in \supp \mu$
such that
\[\sum_{j=1}^{q}y_{j}=y.\]
Since we do not know that the $y_{j}$ are distinct,
we can only conclude that
there exists a $q'$ with $1\leq q'\leq q$,
distinct points $x_{1},\,x_{2},\,\dots,\,x_{q'}\in \supp \mu$
and non-zero $n_{j}\in{\mathbb Z}$ such that
\[\sum_{j=1}^{q'}n_{j}x_{j}=y.\]
If we take $n_{j}=0$ for $j>q'$, it now follows that
there are
distinct points $x_{1},\,x_{2},\,\dots,\,x_{q}\in \supp \mu$
and $n_{j}\in{\mathbb Z}$, not all zero, such that
\[\sum_{j=1}^{q}n_{j}x_{j}=y.\]
The stated result follows
if we choose a non-zero $M\in{\mathbb Z}$ such that $My=0$
and set $m_{j}=Mn_{j}$.
\end{proof}
In~\cite{Rudin},
Rudin proved the following famous result in the other direction.
\begin{theorem}\label{T;Rudin}
There exists a probability measure $\mu$
such that $\hat{\mu}(r)\rightarrow 0$ as $|r|\rightarrow\infty$,
but $\supp\mu$ is independent.
\end{theorem}
Our object is to prove the following quantitative version of
Rudin's result.
\begin{theorem}\label{T;fast Rudin}
Suppose that $\phi:{\mathbb N}\rightarrow{\mathbb R}$
is a sequence of strictly positive numbers with
$r^{\alpha}\phi(r)\rightarrow \infty$ as $r\rightarrow\infty$
whenever $\alpha>0$.
Then there exists a probability measure $\mu$
such that $\phi(|r|)\geq |\hat{\mu}(r)|$ for all $r\neq 0$,
but $\supp\mu$ is independent.
\end{theorem}
In view of Lemma~\ref{L;large support}, this result is best possible.
We prove Theorem~\ref{T;fast Rudin} by using a Baire category argument
but in order to do this we must first introduce an appropriate
metric space.
\begin{lemma}\label{L;get metric}
Let $\phi:{\mathbb N}\rightarrow {\mathbb R}$
be a bounded sequence of strictly positive numbers. The
following results hold.
(i) The space $\Lambda_{\phi}$
of sequences ${\mathbf a}:{\mathbb Z}\rightarrow {\mathbb C}$
with $\sup_{r\in{\mathbb Z}}\phi(|r|)^{-1}|a_{r}|$ finite
is a complete normed space under the norm
\[\|{\mathbf a}\|_{\phi}=\sup_{r\in{\mathbb Z}}\phi(|r|)^{-1}|a_{r}|.\]
(ii) Consider the space ${\mathcal P}_{\phi}$ consisting of ordered
pairs $(E,\mu)$ where $E$ is a compact subset of ${\mathbb T}$
and $\mu$ is a probability measure with $\supp\mu\subseteq E$
and $\sup_{r\in{\mathbb Z}}\phi(|r|)^{-1}|\hat{\mu}(r)|$ finite.
Then
\[d_{\phi}\big((E,\mu),(F,\sigma)\big)=d_{\mathcal E}(E,F)+
\|\hat{\mu}-\hat{\sigma}\|_{\phi}\]
is a complete metric on ${\mathcal P}_{\phi}$.
(iii) If
\[{\mathcal G}_{\phi}=\{(E,\mu)\in{\mathcal P}_{\phi}\,:\,
\phi(|r|)^{-1}|\hat{\mu}(r)|\rightarrow 0
\ \text{as $|r|\rightarrow\infty$}\},\]
then ${\mathcal G}_{\phi}$ is a non-empty closed subset
of ${\mathcal P}_{\phi}$. Thus $({\mathcal G}_{\phi},d_{\phi})$
is a complete metric space.
\end{lemma}
\begin{proof} (i) The standard proof is left to the reader.
(ii) It is easy to check that $d_{\phi}$ is a metric on
${\mathcal P}_{\phi}$. To see that $d_{\phi}$ is complete,
suppose that $(E_{n},\mu_{n})$ is a Cauchy sequence.
Since $E_{n}$ is a Cauchy sequence in $({\mathcal E},d_{\mathcal E})$
we can find an $E\in{\mathcal E}$ such that
$d_{\mathcal E}(E_{n},E)\rightarrow 0$.
Since every sequence of probability measures contains a
weakly convergent subsequence we can find a probability measure
$\mu$ and a sequence $n(j)\rightarrow\infty$ such that
$\mu_{n(j)}\rightarrow\mu$ weakly. Since
$\hat{\mu}_{n(j)}(r)\rightarrow\hat{\mu}(r)$
for each $r$ and $\hat{\mu}_{n}(r)$ is a Cauchy sequence
in ${\mathbb C}$,
we have $\hat{\mu}_{n}(r)\rightarrow\hat{\mu}(r)$
for each $r$ and so $\mu_{n}\rightarrow\mu$ weakly.
If $\epsilon>0$ then
\[\supp\mu_{n}\subseteq E_{n}\subseteq E+[-\epsilon,\epsilon]\]
for all $n$ sufficiently large, so $\supp\mu\subseteq E$.
Finally, part~(i) (or a direct proof) shows that
$\hat{\mu}\in\Lambda_{\phi}$ and
$\|\hat{\mu}_{n}-\hat{\mu}\|_{\phi}\rightarrow 0$.
Thus $(E,\mu)\in{\mathcal P}_{\phi}$ and
$d_{\phi}\big((E_{n},\mu_{n}),(E,\mu)\big)\rightarrow 0$
as $n\rightarrow \infty$.
(iii) The standard proof is left to the reader.
\end{proof}
The next exercise may help explain why we defined
${\mathcal G}_{\phi}$ as we did.
\begin{exercise}\label{E;support club}
Let $\phi(n)=1$ for all $n$.
(i) Give an example of a sequence
$(\mu_{n},E_{n})\in{\mathcal G}_{\phi}$ and a
$(\mu,E)\in{\mathcal G}_{\phi}$
such that
\[\supp\mu_{n}=E_{n},\ \text{and}\ (\mu_{n},E_{n})
\underset{d_{\phi}}{\rightarrow}(\mu,E)
\ \text{but}\ \supp\mu \neq E.\]
(ii) By considering sets of the form
\[{\mathcal F}_{n,r}
=\big\{(\mu,E)\in{\mathcal G}_{\phi}\,:\,E\cap[(r-1)/n,r/n]\neq\emptyset,
\ \mu\big(E\cap[(r-1)/n,r/n]\big)=0\big\},\]
or otherwise, show that quasi-all sets
$(\mu,E)$
in $({\mathcal G}_{\phi},d_{\phi})$ have the property
that $\supp\mu= E$.
\end{exercise}
\begin{exercise} (i) What can you say about ${\mathcal G}_{\phi}$
if $\sum_{n=1}^{\infty}\phi(n)$ converges?
(ii) For the rest of the question we suppose that
$n^{2}\phi(n)\rightarrow\infty$.
Let $f:{\mathbb T}\rightarrow{\mathbb R}$ be a three times
continuously differentiable positive function with
$\int_{\mathbb T}f(t)\,dt=1$ and let $\mu$ be the measure
defined by $d\mu(t)=f(t)\,dt$.
Show that $(\mu,\supp\mu)\in{\mathcal G}_{\phi}$.
(iii) Repeat Exercise~\ref{E;support club} for the $\phi$ of
part~(ii).
\end{exercise}
We can now state our Baire category version of
Theorem~\ref{T;fast Rudin}.
\begin{theorem}\label{T;Baire Rudin}
Suppose that $\phi:{\mathbb N}\rightarrow{\mathbb R}$
is a sequence of strictly positive numbers with
$r^{\alpha}\phi(r)\rightarrow \infty$ as $r\rightarrow\infty$
whenever $\alpha>0$.
Then quasi-all $(\mu,E)\in{\mathcal G}_{\phi}$ have the
property that $E$ is independent.
\end{theorem}
We obtain Theorem~\ref{T;Baire Rudin}
by studying the set ${\mathcal H}(q,p,{\mathbf m})$
defined as follows.
\begin{definition}\label{D;set definition}
Suppose that $\phi$ is as in Theorem~\ref{T;Baire Rudin},
$q$ and $p$ are positive integers and
${\mathbf m}=(m_{1},m_{2},\dots,m_{q})\in{\mathbb Z}^{q}$
with
\[M=\sum_{j=1}^{q}|m_{j}|\neq 0.\]
Then the set
${\mathcal H}(q,p,{\mathbf m})$ consists of those
$(E,\mu)\in{\mathcal G}_{\phi}$ such that
$\sum_{j=1}^{q}m_{j}x_{j}\neq 0$
whenever $x_{j}\in E$ and
$|x_{i}-x_{j}|\geq 1/p$ for $i\neq j$.
\end{definition}
Since the set of finite sequences of integers is countable,
Theorem~\ref{T;Baire Rudin} follows from
the following lemma.
\begin{lemma}\label{L;Baire step} The
set ${\mathcal H}(q,p,{\mathbf m})$ is open and dense in
$({\mathcal G}_{\phi},d_{\phi})$.
\end{lemma}
We split the proof of Lemma~\ref{L;Baire step}
into two parts. The first part follows a familiar pattern.
\begin{lemma}\label{L;Baire step one} The set
${\mathcal H}(q,p,{\mathbf m})$ is open.
\end{lemma}
\begin{proof} We show that the complement of
${\mathcal H}(q,p,{\mathbf m})$ is closed.
Suppose that
$(E_{r},\mu_{r})\notin {\mathcal H}(q,p,{\mathbf m})$
and $(E_{r},\mu_{r})\rightarrow (E,\mu)$.
We can find $x_{j}(r)\in E_{r}$ such that
$|x_{i}(r)-x_{j}(r)|\geq 1/p$ for $i\neq j$
and
\[\sum_{j=1}^{q}m_{j}x_{j}(r)=0.\]
By an appropriate form of the Bolzano--Weierstrass
theorem, we can find $x_{j}\in{\mathbb T}$
and $r(k)\rightarrow\infty$ such that
$x_{j}(r(k))\rightarrow x_{j}$ for each $1\leq j\leq q$.
Automatically, $|x_{i}-x_{j}|\geq 1/p$ for $i\neq j$
and
\[\sum_{j=1}^{q}m_{j}x_{j}=0.\]
Since $d_{\phi}(E_{r(k)},E)\rightarrow 0$ it follows that
$x_{j}\in E$ for $1\leq j\leq q$ and so
$(E,\mu)\notin {\mathcal H}(q,p,{\mathbf m})$
as required.
\end{proof}
The proof that ${\mathcal H}(q,p,{\mathbf m})$
is dense forms the meat of the proof. We shall use
the simple but powerful probabilistic ideas
developed in the next section.
\section{The poor man's central limit theorem} Every student
learns the statement and a few students learn the
proof of the central limit theorem.
\begin{theorem} If $X_{1}$, $X_{2}$, $\dots$ are independent
real valued random variables with mean $0$ and variance $1$,
then
\[\Pr\left(
\frac{X_{1}+X_{2}+\ldots+X_{n}}{n^{1/2}}\in [a,b]\right)
\rightarrow \frac{1}{\sqrt{2\pi}}\int_{a}^{b}\exp(-t^{2}/2)\,dt
\]
as $n\rightarrow\infty$.
\end{theorem}
However, knowing the statement, or even the proof, of a theorem
is not the same as understanding it\footnote{The present
author knows for certain that he did not understand the
central limit theorem when he was a student. He strongly suspects that
he does not understand it now.}.
\begin{exercise}\label{E;draw exponential}
(i) Quickly sketch the graph of $\exp x$ that you usually draw.
(ii) Sketch the graph of $\exp x$ as $x$ runs from $-10$
to $10$ paying attention to the scales involved.
(iii) Sketch the graph of $\exp(-x^{2}/2)$ as $x$ runs from $-10$
to $10$ paying attention to the scales involved.
\end{exercise}
Exercise~\ref{E;draw exponential} reminds us that, if
$X$ is a random variable with a normal distribution
mean $0$ and variance $\sigma^{2}$, then
$\Pr(|X|\geq K\sigma)\rightarrow 0$ very rapidly
as $K\rightarrow\infty$.
Unfortunately, the central limit theorem, in the form
given above, is purely a limit theorem
and does not enable us to make statements
about
\[\Pr\left(
\left|\frac{X_{1}+X_{2}+\ldots+X_{n}}{n^{1/2}}\right|>K\right)\]
for some specific $n$.
However, we can use an idea which, I believe, goes back to
Bernstein to obtain a very useful substitute. We develop
the idea in one of its simplest forms.
\begin{lemma}\label{L;Bernstein}
(i) If $X$ is a real valued random variable
with $|X|\leq 1$ and ${\mathbb E}X=0$, then
\[{\mathbb E}\exp(tX)\leq\exp(t^{2}).\]
(ii) If $X_{1}$, $X_{2}$, \ldots, $X_{n}$ are independent
real valued random variables
with $|X_{j}|\leq 1$ and ${\mathbb E}X_{j}=0$, then
\[{\mathbb E}\exp\big(t\sum_{j=1}^{n}X_{j}\big)\leq\exp(nt^{2}).\]
(iii) If $X_{1}$, $X_{2}$, \ldots, $X_{n}$ are independent
real valued random variables
with $|X_{j}|\leq 1$ and ${\mathbb E}X_{j}=0$,
then
\[\Pr(X_{1}+X_{2}+\ldots+X_{n}\geq K)\leq \exp\big(-K^{2}/(4n)\big)\]
and
\[\Pr\big(|X_{1}+X_{2}+\ldots+X_{n}|\geq K\big)\leq 2\exp\big(-K^{2}/(4n)\big)\]
for all $K>0$.
(iv) If $Z_{1}$, $Z_{2}$, \dots, $Z_{n}$ are independent
complex valued random variables
with $|Z_{j}|\leq 1$ and ${\mathbb E}Z_{j}=0$,
then
\[\Pr\big(|Z_{1}+Z_{2}+\ldots+Z_{n}|\geq K\big)\leq 4\exp\big(-K^{2}/(8n)\big)\]
for all $K>0$.
\end{lemma}
\begin{proof} (i) We consider two cases.
If $|t|\geq 1$, then
\[{\mathbb E}\exp(tX)\leq{\mathbb E}\exp|t|=\exp|t|\leq \exp(t^{2}).\]
If $|t|\leq 1$, then
\begin{align*}
{\mathbb E}\exp(tX)&={\mathbb E}\sum_{m=0}^{\infty}\frac{(tX)^{m}}{m!}
=\sum_{m=0}^{\infty}\frac{t^{m}{\mathbb E} X^{m}}{m!}\\
&=1+\sum_{m=2}^{\infty}\frac{t^{m}{\mathbb E} X^{m}}{m!}
\leq 1+\sum_{m=2}^{\infty}\frac{|t|^{m}}{m!}\\
&\leq 1+t^{2}\sum_{m=2}^{\infty}\frac{1}{m!}\leq 1+t^{2}\leq \exp(t^{2})
\end{align*}
and we are done.
(ii) Since independent expectations multiply,
\[{\mathbb E}\exp\big(t\sum_{j=1}^{n}X_{j}\big)
={\mathbb E}\prod_{j=1}^{n}\exp(tX_{j})
=\prod_{j=1}^{n}{\mathbb E}\exp(tX_{j})\leq\exp(nt^{2}).\]
(iii) Observe that
\[\Pr(X_{1}+X_{2}+\ldots+X_{n}\geq K)\exp (tK)
\leq{\mathbb E}\exp\big(t(X_{1}+X_{2}+\ldots+X_{n})\big)
\leq\exp(nt^{2})\]
and so
\[\Pr(X_{1}+X_{2}+\ldots+X_{n}\geq K)\leq
\exp(nt^{2}-Kt)=\exp\big(n(t-K/(2n))^{2}-K^{2}/(4n)\big).\]
Setting $t=K/(2n)$, we see that
\[\Pr(X_{1}+X_{2}+\ldots+X_{n}\geq K)\leq\exp(-K^{2}/(4n)).\]
Replacing $X_{j}$ by $-X_{j}$ we have
\[\Pr(X_{1}+X_{2}+\ldots+X_{n}\leq -K)\leq\exp(-K^{2}/(4n)),\]
so, using the last two displayed formulae,
\[\Pr\big(|X_{1}+X_{2}+\ldots+X_{n}|\geq K\big)\leq 2\exp\big(-K^{2}/(4n)\big).\]
(iv) If we write $Z_{j}=X_{j}+iY_{j}$ with $X_{j}$ and $Y_{j}$
real, then $X_{j}$ and $Y_{j}$ satisfy the conditions of (ii).
Since
\[|\Re z|,\, |\Im z|\leq 2^{-1/2}K\Rightarrow|z|\leq K,\]
we have
\begin{align*}
\Pr\left(\left|\sum_{j=1}^{n}Z_{j}\right|\geq K\right)
&\leq \Pr\left(\left|\sum_{j=1}^{n}X_{j}\right|\geq 2^{-1/2}K\right)
+\Pr\left(\left|\sum_{j=1}^{n}Y_{j}\right|\geq 2^{-1/2}K\right)\\
&\leq 4\exp\big(-K^{2}/(8n)\big),
\end{align*}
as stated.
\end{proof}
The next lemma, which forms the main step in our
proof that ${\mathcal H}(q,p,{\mathbf m})$
is dense, gives a good example of how Lemma~\ref{L;Bernstein}
is used.
\begin{lemma}\label{L;Kaufman}
Let $q$ be a strictly positive integer and
let $m_{1}$, $m_{2}$, \dots, $m_{q}$ be non-zero integers.
Then, provided only that $n$ is large enough,
we can find distinct points $x_{1}$, $x_{2}$, \dots, $x_{n}$
with the following two properties.
(i) If we write $\mu=n^{-1}\sum_{u=1}^{n}\delta_{x_{u}}$,
we have
\[|\hat{\mu}(r)|\leq 8q^{1/2} n^{-1/2}(\log n)^{1/2}\]
for all $1\leq |r|\leq n^{4q}$.
(ii) If $j(1)$, $j(2)$, \dots $j(q)$ are distinct integers
with $1\leq j(k)\leq n$, then
\[\left|\sum_{k=1}^{q} m_{k}x_{j(k)}\right|\geq 8^{-1}n^{-q}.\]
\end{lemma}
\begin{proof} Consider the independent random variables $Y_{u}$ where
each $Y_{u}$ is uniformly distributed over ${\mathbb T}$.
We look at the random measure
\[\sigma=n^{-1}\sum_{u=1}^{n}\delta_{Y_{u}}.\]
We note that
\[\hat{\sigma}(r)=n^{-1}\sum_{u=1}^{n}\exp(2\pi irY_{u}).\]
If $r\neq 0$, we see that the $\exp(2\pi irY_{u})$
are independent identically distributed complex valued
random variables with
\[|\exp(2\pi irY_{u})|=1\ \text{and}
\ {\mathbb E}\exp(2\pi irY_{u})=0.\]
Thus, by
Lemma~\ref{L;Bernstein}
with $K=8q^{1/2}n^{1/2}(\log n)^{1/2}$,
\begin{align*}
\Pr\bigg(|\hat{\sigma}(r)|\geq 8q^{1/2} n^{-1/2}(\log n)^{1/2}\bigg)
&=\Pr\left(\left|\sum_{u=1}^{n}\exp(2\pi irY_{u})\right|
\geq 8q^{1/2} n^{1/2}(\log n)^{1/2}\right)\\
&\leq 4\exp(-8q\log n)=4n^{-8q}.
\end{align*}
Thus, provided only that $n$ is large enough,
\begin{align*}
\Pr\bigg(|\hat{\sigma}(r)|&\geq 8q^{1/2}n^{-1/2}(\log n)^{1/2}
\ \text{for some $1\leq |r|\leq n^{4q}$}\bigg)\\
&\leq\sum_{1\leq |r|\leq n^{4q}}\Pr\bigg(|\hat{\sigma}(r)|
\geq 8q^{1/2}n^{-1/2}(\log n)^{1/2}\bigg)\\
&\leq (2n^{4q}+1)4n^{-8q}\leq 1/4.
\end{align*}
Now suppose that $j(1)$, $j(2)$, \dots, $j(q)$ are distinct integers
with $1\leq j(k)\leq n$. By symmetry or direct calculation,
the random variable
\[\sum_{k=1}^{q} m_{k}Y_{j(k)}\]
is uniformly distributed and so
\[\Pr\left(\sum_{k=1}^{q} m_{k}Y_{j(k)}
\in [-8^{-1}n^{-q},8^{-1}n^{-q}]\right)=4^{-1}n^{-q}.\]
There are no more than $n^{q}$ different $q$-tuples
$j(1)$, $j(2)$, \dots, $j(q)$ of the type discussed,
so, by the same kind of argument as we used in the previous
paragraph, the probability that
\[ \sum_{k=1}^{q} m_{k}Y_{j(k)}
\in [-8^{-1}n^{-q},8^{-1}n^{-q}]\]
for any such $q$-tuple is no more than $1/4$.
Combining the results of our last two paragraphs, we see that,
provided $n$ is large enough,
the probability that $x_{j}=Y_{j}$ will fail to satisfy
the conditions of our lemma is at most $1/2$.
Since there must be an instance of any event with
positive probability, the required result follows.
\end{proof}
\section{Completion of the construction}
The process by which we move from Lemma~\ref{L;Kaufman}
to showing that ${\mathcal H}(q,p,{\mathbf m})$
is dense looks complicated but is not. I suggest the reader concentrates
on the ideas rather than the computations.
The next exercise merely serves to establish notation.
\begin{exercise}\label{E;approximate unit}
Let $K:{\mathbb R}\rightarrow{\mathbb R}$
be an infinitely differentiable function
with the following properties.
(i$'$) $K(x)\geq 0$ for all $x\in{\mathbb R}$.
(ii$'$) $\int_{\mathbb R}K(x)\,dx=1$.
(iii$'$) $K(x)=0$ for $|x|\geq 1/4$.
If $N$ is a positive
integer and we define $K_{N}:{\mathbb T}\rightarrow{\mathbb R}$
by
\[K_{N}(t)=
\begin{cases}
NK(Nt)&\text{if $|t|\leq 1/(4N)$,}\\
0&\text{otherwise,}
\end{cases}
\]
then $K_{N}$ is an infinitely differentiable function
having the following properties.
(i) $K_{N}(t)\geq 0$ for all $t\in{\mathbb T}$.
(ii) $\int_{\mathbb T}K_{N}(t)\,dt=1$.
(iii) $K_{N}(t)=0$ for $|t|\geq 1/(4N)$.
(iv) $|\hat{K}_{N}(r)|\leq 1$ for all $r$.
(v) There exists a constant
$A$, independent of $N$, such that
$|\hat{K}_{N}(r)|\leq A(N/r)^{2}$ for all $r\neq 0$.
\end{exercise}
We now `spread out' the measure of Lemma~\ref{L;Kaufman}
to obtain the measure used in our construction.
\begin{lemma}\label{L;main step 1}
Suppose that $\psi:{\mathbb N}\rightarrow{\mathbb R}$
is a sequence of positive numbers
such that
$\psi(r)\rightarrow\infty$
as $r\rightarrow\infty$.
Suppose that $q$ is a positive integer,
$\epsilon,\,\delta>0$ and
${\mathbf m}=(m_{1},m_{2},\dots,m_{q})\in{\mathbb Z}^{q}$
with $M=\sum_{k=1}^{q}|m_{k}|\neq 0$.
Then we can find an infinitely
differentiable function $f:{\mathbb T}\rightarrow{\mathbb R}$
with the following properties.
(i) $f(t)\geq 0$ for all $t$.
(ii) $\int_{\mathbb T}f(t)\,dt=1$.
(iii) $|\hat{f}(r)|\leq \epsilon
|r|^{-1/(2q)}(\log(1+|r|))^{1/2}\psi(|r|)$ for all
$r\neq 0$.
(iv) If $t_{k}\in\supp f$ for $1\leq k\leq q$
and $|t_{k}-t_{l}|\geq\delta$ for $1\leq k<l\leq q$, then
\[\sum_{k=1}^{q}m_{k}t_{k}\neq 0.\]
\end{lemma}
% NOTE(review): the source is garbled at this point; the end of the
% statement of condition~(iv) and the opening of the proof appear to
% have been lost.  The reconstruction below follows the pattern of the
% surrounding argument and should be checked against the original.
\begin{proof}
Take $x_{1}$, $x_{2}$, \dots, $x_{n}$ and
$\mu=n^{-1}\sum_{u=1}^{n}\delta_{x_{u}}$ as in Lemma~\ref{L;Kaufman}
and set $f=K_{N(n)}*\mu$, where $N(n)$ is an integer chosen so that
$n^{4q}\geq N(n)\geq 4Mn^{q}$ and $N(n)\geq \delta^{-1}$.
If $t_{k}\in\supp f$ for $1\leq k\leq q$
and $|t_{k}-t_{l}|\geq\delta$ for $1\leq k<l\leq q$, then each
$t_{k}$ lies within $1/(4N(n))$ of some $x_{j(k)}$ and, since
$\delta\geq N(n)^{-1}$, the $j(k)$ are distinct, so that,
provided $n$ is large enough,
\begin{align*}
\left|\sum_{k=1}^{q}m_{k}t_{k}\right|
&\geq\left|\sum_{k=1}^{q}m_{k}x_{j(k)}\right|
-\sum_{k=1}^{q}|m_{k}||t_{k}-x_{j(k)}|\\
&\geq 8^{-1}n^{-q}-\frac{M}{4N(n)}\geq 16^{-1}n^{-q}>0
\end{align*}
and condition~(iv) follows.
We bound $|\hat{f}(r)|$ using condition~(i) of
Lemma~\ref{L;Kaufman},
Exercise~\ref{E;approximate unit} and the trivial bounds
$|\hat{f}(r)|,|\hat{\mu}(r)|\leq 1$. If $1\leq |r|\leq N(n)$,
then
\[|\hat{f}(r)|\leq |\hat{\mu}(r)|\leq 8q^{1/2} n^{-1/2}(\log n)^{1/2}
\leq C_{1}N(n)^{-1/(2q)}(\log N(n))^{1/2}\]
for some constant $C_{1}$ independent
of $n$. If $N(n)\leq |r|\leq n^{4q}$, then
\begin{align*}
|\hat{f}(r)|&\leq |\hat{\mu}(r)|
|\hat{K}_{N(n)}(r)|\leq \big(8q^{1/2}n^{-1/2}(\log n)^{1/2}\big)
\big(A(N(n)/r)^{2}\big)\\
&\leq C_{2}N(n)^{-1/(2q)}(\log N(n))^{1/2}(N(n)/r)^{2}\\
&=C_{2}(N(n)/r)^{2-1/(2q)}r^{-(1/2q)}(\log N(n))^{1/2}\\
&\leq C_{3}|r|^{-1/(2q)}(\log |r|)^{1/2}
\end{align*}
for some constants $C_{2}$ and $C_{3}$ independent of $n$.
If $|r|\geq n^{4q}$, then
\[|\hat{f}(r)|\leq |\hat{K}_{N(n)}(r)|\leq A(N(n)/r)^{2}
= A|r|^{-1}(N(n)^{2}/|r|)
\leq C_{4}|r|^{-1/(2q)}(\log |r|)^{1/2}\]
for some constant $C_{4}$ independent
of $n$.
Since $\psi(r)\rightarrow\infty$
as $r\rightarrow\infty$, it follows that,
provided only that $n$ is large enough,
\[|\hat{f}(r)|\leq \epsilon |r|^{-1/(2q)}(\log(1+|r|))^{1/2}\psi(|r|)\]
for all
$r\neq 0$ and condition~(iii) holds.
\end{proof}
We make a further observation.
\begin{lemma}\label{L;spread out}
Given $\epsilon>0$, we can find an $\eta>0$
such that, if $\mu$ is a probability measure with
$|\hat{\mu}(r)|\leq\eta$ for $r\neq 0$, we know that $\supp\mu$ intersects
every interval of length $\epsilon$.
\end{lemma}
\begin{proof} By translation, it suffices to show that
$\supp\mu$ intersects $(-\epsilon/2,\epsilon/2)$. Choose
an integer $N$ with $N\geq \epsilon^{-1}$. If $\supp\mu$
does not intersect $(-\epsilon/2,\epsilon/2)$, then
\begin{align*}
0&=\left|\int_{\mathbb T}K_{N}(t)\,d\mu(t)\right|
=\left|\sum_{r=-\infty}^{\infty}\hat{K}_{N}(-r)\hat{\mu}(r)\right|\\
&\geq |\hat{K}_{N}(0)||\hat{\mu}(0)|-
\sum_{r\neq 0}|\hat{K}_{N}(-r)\hat{\mu}(r)|
\geq 1-2\eta AN^{2}\sum_{r=1}^{\infty}r^{-2}
\end{align*}
which is impossible if $\eta$ is sufficiently small.
\end{proof}
\begin{exercise} Instead of using Lemma~\ref{L;spread out},
we could have added an extra condition to
Lemma~\ref{L;Kaufman}. We suppose that we are also
given some integer $Q\geq 1$.
(iii) We have
\[[u/Q,(u+1)/Q]\cap\{x_{1},\, x_{2},\, \ldots,\, x_{n}\}\neq\emptyset\]
for all integers $u$ with $0\leq u\leq Q-1$.
Show how to modify the proof of Lemma~\ref{L;Kaufman}
to add this condition. What condition does this addition
enable us to add to Lemma~\ref{L;main step 1}?
\end{exercise}
Our next lemma is another `spreading lemma' but rather simpler.
\begin{lemma}\label{L;smooth Baire}
Given $(E,\mu) \in {\mathcal G}_{\phi}$
and $\epsilon>0$, we can find an $(F,\sigma)\in {\mathcal G}_{\phi}$
with $d_{\phi}\big((E,\mu),(F,\sigma)\big)<\epsilon$
having the following properties.
(i) $d\sigma(x)=g(x)\,dm(x)$, where $g$ is infinitely differentiable
and $m$ is Lebesgue measure.
(ii) There exists an $\alpha>0$ such that, whenever $x\in F$,
we can find an interval $I=[y-\alpha,y+\alpha]$ with
$x\in I\subseteq F$.
\end{lemma}
\begin{proof} Choose $u_{n}:{\mathbb T}\rightarrow{\mathbb R}$
a non-negative, infinitely differentiable function, such that
$\supp u_{n}\subseteq [-1/n,1/n]$ and $\int_{\mathbb T}u_{n}(t)\,dt=1$.
Provided that $n$ is large enough, standard theorems show
that $g=u_{n}*\sigma$, $d\sigma(x)=g(x)\,dm(x)$,
and $F=E+[-1/n,1/n]$ satisfy the conclusions of the lemma.
\end{proof}
We also need the following calculation.
\begin{lemma}\label{L;standard calculation}
There exists a constant $A$ with the following property.
Suppose that $\omega:{\mathbb N}\rightarrow {\mathbb R}$
is a sequence of positive numbers with $\omega(0)=1$,
\[K^{-1}n^{-1}\leq \omega(n)\]
for all $n\neq 0$ and
\[K^{-1}\omega(n)\leq \omega(r)\leq K\omega(n)\]
for all $1\leq n\leq r\leq 2n$ and some constant $K>1$.
Suppose also that $f$ and $g$ are continuous functions with
$\hat{f}(0)=1$ and
\[|\hat{g}(r)|\leq Br^{-2}, |\hat{f}(r)|\leq C\omega(|r|)\]
for all $r\neq 0$. Then
\[|\widehat{f\times g}(r)-\hat{g}(r)|\leq ABCK\omega(r)\]
for all $r$.
\end{lemma}
\begin{proof} We have
\begin{align*}
|\widehat{f\times g}(r)-\hat{g}(r)|
&=\left|\sum_{u\neq 0}\hat{f}(r-u)\hat{g}(u)\right|
\leq \sum_{u\neq 0}|\hat{f}(r-u)\hat{g}(u)|\\
&\leq BC\sum_{u\neq 0}\frac{\omega(|r-u|)}{u^{2}}\\
&=BC\sum_{0<|u|\leq |r|/2}\frac{\omega(|r-u|)}{u^{2}}
+BC\sum_{|u|>r/2}\frac{\omega(|r-u|)}{u^{2}}\\
&\leq BC\sum_{|u|\leq |r|/2}\frac{K\omega(|r|)}{u^{2}}
+BC\sum_{|u|>|r|/2}\frac{1}{u^{2}}\\
&\leq A_{1}BCK\omega(|r|)+A_{2}BC\frac{1}{|r|}
\leq ABCK\omega(|r|)
\end{align*}
for appropriate constants $A_{1}$, $A_{2}$ and $A$
and all $r\neq 0$. A similar calculation works for $r=0$.
\end{proof}
We can now complete the proof of Lemma~\ref{L;Baire step}.
\begin{lemma}\label{L;Baire step two} The set
${\mathcal H}(q,p,{\mathbf m})$ is dense
in $({\mathcal G}_{\phi},d_{\phi})$.
\end{lemma}
\begin{proof} We wish to show that, given any
$\eta$ with $1/10>\eta>0$
and any $(E,\mu)\in {\mathcal G}_{\phi}$, we can find
an $(F,\sigma)\in {\mathcal H}(q,p,{\mathbf m})$ with
\[d_{\phi}\big((E,\mu),(F,\sigma)\big)<\eta.\]
In view of Lemma~\ref{L;smooth Baire}, we may suppose that
$d\mu(x)=g(x)\,dm(x)$ where $g$ is infinitely differentiable
and there exists an $\alpha>0$ such that
every point of $\supp g$ lies in an interval $I\subseteq \supp g$
of length at least $\alpha>0$.
Since $g$ is smooth, there exists a constant $B$ such that
\[|\hat{g}(r)|\leq B|r|^{-2}\]
for all $r\neq 0$.
Lemma~\ref{L;main step 1} tells us that,
if $\epsilon>0$, we can find an infinitely
differentiable function
$f_{\epsilon}:{\mathbb T}\rightarrow{\mathbb R}$
with the following properties.
(i) $f_{\epsilon}(t)\geq 0$ for all $t$.
(ii) $\int_{\mathbb T}f_{\epsilon}(t)\,dt=1$.
(iii) $|\hat{f}_{\epsilon}(r)|\leq \epsilon |r|^{-1/(4q)}$ for all
$r\neq 0$.
(iv) If $x_{j}\in\supp f_{\epsilon}$ for $1\leq j\leq q$,
and $|x_{j}-x_{k}|\geq 1/p$ for $1\leq j<k\leq q$, then
$\sum_{j=1}^{q}m_{j}x_{j}\neq 0$.
% NOTE(review): the source is garbled here and some text appears to be
% missing; presumably $g_{\epsilon}=f_{\epsilon}\times g$ is defined at
% this point before the following estimate is stated.
We now set $g_{\epsilon}=f_{\epsilon}\times g$.
Lemma~\ref{L;standard calculation} tells us that, if $\gamma>0$ is fixed,
$|\hat{g}_{\epsilon}(0)-\hat{g}(0)|\leq \gamma$ and
\[|\hat{g}_{\epsilon}(r)-\hat{g}(r)|\leq \gamma |r|^{-1/(4q)}\]
for all $r\neq 0$ and all sufficiently small $\epsilon$.
Since $r^{1/4q}\phi(r)\rightarrow\infty$
as $r\rightarrow\infty$, it follows that,
if $\beta>0$ is fixed with $1/10>\beta>0$,
\[\|\hat{g}_{\epsilon}-\hat{g}\|_{\phi}<\beta\]
for all $\epsilon$ sufficiently small.
In particular, we know that
\[|\hat{g}_{\epsilon}(0)-1|=|\hat{g}_{\epsilon}(0)-\hat{g}(0)|<\beta\]
for $\epsilon$ sufficiently small. Writing
$G_{\epsilon}=\hat{g}_{\epsilon}(0)^{-1}g_{\epsilon}$,
we have
\begin{align*}
\|\hat{G}_{\epsilon}-\hat{g}\|_{\phi}
&=\left\|\frac{\hat{g}_{\epsilon}}{\hat{g}_{\epsilon}(0)}
-\hat{g}\right\|_{\phi}\\
&\leq \|\hat{g}_{\epsilon}-\hat{g}\|_{\phi}
+\left|1-\frac{1}{\hat{g}_{\epsilon}(0)}\right|
(\|\hat{g}\|_{\phi}+\|\hat{g}_{\epsilon}-\hat{g}\|_{\phi})\\
&\leq \beta+2\beta(\|\hat{g}\|_{\phi}+\beta).
\end{align*}
It follows that $G_{\epsilon}\in {\mathcal G}_{\phi}$
and, provided only that $\beta$ (and so $\epsilon$)
is small enough,
\[\|\hat{G}_{\epsilon}-\hat{g}\|_{\phi}<\eta/2.\]
Thus, provided only that $\epsilon$ is small enough,
$F=E_{\epsilon}$ and
$d\sigma(x)=G_{\epsilon}(x)\,dm(x)$
satisfy the conclusions required by the first sentence of this proof.
\end{proof}
We have thus proved Theorem~\ref{T;Baire Rudin}
and so Theorem~\ref{T;fast Rudin}.
The reader should do as much or as little of the exercises
which conclude this section as she pleases. They will not be
referred to again.
\begin{exercise}\label{E;algebra 1}
Using the same kind of methods as we used
to establish Theorem~\ref{T;fast Rudin}, establish the following result.
If $q$ is an integer with $q\geq 1$, then,
given any $\alpha>1/(2q)$,
there exists a probability measure $\mu$
such that
\[|\hat{\mu}(r)|\leq |r|^{-\alpha}\]
for all $r\neq 0$,
but,
given distinct points $x_{1},\,x_{2},\,\dots,\,x_{q}\in \supp\mu$,
the only solution to the equation
\[\sum_{j=1}^{q}m_{j}x_{j}=0\]
with $m_{j}\in{\mathbb Z}$ is the trivial solution
$m_{1}=m_{2}=\dots=m_{q}=0$.
\end{exercise}
Notice that there is a very big gap between the result of
Exercise~\ref{E;algebra 1} and the result of Lemma~\ref{L;large support}.
\begin{exercise} Consider the independent random variables $Y_{u}$
and the random measure
\[\sigma=n^{-1}\sum_{u=1}^{n}\delta_{Y_{u}}\]
introduced in Lemma~\ref{L;Kaufman}.
Show that, provided $n$ is large enough,
the probability that more than
$n^{1/2}(\log n)^{1/2}$ of the $n^{q}$ different $q$-tuples
$j(1)$, $j(2)$, \dots, $j(q)$ satisfy
\[\sum_{k=1}^{q} m_{k}Y_{j(k)}
\in [-n^{-q+1/2},n^{-q+1/2}]\tag{$\bigstar$}\]
is very small indeed. By removing one element $Y_{j(1)}$
corresponding to every $q$-tuple which satisfies $\bigstar$,
show that with high probability, the set $\{Y_{1},\,Y_{2},\,\ldots,Y_{n}\}$
contains a subset $\{W_{1},\,W_{2},\,\ldots,W_{v}\}$ with
$v\geq n-n^{1/2}(\log n)^{1/2}$ with the following property.
If $j(1)$, $j(2)$, \dots, $j(q)$ are distinct integers
with $1\leq j(k)\leq v$ then
\[\sum_{k=1}^{q} m_{k}W_{j(k)}\notin [-n^{-q+1/2},n^{-q+1/2}].\]
Let $\tau=v^{-1}\sum_{u=1}^{v}\delta_{W_{u}}$.
By comparing $\hat{\tau}(r)$
and $\hat{\sigma}(r)$ show that there exists an $A$ depending
only on $q$ such that, if $n$ is large enough, then,
with high probability
\[|\hat{\tau}(r)|\leq A n^{-1/2}(\log n)^{1/2}\]
for all $1\leq |r|\leq n^{4q}$.
Hence show that we can replace the condition $\alpha>1/(2q)$
in Exercise~\ref{E;algebra 1} by the condition
$\alpha>1/(2q+\tfrac{1}{2})$.
\end{exercise}
A substantially more complicated construction, given in~\cite{Extra Rudin},
shows that we can replace the condition $\alpha>1/(2q)$
in Exercise~\ref{E;algebra 1} by the condition $\alpha>1/(2q+1)$
but this still leaves a very large gap.
\section{Sets of uniqueness and multiplicity}
The contents of the next two sections are intended
to provide general background to our next results.
The reader who misses out these sections will
lose nothing except this background.
We are
used to the idea of studying the Fourier sum
\[\sum_{n=-\infty}^{\infty}\hat{f}(n)\chi_{n}\]
where $f$ is some appropriate function.
What happens if we study general trigonometric sums
\[\sum_{n=-\infty}^{\infty}a_{n}\chi_{n}\]
with $a_{n}\in{\mathbb C}$? One of the first
questions about such sums is the problem of uniqueness.
If
\[\sum_{n=-N}^{N}a_{n}\chi_{n}(t)\rightarrow 0\]
as $N\rightarrow\infty$ for all $t\in{\mathbb T}$,
does it follow that $a_{n}=0$ for all $n$?
\begin{exercise} (Easy.) Show that,
if $\sum_{n=-N}^{N}a_{n}\chi_{n}(t)\rightarrow 0$
as $N\rightarrow\infty$ for all $t\in{\mathbb T}$ then
$a_{n}\rightarrow 0$ as $|n|\rightarrow\infty$.
\end{exercise}
Riemann had the happy idea of considering the effect
of formally integrating twice to obtain
\[F(t)=A+Bt+\frac{a_{0}t^{2}}{2}
-\sum_{n\neq 0}\frac{a_{n}}{n^{2}}\chi_{n}(t).\]
\begin{exercise} (Easy.)
Suppose that $a_{n}\rightarrow 0$ as $|n|\rightarrow\infty$.
Explain why $F$ is a well defined continuous function.
\end{exercise}
When $\sum_{n=-N}^{N}a_{n}\chi_{n}(t)$ converges to a certain
value, we can recover that value by looking at the
`generalised second derivative'
\[\lim_{h\rightarrow 0}\frac{F(t+h)-2F(t)+F(t-h)}{4h^{2}}.\]
\begin{exercise} If $f:{\mathbb R}\rightarrow{\mathbb R}$
is twice differentiable at $0$ with $f(0)=f'(0)=f''(0)=0$,
use the mean value theorem to show that
\[\frac{f(h)-2f(0)+f(-h)}{4h^{2}}\rightarrow 0\]
as $h\rightarrow 0$.
Deduce that if $g:{\mathbb R}\rightarrow{\mathbb R}$
is twice differentiable at $0$, then
\[\frac{g(h)-2g(0)+g(-h)}{4h^{2}}\rightarrow g''(0)\]
as $h\rightarrow 0$.
\end{exercise}
\begin{exercise} Suppose that $a_{n}\in{\mathbb C}$ and
$a_{n}\rightarrow 0$ as $|n|\rightarrow\infty$. If
\[F(t)=A+Bt+\frac{a_{0}t^{2}}{2}
-\sum_{n\neq 0}\frac{a_{n}}{n^{2}}\chi_{n}(t),\]
show that
\[\frac{F(x+h)-2F(x)+F(x-h)}{4h^{2}}
=a_{0}+\sum_{n\neq 0}a_{n}\chi_{n}(x)\left(\frac{\sin 2\pi nh}{nh}\right)^{2}.\]
\end{exercise}
Our next task, which will take a little time
is to prove the `Riemann summation' result given in the next lemma.
\begin{lemma}\label{L;Riemann summation} If
$\sum_{n=0}^{\infty}b_{n}$ converges then
\[b_{0}+\sum_{n=1}^{\infty}b_{n}\left(\frac{\sin nh}{nh}\right)^{2}
\rightarrow \sum_{n=0}^{\infty}b_{n}\]
as $h\rightarrow 0$.
\end{lemma}
\begin{exercise}\label{E;reverse Riemann} Deduce from
Lemma~\ref{L;Riemann summation} that,
if
\[\sum_{n=-N}^{N}a_{n}\chi_{n}(t)\rightarrow 0\]
as $N\rightarrow\infty$ for all $t\in{\mathbb T}$
and we set
\[F(t)=\frac{a_{0}t^{2}}{2}
-\sum_{n\neq 0}\frac{a_{n}}{n^{2}}\chi_{n}(t),\]
then
\[\frac{F(t+h)-2F(t)+F(t-h)}{4h^{2}}\rightarrow 0\]
as $h\rightarrow 0$ for all $t\in{\mathbb T}$.
\end{exercise}
Part of the proof of Lemma~\ref{L;Riemann summation}
rests on ideas which are now familiar from
elementary functional analysis.
\begin{exercise}\label{E;Toeplitz}
(i) Suppose that $\gamma_{n}(h)\in{\mathbb C}$
satisfies the following two conditions.
(A) $\gamma_{n}(h)\rightarrow 0$ as $h\rightarrow 0$.
(B) There exists a $C$ such that
\[\sum_{n=0}^{\infty}|\gamma_{n}(h)|\leq C\]
for all $h$.
Then, if $t_{n}\rightarrow 0$ as $n\rightarrow\infty$, it follows
that
\[\sum_{n=0}^{\infty}\gamma_{n}(h)t_{n}\rightarrow 0\]
as $h\rightarrow 0$.
(ii) Suppose, in addition, that
(C) ${\displaystyle \sum_{n=1}^{\infty}\gamma_{n}(h)\rightarrow 1}$
as $h\rightarrow 0$.
Then, if $s_{n}\rightarrow t$ as $n\rightarrow\infty$, it follows
that
\[\sum_{n=0}^{\infty}\gamma_{n}(h)s_{n}\rightarrow t\]
as $h\rightarrow 0$.
\end{exercise}
\begin{proof}[Proof of Lemma~\ref{L;Riemann summation}]
If we write $s_{n}=\sum_{r=0}^{n}b_{r}$,
\[\gamma_{0}(h)=1-\left(\frac{\sin h}{h}\right)^{2},
\ \gamma_{n}(h)=\left(\frac{\sin nh}{nh}\right)^{2}
-\left(\frac{\sin (n+1)h}{(n+1)h}\right)^{2}\]
for $n\geq 1$, Abel summation (that is to say, summation by parts) yields
\[b_{0}+\sum_{n=1}^{\infty}b_{n}\left(\frac{\sin nh}{nh}\right)^{2}
=\sum_{n=0}^{\infty}\gamma_{n}(h)s_{n}.\]
We wish to estimate $\sum_{n=0}^{\infty}|\gamma_{n}(h)|$.
To this end, observe that, writing $u(t)=\big((\sin t)/t\big)^{2}$,
we have
\[
u'(t)
=2\frac{\sin t}{t}\times\frac{t\cos t-\sin t}{t^{2}}\]
so $u'(t)\rightarrow 0$ as $t\rightarrow 0$ and
\[|u'(t)|\leq\frac{20}{t^{2}}\]
for $t\geq 1$.
Thus
\[\sum_{n=0}^{\infty}|\gamma_{n}(h)|
=\sum_{n=0}^{\infty}\left|\int_{nh}^{(n+1)h}u'(t)\,dt\right|
\leq \int_{0}^{\infty}|u'(t)|\,dt,\]
with the convergence of the integral guaranteed by the estimates
in the previous sentence.
Since $\sum_{n=0}^{\infty}\gamma_{n}(h)=1$ and $\gamma_{n}(h)\rightarrow 0$
as $h\rightarrow 0$ for each fixed $n$, Exercise~\ref{E;Toeplitz}
gives the required result.
\end{proof}
We combine the result of Exercise~\ref{E;reverse Riemann}
with a very neat result of Schwarz.
\begin{lemma}\label{L;Schwarz concave}
Let $f:[a,b]\rightarrow{\mathbb R}$ be continuous.
(i) Suppose that $f(a)=f(b)=0$ and
\[\limsup_{h\rightarrow 0}\frac{f(x+h)-2f(x)+f(x-h)}{4h^{2}}>0\]
for all $x\in (a,b)$. Then $f(x)\leq 0$ for all $x\in[a,b]$.
(ii) Suppose that $f(a)=f(b)=0$ and
\[\limsup_{h\rightarrow 0}\frac{f(x+h)-2f(x)+f(x-h)}{4h^{2}}\geq 0\]
for all $x\in (a,b)$. Then $f(x)\leq 0$ for all $x\in[a,b]$.
(iii) Suppose that $f(a)=f(b)=0$ and
\[\frac{f(x+h)-2f(x)+f(x-h)}{4h^{2}}\rightarrow 0\]
as $h\rightarrow 0$
for all $x\in (a,b)$. Then $f(x)=0$ for all $x\in[a,b]$.
(iv) Suppose that
\[\frac{f(x+h)-2f(x)+f(x-h)}{4h^{2}}\rightarrow 0\]
as $h\rightarrow 0$
for all $x\in (a,b)$. Then there exist constants $A$ and $B$
such that $f(x)=Ax+B$ for all $x\in[a,b]$.
\end{lemma}
\begin{proof} (i) Since $f$ is continuous on the closed interval $[a,b]$,
it is bounded and attains its bounds. Suppose that $f$ attains
its maximum at $x_{0}$. If $x_{0}\in (a,b)$ then
\[\frac{f(x_{0}+h)-2f(x_{0})+f(x_{0}-h)}{4h^{2}}\leq 0\]
for all $x_{0}+h,\,x_{0}-h\in [a,b]$ so
\[\limsup_{h\rightarrow 0}
\frac{f(x_{0}+h)-2f(x_{0})+f(x_{0}-h)}{4h^{2}}\leq 0.\]
Since this is excluded, $x_{0}\in\{a,b\}$ and
$f(x)\leq f(x_{0})=0$ for all $x\in[a,b]$.
(ii) If we set
$g(x)=f(x)-\epsilon\big(\big(x-(a+b)/2)^{2}-(b-a)^{2}/4\big)/2 $
with $\epsilon>0$, then $g(a)=g(b)=0$ and
\[\frac{g(x+h)-2g(x)+g(x-h)}{4h^{2}}=-\epsilon+
\frac{f(x+h)-2f(x)+f(x-h)}{4h^{2}},\]
so part~(i) tells us that $g(x)\leq 0$ for all $x\in [a,b]$.
Allowing $\epsilon\rightarrow 0$, we get $f(x)\leq 0$.
(iii) Apply part~(ii) to $f$ and $-f$.
(iv) Choose $A$ and $B$ so that, writing $g(x)=f(x)-A-Bx$,
we have $g(a)=g(b)=0$ and apply part~(iii) to the function $g$.
\end{proof}
Putting our results together, we obtain the following
uniqueness theorem.
\begin{theorem} If
\[\sum_{n=-N}^{N}a_{n}\chi_{n}(t)\rightarrow 0\]
as $N\rightarrow\infty$ for all $t\in{\mathbb T}$,
then $a_{n}=0$ for all $n$.
\end{theorem}
\begin{proof}
Consider the continuous function
\[F(t)=\frac{a_{0}t^{2}}{2}
-\sum_{n\neq 0}\frac{a_{n}}{n^{2}}\chi_{n}(t)\]
on the subset $[-\pi/4,3\pi/2]$
of ${\mathbb T}$.
By Exercise~\ref{E;reverse Riemann},
\[\frac{F(t+h)-2F(t)+F(t-h)}{4h^{2}}\rightarrow 0\]
as $h\rightarrow 0$ for all $t\in{\mathbb T}$
so, by Lemma~\ref{L;Schwarz concave}~(iv), we can find $A$, $B$ such that
$F(t)=At +B$
and so
\[\sum_{n\neq 0}\frac{a_{n}}{n^{2}}\chi_{n}(t)
=A+Bt+\frac{a_{0}t^{2}}{2}\]
for all $-\pi/4\leq t\leq 3\pi/2$. Exactly the same argument
shows that we can find constants $A'$ and $B'$ such that
\[\sum_{n\neq 0}\frac{a_{n}}{n^{2}}\chi_{n}(t)
=A'+B't+\frac{a_{0}t^{2}}{2}\]
for $-3\pi/2\leq t\leq \pi/4$. For these statements to be consistent
we must have $A=A'=B=B'=0$, $a_{0}=0$ and
\[\sum_{n\neq 0}\frac{a_{n}}{n^{2}}\chi_{n}(t)=0\]
for all $t\in{\mathbb T}$.
By the uniqueness of Fourier coefficients for continuous functions,
we have $a_{n}=0$ for all $n$ and we are done.
\end{proof}
Cantor realised that this result could be extended.
\begin{definition} We say that a subset $E$ of ${\mathbb T}$
is of \emph{uniqueness} if
\[\sum_{n=-N}^{N}a_{n}\chi_{n}(t)\rightarrow 0\]
as $N\rightarrow\infty$ for all $t\notin E$
and $a_{n}\rightarrow 0$ as $|n|\rightarrow\infty$
implies $a_{n}=0$ for all $n$.
If $E$ is not a set of uniqueness we say that
$E$ is of multiplicity.
\end{definition}
Cantor showed that every finite set is of uniqueness,
every closed set with
a finite set of limit points is of uniqueness, every closed set
whose set of limit points has a finite set of limit points
is of uniqueness and so on. This line of research led him
naturally into the study of ordinals. Later it was shown that
every
countable closed set is of uniqueness and Young
showed that every countable set is of uniqueness.
It is not hard (once we understand Lebesgue measure) to
show that no set of strictly positive Lebesgue measure
can be of uniqueness and it must have been plausible
to suppose that all sets of Lebesgue measure zero would
turn out to be of uniqueness. Men\u{s}ov showed that
this is not the case.
To show this we need a version of the Riemann localisation
lemma.
\begin{lemma}\label{L;localise}
Suppose that $\mu$ is a measure such that
$\hat{\mu}(n)\rightarrow 0$ as $|n|\rightarrow\infty$ and
$f:{\mathbb T}\rightarrow{\mathbb C}$ is
an infinitely differentiable function.
If we set $d\nu(t)=f(t)\,d\mu(t)$ then
\[\left|f(t)\sum_{n=-N}^{N}\hat{\mu}(n)\chi_{n}(t)
-\sum_{n=-N}^{N}\hat{\nu}(n)\chi_{n}(t)\right|\rightarrow 0\]
as $N\rightarrow\infty$.
\end{lemma}
\begin{proof} Using the fact that $|\hat{f}(n)|\leq A(1+|n|)^{-4}$
to justify various interchanges of summation,
\begin{align*}
\left|f(t)\sum_{n=-N}^{N}\right.&\left.\hat{\mu}(n)\chi_{n}(t)
-\sum_{n=-N}^{N}\hat{\nu}(n)\chi_{n}(t)\right|\\
&=\left|\sum_{m=-\infty}^{\infty}\hat{f}(m)\chi_{m}(t)
\sum_{n=-N}^{N}\hat{\mu}(n)\chi_{n}(t)
-\sum_{n=-N}^{N}\sum_{q=-\infty}^{\infty}
\hat{\nu}(n-q)\hat{f}(q)\chi_{n}(t)\right|\\
&=\left|\sum_{(n,m)\in A(N)}-\sum_{(n,m)\in B(N)}
\hat{f}(m)\hat{\mu}(n)\chi_{m+n}(t)\right|\\
&\leq \sum_{A(N)\triangle B(N)}|\hat{f}(m)\hat{\mu}(n)|
\end{align*}
with
\[A(N)=\{(m,n)\,:\,m\in{\mathbb Z},\,|n|\leq N\}
\ \text{and}
\ B(N)=\{(m,n)\,:\,|m-n|\leq N\}.\]
Now observe that
\[\sum_{A(N)\triangle B(N)}(1+|n|)^{-3}
\leq 4\sum_{m=0}^{\infty}\sum_{n=m}^{\infty}(1+n)^{-3}
\leq C'\sum_{m=0}^{\infty}(1+m)^{-2}\leq C\]
for appropriate constants $C'$ and $C$,
so, if $M$ is fixed,
\begin{align*}
\left|f(t)\sum_{n=-N}^{N}\right.&\left.\hat{\mu}(n)\chi_{n}(t)
-\sum_{n=-N}^{N}\hat{\nu}(n)\chi_{n}(t)\right|\\
&\leq \sum_{A(N)\triangle B(N)}(1+|n|)^{-3}\sup_{|r|\geq M}|\hat{\mu}(r)|
+\sum_{|m|\leq M,|n-r|\geq N}(1+|n|)^{-3}\sup_{r\in{\mathbb Z}}|\hat{\mu}(r)|\\
&\leq C\sup_{|r|\geq M}|\hat{\mu}(r)|
+\sum_{|m|\leq M,|n-r|\geq N}(1+|n|)^{-3}\sup_{r\in{\mathbb Z}}|\hat{\mu}(r)|\\
&\rightarrow C\sup_{|r|\geq M}|\hat{\mu}(r)|
\end{align*}
as $N\rightarrow\infty$. Allowing $M\rightarrow\infty$,
gives the required result.
\end{proof}
\begin{theorem} If $\mu$ is a non-zero measure with
$\hat{\mu}(n)\rightarrow 0$ as $|n|\rightarrow\infty$
then $\supp\mu$ is a set of multiplicity.
\end{theorem}
Men\u{s}ov then exhibited a non-zero measure with support
on a compact set $E$ of Lebesgue measure zero thus
showing that there existed compact sets of multiplicity
with Lebesgue measure zero.
During the first half of the 20th century all
such constructions involved sets $E$ with many arithmetic
relations. Rudin's theorem (Theorem~\ref{T;Rudin})
showed that there was no simple arithmetic condition
which could characterise sets of multiplicity.
\section{Distributions}
Even if $a_{n}\rightarrow 0$
as $|n|\rightarrow\infty$,
it is not true
every trigonometric sum
\[\sum_{n=-\infty}^{\infty}a_{n}\chi_{n}\]
can be made to correspond to a measure. To
get round this problem classical analysts resorted
to a series of tricks which allowed them to act
as though the formal series was a measure.
Schwartz showed that these ideas could be linked
with other formal tricks from the study of partial
differential equations to give the theory of distributions.
If the reader is not familiar with that theory, she
may omit the rest of this section without
loss. If she is familiar with the theory,
she may be interested to see how the study
of sets of uniqueness fits in.
When we talk of a distribution we shall mean a member
of the dual ${\mathcal D}'({\mathbb T})$ of the
space ${\mathcal D}({\mathbb T})$ of infinitely
differentiable functions.
It is easy to check that if $a_{n}$ is bounded
then the relation
\[\langle S,f\rangle=\sum_{n=-\infty}^{\infty}a_{n}\hat{f}(n)\]
with $f\in{\mathcal D}({\mathbb T})$ defines an element
$S\in {\mathcal D}'({\mathbb T})$. It is natural
to define
\[\hat{S}(n)=a_{n}=\langle S,chi_{-n}\rangle.\]
We know that every distribution $T$ has a support
defined to be the smallest closed set $E$ with the
property that,
if the support of $f\in{\mathcal D}({\mathbb T})$
is disjoint from $E$ then $\langle T,f\rangle=0$.
The calculations which gave Lemma~\ref{L;localise}
go through unchanged to give us the following
characterisation of a closed set of multiplicity.
\begin{lemma} A compact set $E$ is of multiplicity
if and only if we can find a non-zero distribution $S$
with $\hat{S}(n)\rightarrow 0$ as $|n|\rightarrow\infty$
such that $\supp S\subseteq E$.
\end{lemma}
Let us see how this idea can be used.
\begin{lemma} (i) If $S$ is a non-zero distribution
whose support is a single point then $\hat{S}(n)\nrightarrow 0$
as $n\rightarrow\infty$.
(ii) If $S$ is distribution with
$\hat{S}(n)\rightarrow 0$
as $n\rightarrow\infty$ then the support of $S$
can not contain an isolated point.
(iii) A countable closed set must be of uniqueness.
\end{lemma}
\begin{proof} (i) If $S$ is a distribution with support $a$, then
it can be shown that
\[S=\sum_{r=0}^{m}\lambda_{r}\delta_{a}^{(r)}\]
with not all the $\lambda_{r}$ zero. Thus
\[\hat{S}(n)=\sum_{r=0}^{m}\lambda_{r}(-in)^{r}\chi_{-n}(a)\nrightarrow 0\]
as $|n|\rightarrow\infty$.
(ii) Suppose that the support of $S$ contains an isolated
point $a$. We can find an $f\in{\mathcal D}({\mathbb T})$
such that $f=1$ in a neighbourhood of $a$ and
$\supp f\cap\supp S=\{a\}$. If we set $T=fS$, then $T$ is
a non-zero distribution and
\[\hat{T}(r)=\sum_{k=-\infty}^{\infty}\hat{f}(k)\hat{S}(k+r)
\rightarrow 0\]
as $|r|\rightarrow\infty$. This contradicts part~(i) so
the result follows by reductio ad absurdum.
(iii) If $E$ is a countable closed set, $T$ a distribution,
$E\supseteq \supp T$
and $T\neq 0$ then $\supp T$ is a non-empty countable closed set
and so contains an isolated point. By part~(ii),
$\hat{T}(n)\nrightarrow 0$ as $n\rightarrow\infty$.
\end{proof}
\section{Debs and Saint-Reymond} Just as it was possible to
hope that closed
sets of multiplicity might be characterised by arithmetic properties,
so one might hope that they could be characterised by
`metric properties' such as Hausdorff $h$-measure.
\begin{definition}\label{D;Hausdorff 1}
Let $h:[0,\infty)\rightarrow{\mathbb R}$
be a strictly increasing function.
We say that a set $E$ has \emph{Hausdorff h-measure zero}
if, given any $\epsilon>0$, we can find a sequence $I_{j}$
of intervals of length $|I_{j}|$ such that
\[\sum_{j=1}^{\infty}h(|I_{j}|)<\epsilon
\ \text{but}
\ \bigcup_{j=1}^{\infty}I_{j}\supseteq E.\]
\end{definition}
However, Lusin, who seems to have had a remarkable instinct
in such matters, conjectured that every complement of
a set of first Baire category would turn out to be a set
of multiplicity.
That this is the case is shown by the famous theorem of
Debs and Saint-Raymond.
\begin{theorem}\label{T;Debs} Let $B$ be a set of first category in
${\mathbb T}$.
Then we can find a probability measure $\mu$
with $\hat{\mu}(r)\rightarrow 0$ as $|r|\rightarrow\infty$
such that
\[\supp\mu\cap B=\emptyset.\]
\end{theorem}
As an indication of the power of this result we derive
a theorem of Iva{\v{s}}ev-Musatov~\cite{IV}.
\begin{theorem}\label{T;Musatov}
If $h:[0,\infty)\rightarrow{\mathbb R}$
is a strictly increasing continuous function
with $h(0)=0$, then we can find a probability measure $\mu$
with $\hat{\mu}(r)\rightarrow 0$ as $|r|\rightarrow\infty$
such that $\supp\mu$ has Hausdorff $h$-measure zero.
\end{theorem}
\begin{proof}
Enumerate the rationals as $y_{1}$, $y_{2}$,
$y_{3}$, \dots
and choose $\epsilon_{n}>0$ so that
$\sum_{n=1}^{\infty}h(2\epsilon_{n})$ converges.
Then
\[B=\bigcup_{j=1}^{\infty}\bigcap_{n=1}^{\infty}
\big({\mathbb T}\setminus(y_{n}-\epsilon_{n+j},y_{n}+\epsilon_{n+j})\big)\]
is a set of first category whose complement has Hausdorff
$h$-measure zero.
\end{proof}
We shall prove the following result which includes both
the theorem of Rudin and that of Debs and Saint-Raymond
as corollaries. (A similar result to the one presented here
was obtained independently by Matheron and Zelen{\'y} in~\cite{MZ}.)
\begin{theorem}\label{T;master} Let $A_{q}$ be a set of first category in
${\mathbb T}^{q}$ $[q\geq 1]$ and let
\[A=\bigcup_{q=1}^{\infty}\{{\mathbf x}\in {\mathbb T}^{{\mathbb N}^{+}}
\,:\,(x_{1},x_{2},\dots,x_{q})\in A_{q}\}.\]
Then we can find a probability measure $\mu$
with $\hat{\mu}(r)\rightarrow 0$ as $|r|\rightarrow\infty$
such that, whenever $x_{1}$, $x_{2}$, \dots are distinct
points of $\supp\mu$, ${\mathbf x}\notin A$.
\end{theorem}
The following corollary makes the connection with Rudin's theorem
explicit.
\begin{theorem}\label{T;how} Let $B$ be a set of first category
in ${\mathbb T}$. Then we can find a probability measure $\mu$
with $\hat{\mu}(r)\rightarrow 0$ as $|r|\rightarrow\infty$
such that $\supp\mu$ is independent and
the subgroup $G$ of ${\mathbb T}$
generated by $\supp \mu$
satisfies
\[G\cap B\subseteq\{0\}.\]
\end{theorem}
Theorem~\ref{T;how} can be restated as follows. (Note that,
if $B$ is of first category, so is $B\cup\{0\}$, so there
is no loss of generality in supposing $0\in B$.)
\begin{theorem}\label{T;how now} Let $B$ be a set of first category
in ${\mathbb T}$. Then we can find a probability measure $\mu$
with $\hat{\mu}(r)\rightarrow 0$ as $|r|\rightarrow\infty$
such that whenever $q\geq 1$
and $x_{1},\,x_{2},\,\dots,\,x_{q}$
are distinct points of $\supp \mu$ and $m_{1},\,m_{2},\,\dots,\,m_{q}$
are integers not all zero, then
\[m_{1}x_{1}+m_{2}x_{2}+\dots+m_{q}x_{q}\notin B.\]
\end{theorem}
\begin{proof} Suppose that $K$ is a closed subset of ${\mathbb T}$
with empty interior, $q$ is an integer with $q\geq 1$
and $m_{1}$, $m_{2}$, \dots, $m_{q}$ are integers, not all zero.
Then
\[
\{{\mathbf x}\in{\mathbb T}^{q}\,:\,
m_{1}x_{1}+m_{2}x_{2}+\dots+m_{q}x_{q}\in K\}\]
is closed with empty interior.
Since the countable union of sets of the first category is
of the first category, the set $\tilde{K}$ of
${\mathbf x}\in{\mathbb T}^{q}$
such that there exist integers $m_{1}$,
$m_{2}$, \dots, $m_{q}$, not all zero, with
\[m_{1}x_{1}+m_{2}x_{2}+\dots+m_{q}x_{q}\in K\]
is of first category.
We now observe that $B$ is a subset of a countable union
of closed sets with empty interior, so,
since the countable union of sets of the first category
is of the first category, the set $A_{q}$ of
${\mathbf x}\in{\mathbb T}^{q}$
such that there exist integers $m_{1}$,
$m_{2}$, \dots, $m_{q}$ not all zero with
\[m_{1}x_{1}+m_{2}x_{2}+\dots+m_{q}x_{q}\in B\]
is of first category in ${\mathbb T}^{q}$
and we may apply Theorem~\ref{T;master}.
\end{proof}
The following trivial remark explains why we
stated Theorem~\ref{T;master} in the way we did.
\begin{example} The set
\[A=\{{\mathbf x}\in{\mathbb T}^{4}\,:\,x_{1}+x_{2}=x_{3}+x_{4}\}\]
is closed and has empty interior. None the less, if $E$ is any
non-empty set in ${\mathbb T}$, we have
\[E^{4}\cap A\neq\emptyset.\]
\end{example}
However, we are interested in the statement that
\[x_{1}+x_{2}\neq x_{3}+x_{4}\]
whenever $x_{1}$, $x_{2}$, $x_{3}$ and $x_{4}$ are
\emph{distinct} points of $E$.
The original proof of their theorem by Debs and Saint Raymond
employed descriptive set theory and other sophisticated tools.
Later Kechris and Louveau produced a much
simpler proof.
If readers consider our proof of Theorem~\ref{T;master}
in the case when
\[A_{q}=\{{\mathbf x}\in{\mathbb T}^{q}\,:\,
x_{1}\in B\},\]
they will recover a lowbrow version of the
proof of Kechris and Louveau.
\begin{definition} We
set
\[({\mathcal P},d_{\mathcal P})=({\mathcal G}_{\psi},d_{\psi})\]
where $\psi(n)=1$ for all $n$
and $({\mathcal G}_{\psi},d_{\psi})$ is defined as
in Lemma~\ref{L;get metric}.
\end{definition}
\begin{exercise} Write down the definition of
$({\mathcal P},d_{\mathcal P})$ explicitly
(i.e.\ without using Lemma~\ref{L;get metric}).
\end{exercise}
We can now state a Baire category version of Theorem~\ref{T;master}.
\begin{theorem}\label{T;Baire master}
Let $A_{q}$ be a set of first category in
${\mathbb T}^{q}$ $[q\geq 1]$ and let
\[A=\bigcup_{q=1}^{\infty}\{{\mathbf x}\in {\mathbb T}^{{\mathbb N}^{+}}
\,:\,(x_{1},x_{2},\dots,x_{q})\in A_{q}\}.\]
Consider the set ${\mathcal E}$ of $(E,\mu)\in{\mathcal P}$
such that, whenever $x_{1}$, $x_{2}$, \dots are distinct
points of $\supp\mu$, it follows that
${\mathbf x}\notin A$.
Then the complement of ${\mathcal E}$ is of first category
in $({\mathcal P},d_{\mathcal P})$.
\end{theorem}Theorem~\ref{T;Baire master} follows from a slightly simpler
result.
\begin{lemma}\label{L;Baire master one}
Suppose that $q$ is a strictly positive integer,
$\alpha>0$ and $K$ is a closed subset of ${\mathbb T}^{q}$
with empty interior. Consider the set ${\mathcal E}_{\alpha}$
of $(E,\mu)\in{\mathcal P}$
such that, whenever $x_{1},\,x_{2},\,\dots,\,x_{q}\in \supp\mu$
and $|x_{k}-x_{l}|\geq\alpha$ for $k\neq l$,
it follows that ${\mathbf x}\notin K$.
Then the complement of ${\mathcal E}_{\alpha}$ is closed with empty interior.
\end{lemma}
\begin{proof}[Proof of Theorem~\ref{T;Baire master}
from Lemma~\ref{L;Baire master one}] By standard Baire category arguments,
Theorem~\ref{T;Baire master} follows from the special case in which
\[A=\{{\mathbf x}\in{\mathbb T}^{{\mathbb N}^{+}}\,:
\,(x_{1},x_{2},\dots,x_{q})\in K\}\]
and $K$ is a closed set in ${\mathbb T}^{q}$ with empty interior.
By Lemma~\ref{L;Baire master one} we know that
the complement of ${\mathcal E}_{1/n}$
is closed with empty interior. Since
${\mathcal E}=\bigcap_{n=1}^{\infty}{\mathcal E}_{1/n}$,
it follows that the complement of ${\mathcal E}$ is of first category.
\end{proof}
A further slight simplification reduces our proof to
the following core lemma.
\begin{lemma}\label{L;center step}
Suppose that $q$ is a strictly positive integer,
$\alpha>0$ and $K$ is a closed subset of ${\mathbb T}^{q}$
with empty interior. Consider the set ${\mathcal E}$
of $(H,\rho)\in{\mathcal P}$
such that, whenever $x_{1},\,x_{2},\,\dots,\,x_{q}\in \supp\rho$
and $|x_{k}-x_{l}|\geq\alpha$ for $k\neq l$,
it follows that ${\mathbf x}\notin K$.
Then, given any $\epsilon>0$ and any $(E,\mu)\in{\mathcal P}$,
we can find an $(F,\sigma)\in{\mathcal E}$ with
$d_{\mathcal P}\big((E,\mu),(F,\sigma)\big)<\epsilon$.
\end{lemma}
\begin{proof}[Proof of Lemma~\ref{L;Baire master one}
from Lemma~\ref{L;center step}] This follows a familiar
pattern.
Lemma~\ref{L;center step} states that
the complement of ${\mathcal E}$ contains no non-empty
open set. Thus we need only show that the complement of
${\mathcal E}$ is closed. To this end, suppose that
$(E_{n},\mu_{n})\notin{\mathcal E}$ and
$(E_{n},\mu_{n})\underset{d_{\mathcal P}}{\rightarrow}(E,\mu)$
as $n\rightarrow\infty$.
By definition, we can find $x_{j}(n)\in E_{n}$ such that
$|x_{k}(n)-x_{l}(n)|\geq\alpha$ for $k\neq l$ and ${\mathbf x}(n)\in K$.
By applying the Theorem of Bolzano--Weierstrass and extracting
a subsequence, we may suppose that $x_{j}(n)\rightarrow x_{j}$
for all $1\leq j\leq q$. Automatically,
$x_{j}\in E$, $|x_{k}-x_{l}|\geq\alpha$ for $k\neq l$
and, since $K$ is closed, ${\mathbf x}\in K$.
Thus $(E,\mu)\notin{\mathcal E}$ and we are done.
\end{proof}
\section{The perturbation argument} The proof of
Lemma~\ref{L;center step} depends on a simple but
very useful observation.
\begin{lemma}\label{L;perturb}
Suppose that $q$ is a strictly positive integer, that
$K$ is a closed subset of ${\mathbb T}^{q}$
with empty interior, and that $\{e_{1},e_{2},\dots,e_{n}\}$
is a finite subset of ${\mathbb T}$.
Then, given any $\epsilon>0$,
we can find $f_{k}\in {\mathbb T}$ and $\eta>0$
such that
(i) $|f_{k}-e_{k}|<\epsilon$ for $1\leq k\leq n$,
(ii) $|f_{j}-f_{k}|>2\eta$ for all $1\leq j<k\leq n$, and
(iii) whenever $a_{1}$, $a_{2}$, \dots, $a_{q}$ are distinct
points of $\{f_{1},f_{2},\dots,f_{n}\}$ and
$|a_{j}-b_{j}|<2\eta$ for $1\leq j\leq q$,
then ${\mathbf b}\notin K$.
\end{lemma}
% NOTE(review): some text appears to have been lost here; the end of the
% lemma and the statement of the following exercise have been
% reconstructed from their later uses and should be confirmed.
\begin{exercise}\label{E;finite approximation}
Suppose that $E$ is a closed subset of ${\mathbb T}$, that $\mu$
is a probability measure with $\supp\mu\subseteq E$ and that
$N$ is a positive integer. Then, given any $\epsilon>0$,
we can find an $M\geq 1$, $\eta>0$, points $y_{p}\in{\mathbb T}$ and
real numbers $\lambda_{p}\geq 0$ $[1\leq p\leq M]$
with $\sum_{p=1}^{M}\lambda_{p}=1$ having the following properties.
Whenever $|f_{p}-y_{p}|<\eta$ $[1\leq p\leq M]$
and we write
\[F=\{f_{1},f_{2},\dots,f_{M}\}
\ \text{and}\ \sigma=\sum_{p=1}^{M}\lambda_{p}\delta_{f_{p}}\]
we have
(i) $d_{\mathcal H}(E,F)<\epsilon$ and
(ii) $|\hat{\mu}(r)-\hat{\sigma}(r)|<\epsilon$ for all
$|r|\leq N$.
\end{exercise}
Our proof uses little beyond
Lemma~\ref{L;perturb} and Exercise~\ref{E;convolution}.
\begin{lemma}\label{L;make thin} Suppose that $q$ is a strictly
positive integer,
$\alpha>0$ and $K$ is a closed subset of ${\mathbb T}^{q}$
with empty interior.
Then, given any $\epsilon>0$, any integer $N\geq 1$
and any $(E_{j},\mu_{j})\in{\mathcal P}$
$[1\leq j\leq q]$,
we can find $N'\geq N$, $\gamma>0$
and $(F_{j},\sigma_{j})\in{\mathcal E}$ with the following properties.
(i) $d_{\mathcal H}(E_{j},F_{j})<\epsilon/2$ for all $1\leq j\leq q$.
(ii) If $d_{\mathcal H}(F_{j},G_{j})<\gamma$, then,
whenever
\[x_{1},\,x_{2},\,\dots,\,x_{q}\in \bigcup_{j=1}^{q}G_{j}
\ \text{and}\ |x_{k}-x_{l}|\geq\alpha
\ \text{for}\ k\neq l\]
it follows that
${\mathbf x}\notin K$.
(iii) $|\hat{\mu}_{j}(r)-\hat{\sigma}_{j}(r)|<\epsilon$
for all $|r|\leq N$ and for all $|r|\geq N'$ $[1\leq j\leq q]$.
\end{lemma}
\begin{proof} By Exercise~\ref{E;finite approximation},
we can find $\eta>0$, integers $M(j)\geq 1$, points $y_{p,j}\in{\mathbb T}$,
and real numbers $\lambda_{p,j}\geq 0$ $[1\leq p\leq M(j)]$
with $\sum_{p=1}^{M(j)}\lambda_{p,j}=1$
having the following properties.
Whenever $|x_{p,j}-y_{p,j}|<\eta$ $[1\leq p\leq M(j)]$
and we write
\[L_{j}=\{x_{1,j},x_{2,j},\dots,x_{M(j),j}\}
\ \text{and}\ \rho_{j}=\sum_{p=1}^{M(j)}\lambda_{p,j}\delta_{x_{p,j}}\]
we have
(i)$'$ $d_{\mathcal H}(E_{j},L_{j})<\epsilon$ and
(iii)$'$ $|\hat{\mu}_{j}(r)-\hat{\rho}_{j}(r)|<\epsilon$ for all
$|r|\leq N$.
By Lemma~\ref{L;perturb}, we can find $t_{p,j}$
and a $\gamma>0$
with $\gamma<\min(\eta,\epsilon)/4$ such that
(i)$''$ $|t_{p,j}-y_{p,j}|<\epsilon/4$ for $1\leq p\leq M(j)$ and
$1\leq j\leq q$,
(iv)$''$ $|t_{p,j}-t_{p',j'}|>4\gamma$ for all $(p,j)\neq(p',j')$, and
(ii)$''$ if we write $L=\bigcup_{j=1}^{q}\bigcup_{p=1}^{M(j)}\{t_{p,j}\}$, we
know that, whenever $a_{1}$, $a_{2}$, \dots $a_{q}$ are distinct
points of $L$ and $|a_{j}-b_{j}|<2\gamma$ for $1\leq j\leq q$,
then ${\mathbf b}\notin K$.
Now choose $h$ an infinitely differentiable
positive function with
$\supp h\subseteq[-\gamma/2,\gamma/2]$ and
$\int_{\mathbb T}h(t)\,dt=1$. If we
take
\[
F_{j}=\bigcup_{p=1}^{M(j)}[t_{p,j}-\gamma/2,t_{p,j}+\gamma/2]
\ \text{and}
\ \sigma_{j}=\left(\sum_{p=1}^{M(j)}\lambda_{p,j}\delta_{t_{p,j}}\right)*h
\]
then
all the conclusions of the lemma (with the possible exception
of that involving $N'$) are satisfied.
Since $\hat{\mu}_{j}(r),\hat{\sigma}_{j}(r)\rightarrow 0$
as $|r|\rightarrow\infty$
we can choose $N'$ so that
$|\hat{\mu}_{j}(r)|,|\hat{\sigma}_{j}(r)|<\epsilon/2$
for all $1\leq j\leq q$ and $|r|\geq N'$. Automatically,
$|\hat{\mu}_{j}(r)-\hat{\sigma}_{j}(r)|<\epsilon$
for all $|r|\geq N'$, so we are done.
\end{proof}
\begin{lemma}\label{L;shuffle thin} Suppose that $q$ and
$m$ are strictly
positive integers with $m\geq q$,
$\alpha>0$ and $K$ is a closed subset of ${\mathbb T}^{q}$
with empty interior.
Then, given any $\epsilon>0$ and any $(E_{k},\mu_{k})\in{\mathcal P}$
$[1\leq k\leq m]$,
we can find
$(F_{k},\sigma_{k})\in{\mathcal E}$
$[1\leq k\leq m]$
with the following properties.
(i) $d_{\mathcal H}(E_{k},F_{k})<\epsilon$ for all $1\leq k\leq m$
(ii) Whenever $x_{1},\,x_{2},\,\dots,\,x_{q}\in \bigcup_{k=1}^{m}F_{k}$
and $|x_{j}-x_{l}|\geq\alpha$ for $j\neq l$ it follows that
${\mathbf x}\notin K$.
(iii) $\sum_{k=1}^{m}|\hat{\mu}_{k}(r)-\hat{\sigma}_{k}(r)|<2q+1$
for all $r$.
\end{lemma}
\begin{proof} Let $\Phi$ be the collection of subsets of
$\{1,2,\dots,m\}$ containing exactly $q$ elements.
Let $M=\binom{m}{q}$, the number of elements of $\Phi$,
and let
\[\theta:\{1,\,2,\,\dots,\,M\}\rightarrow \Phi\]
be a bijection. Set $N(0)=0$, $\gamma_{0}=\epsilon/4$,
$E_{k,0}=E_{k}$ and $\mu_{k,0}=\mu_{k}$.
By repeated use of Lemma~\ref{L;make thin},
we can find
$N(w)$, $E_{k,w}$, $\mu_{k,w}$, $\gamma_{w}$ with $N(w)>N(w-1)$,
$\gamma_{w-1}/4>\gamma_{w}>0$ and
$(E_{k,w},\mu_{k,w})\in{\mathcal P}$ for $w=1,\,2,\,\dots,\,M$
with the following properties.
(i)$_{w}$ $d_{\mathcal H}(E_{k,w},E_{k,w-1})<\gamma_{w-1}/2$
for all $k\in\theta(w)$.
(ii)$_{w}$ If $d_{\mathcal H}(E_{k,w},G_{k,w})<\gamma_{w}$, then,
whenever $x_{1},\,x_{2},\,\dots,\,x_{q}\in \bigcup_{k\in\theta(w)}G_{k,w}$
and $|x_{j}-x_{l}|\geq\alpha$ for $j\neq l$, it follows that
${\mathbf x}\notin K$.
(iii)$_{w}$ Whenever $k\in\theta(w)$,
$|\hat{\mu}_{k,(w-1)}(r)-\hat{\mu}_{k,w}(r)|<1/(2Mq)$
for all $|r|\leq N(w-1)$ and for all $|r|\geq N(w)$.
(iv)$_{w}$ If $k\notin \theta(w)$, then $E_{k,w}=E_{k,w-1}$
and $\mu_{k,w}=\mu_{k,w-1}$.
We now set $F_{k}=E_{k,M}$ and $\sigma_{k}=\mu_{k,M}$.
By construction, $(F_{k},\sigma_{k})\in{\mathcal P}$
and (using (i)$_{w}$ and (iv)$_{w}$)
\[d_{\mathcal H}(E_{k},F_{k})\leq\sum_{w=1}^{M}
d_{\mathcal H}(E_{k,(w-1)},E_{k,w})
\leq \sum_{w=1}^{M}\gamma_{w-1}/2<\epsilon.\]
Thus (i) holds.
Now suppose that $x_{j}\in
\bigcup_{k=1}^{m}F_{k}$ for $1\leq j\leq q$
and $|x_{j}-x_{l}|\geq\alpha$ for $j\neq l$.
By the definition of $\theta$,
we can find a $1\leq w\leq M$ such that
$x_{j}\in\bigcup_{k\in \theta(w)}F_{k}$.
Arguing as in the previous paragraph,
we can find
$y_{j}\in\bigcup_{k\in \theta(w)}E_{k,w}$
such that $|x_{j}-y_{j}|\leq\gamma_{w}$ $[1\leq j\leq q]$
and so, by (ii)$_{w}$, ${\mathbf x}\notin K$.
Thus (ii) holds.
Now suppose $N(a-1)\leq |r|\leq N(a)$ for some integer $a$ with
$1\leq a\leq M$.
By (iii)$_{w}$ and (iv)$_{w}$,
\begin{align*}
\sum_{k=1}^{m}|\hat{\mu}_{k}(r)-\hat{\sigma}_{k}(r)|
&\leq\sum_{k=1}^{m}\sum_{w=1}^{M}|\hat{\mu}_{k,(w-1)}(r)-\hat{\mu}_{k,w}(r)|\\
&=\sum_{w=1}^{M}\sum_{k=1}^{m}|\hat{\mu}_{k,(w-1)}(r)-\hat{\mu}_{k,w}(r)|\\
&=\sum_{w=1}^{M}\sum_{k\in\theta(w)}
|\hat{\mu}_{k,(w-1)}(r)-\hat{\mu}_{k,w}(r)|\\
&=\sum_{w\neq a}\sum_{k\in\theta(w)}
|\hat{\mu}_{k,(w-1)}(r)-\hat{\mu}_{k,w}(r)|\\
&\qquad\qquad
+\sum_{k\in\theta(a)}|\hat{\mu}_{k,(a-1)}(r)-\hat{\mu}_{k,a}(r)|\\
&\leq\sum_{w\neq a}\sum_{k\in\theta(w)}1/(2Mq)
+\sum_{k\in\theta(a)}2\\
&< 2q+1.
\end{align*}
Less complicated estimates work if $|r|\geq N(M)$.
Thus (iii) holds.
\end{proof}
We can now complete the proof of Theorem~\ref{T;master}
by proving Lemma~\ref{L;center step}.
\begin{proof}[Proof of Lemma~\ref{L;center step}]
We are given $\epsilon>0$
and an $(E,\mu)\in{\mathcal P}$. Choose an integer $m$
such that $(2q+1)/m<\epsilon/2$ and write $(E_{k},\mu_{k})=(E,\mu)$
for $1\leq k\leq m$. By Lemma~\ref{L;shuffle thin}
we can find $(F_{k},\sigma_{k})\in{\mathcal P}$,
with the following properties.
(i) $d_{\mathcal H}(E,F_{k})<\epsilon/2$ for all $1\leq k\leq m$.
(ii) Whenever $x_{1},\,x_{2},\,\dots,\,x_{q}\in \bigcup_{k=1}^{m}F_{k}$
and $|x_{j}-x_{l}|\geq\alpha$ for $j\neq l$, it follows that
${\mathbf x}\notin K$.
(iii) $\sum_{j=1}^{m}|\hat{\mu}(r)-\hat{\sigma}_{j}(r)|<2q+1$
for all $r$.
If we now set $F=\bigcup_{k=1}^{m}F_{k}$
and $\sigma=m^{-1}\sum_{k=1}^{m}\sigma_{k}$,
then, automatically, $(F,\sigma)\in{\mathcal P}$
and statement~(ii) tells us that $(F,\sigma)\in{\mathcal E}$.
Statement~(i)
tells us that $d_{\mathcal H}(E,F)<\epsilon/2$
and statement (iii) tells us that
\[|\hat{\mu}(r)-\hat{\sigma}(r)|
\leq m^{-1}\sum_{k=1}^{m}|\hat{\mu}_{k}(r)-\hat{\sigma}_{k}(r)|
\leq (2q+1)/m<\epsilon/2\]
for all $r$. Thus $d_{\mathcal P}\big((E,\mu),(F,\sigma)\big)<\epsilon$
and we are done.
\end{proof}
\section{Convolution of distinct measures} Except for this preliminary
section the rest of these notes deal
with `convolution squares', that is to say, convolutions
$\mu*\mu$ of a measure with itself. However, as a warm
up exercise, we shall deal with an easier theorem
involving the convolution of two measures.
\begin{theorem}\label{T;Kronecker two}
Let $B$ be a set of first category
in ${\mathbb T}$. Then there exist
Kronecker sets $E_{1}$ and $E_{2}$ and probability measures
$\mu_{1}$ and $\mu_{2}$ with
$\supp\mu_{j}\subseteq E_{j}\subseteq {\mathbb T}\setminus B$
such that $\mu_{1}*\mu_{2}$ is an infinitely differentiable
nowhere zero function.
\end{theorem}
As usual we seek a Baire category proof and this requires
an appropriate metric.
\begin{exercise} (i) Consider the space $C^{\infty}({\mathbb T})$
of infinitely differentiable functions
$f:{\mathbb T}\rightarrow{\mathbb C}$. Show, by
using theorems on uniform convergence, or otherwise,
that
\[\rho(f,g)=\sum_{k=0}^{\infty}
\frac{2^{-k}\|f^{(k)}-g^{(k)}\|_{\infty}}
{1+\|f^{(k)}-g^{(k)}\|_{\infty}}\]
defines a complete metric on $C^{\infty}({\mathbb T})$.
(ii) Consider the space ${\mathcal P}$
of probability measures on
${\mathbb T}$. Show, by using theorems on weak convergence
or otherwise, that
\[d_{P}(\mu,\tau)=\sum_{k=-\infty}^{\infty}2^{-|k|}
|\hat{\mu}(k)-\hat{\tau}(k)|\]
defines a complete metric on ${\mathcal P}$.
(iii) Consider the space ${\mathcal K}$
with elements $(E_{1},E_{2},\mu_{1},\mu_{2},f)$
where $E_{i}$ is a compact set, $\mu_{j}$ is a probability
measure with $\supp \mu_{j}\subseteq E_{j}$ $[j=1,\,2]$,
$f\in C^{\infty}({\mathbb T})$ and $f=\mu_{1}*\mu_{2}$.
Why is ${\mathcal K}$ non-empty?
Show that
\begin{align*}
d_{\mathcal K}&\big((E_{1},E_{2},\mu_{1},\mu_{2},f),
(F_{1},F_{2},\tau_{1},\tau_{2},g)\big)\\
&=d_{\mathcal H}(E_{1},F_{1})+d_{\mathcal H}(E_{2},F_{2})
+d_{P}(\mu_{1},\tau_{1})+d_{P}(\mu_{2},\tau_{2})
+\rho(f,g)
\end{align*}
(where $d_{\mathcal H}$ is the usual Hausdorff metric)
defines a complete metric on ${\mathcal K}$.
\end{exercise}
We can now state our Baire category theorem.
\begin{theorem}\label{T;Kronecker three}
Let $B$ be a set of first category
in ${\mathbb T}$. Then quasi-all
$(E_{1},E_{2},\mu_{1},\mu_{2},f)\in{\mathcal K}$
have the property that $E_{j}$
is Kronecker and $E_{j}\cap B=\emptyset$.
\end{theorem}
\begin{exercise} Deduce Theorem~\ref{T;Kronecker two}
from Theorem~\ref{T;Kronecker three}.
\end{exercise}
We now perform our standard reductions.
\begin{lemma}\label{L;start Kronecker}
(i) Let $K$ be a compact subset of ${\mathbb T}$
whose complement is dense.
Then the set of
$(E_{1},E_{2},\mu_{1},\mu_{2},f)\in{\mathcal K}$
with the property that $E_{1}\cap K=\emptyset$
is open and dense.
(ii) Let $u\in S({\mathbb T})$ and $n\geq 1$. Then
the set of $(E_{1},E_{2},\mu_{1},\mu_{2},f)\in{\mathcal K}$
with the property that there exists an integer $Q$ such that
\[|u(t)-\chi_{Q}(t)|\leq 1/n\ \text{for all $t\in E_{1}$}\]
is open and dense.
\end{lemma}
\begin{exercise} Deduce Theorem~\ref{T;Kronecker three}
from Lemma~\ref{L;start Kronecker}.
\end{exercise}
\begin{exercise}
(i) Let $L$ be a compact subset of ${\mathbb T}$
whose complement is dense.
Show that the set of
$(E_{1},E_{2},\mu_{1},\mu_{2},f)\in{\mathcal K}$
with the property that $E_{1}\cap L=\emptyset$
is open and dense.
(ii) Let $u\in S({\mathbb T})$ and $n\geq 1$. Show that
the set of $(E_{1},E_{2},\mu_{1},\mu_{2},f)\in{\mathcal K}$
with the property that there exists an integer $Q$ such that
\[|u(t)-\chi_{Q}(t)|\leq 1/n\ \text{for all $t\in E_{1}$}\]
is open and dense.
\end{exercise}
The proof of Theorem~\ref{T;Kronecker three} thus reduces
to the proof of the two parts of the following lemma.
\begin{lemma}\label{L;final Kronecker}
(i) Let $L$ be a compact subset of ${\mathbb T}$
whose complement is dense.
Given
$(F_{1},F_{2},\tau_{1},\tau_{2},g)\in{\mathcal K}$
and $\epsilon>0$, we can find
$(E_{1},E_{2},\mu_{1},\mu_{2},f)\in{\mathcal K}$
with
\[d_{\mathcal K}\big((E_{1},E_{2},\mu_{1},\mu_{2},f),
(F_{1},F_{2},\tau_{1},\tau_{2},g)\big)<\epsilon\]
such that $E_{1}\cap L=\emptyset$.
(ii) Let $u\in S({\mathbb T})$ and $n\geq 1$.
Then, given
$(F_{1},F_{2},\tau_{1},\tau_{2},g)\in{\mathcal K}$,
and $\epsilon>0$ we can find
$(E_{1},E_{2},\mu_{1},\mu_{2},f)\in{\mathcal K}$
with
\[d_{\mathcal K}\big((E_{1},E_{2},\mu_{1},\mu_{2},f),
(F_{1},F_{2},\tau_{1},\tau_{2},g)\big)<\epsilon\]
and an integer $Q$ such that
\[|u(t)-\chi_{Q}(t)|\leq 1/n\ \text{for all $t\in E_{1}$}.\]
\end{lemma}
The proofs of the two parts are very similar.
We make use of the following well known result.
\begin{exercise}\label{E;differentiate convolution}
If $\mu$ is a measure and $K$ is $n$ times
continuously differentiable show that $K*\mu$ is
$n$ times continuously differentiable with
$(K*\mu)^{(n)}=K^{(n)}*\mu$. (If the reader finds our statement
too informal then she should formalise it.)
\end{exercise}
\begin{exercise}\label{E;finite differentiate}
Use Exercise~\ref{E;differentiate convolution}
and the kind of ideas used in Exercise~\ref{E;finite approximation}
to prove the following result.
Suppose that $F$ is a closed set, that $\tau$ is a probability
measure with $\supp\tau\subseteq F$ and that
$h:{\mathbb T}\rightarrow{\mathbb C}$
is an infinitely differentiable function.
Then, given any $\epsilon>0$ and any positive integer $N$,
we can find an $M\geq 1$, $\eta>0$, points $y_{p}\in{\mathbb T}$
real numbers $\lambda_{p}\geq 0$ $[1\leq p\leq M]$
with $\sum_{p=1}^{M}\lambda_{p}=1$ having the following properties.
Whenever $|e_{p}-y_{p}|<\eta$ $[1\leq p\leq M]$
and we write
\[E=\{e_{1},e_{2},\dots,e_{M}\}
\ \text{and}\ \sigma=\sum_{p=1}^{M}\lambda_{p}\delta_{e_{p}},\]
we have
(i) $d_{\mathcal H}(E,F)<\epsilon$,
(ii) $|(\sigma*h)^{(q)}(t)-(\tau*h)^{(q)}(t)|<\epsilon$ for all
$q\leq N$ and all $t\in{\mathbb T}$ and
(iii) $|\hat{\sigma}(r)-\hat{\tau}(r)|<\epsilon$ for
all $|r|\leq N$.
\end{exercise}
Our first step is to `spread out' the measure $\tau_{2}$.
\begin{lemma}\label{L;spread Kronecker} Given
$(F_{1},F_{2},\tau_{1},\tau_{2},g)\in{\mathcal K}$
and $\epsilon>0$ we can find
$(E_{1},E_{2},\mu_{1},\mu_{2},f)\in{\mathcal K}$
with
\[d_{\mathcal K}\big((E_{1},E_{2},\mu_{1},\mu_{2},f),
(F_{1},F_{2},\tau_{1},\tau_{2},g)\big)<\epsilon\]
such that $d\mu_{2}(t)=h(t)\,dt$ where $h$
is an infinitely differentiable function.
\end{lemma}
\begin{proof} Let $K_{N}$ be the function discussed
in Exercise~\ref{E;approximate unit}. If we
set $\mu_{1,N}=\tau_{1}$, $E_{1,N}=F_{1}$,
$\mu_{2,N}=\tau_{2}*K_{N}$, $E_{2,N}=F_{2}+\supp K_{N}$
and $f_{N}=g*K_{N}$, then
$(E_{1,N},E_{2,N},\mu_{1,N},\mu_{2,N},f_{N})\in{\mathcal K}$
and $d\mu_{2,N}(t)=h_{N}(t)\,dt$ for some
infinitely differentiable function $h_{N}$.
We have
\[d_{\mathcal H}(E_{2,N},F_{2})\rightarrow 0,
\ d_{P}(\mu_{2,N},\tau_{2})\rightarrow 0
\ \text{and}
\ \rho(f_{N},g)\rightarrow 0\]
as $N\rightarrow \infty$, so the required
result follows on taking
\[(E_{1},E_{2},\mu_{1},\mu_{2},f)
=(E_{1,N},E_{2,N},\mu_{1,N},\mu_{2,N},f_{N})\]
with $N$ sufficiently large.
\end{proof}
\begin{proof}[Proof of Lemma~\ref{L;final Kronecker}]
(i) By Lemma~\ref{L;spread Kronecker}, it suffices to consider
the case when $d\tau_{2}(t)=h(t)\,dt$, where $h$
is an infinitely differentiable function.
We can now use Exercise~\ref{E;finite differentiate}
to tell us that we can find an $M\geq 1$, $\eta>0$, points $y_{p}\in{\mathbb T}$
real numbers $\lambda_{p}\geq 0$ $[1\leq p\leq M]$
with $\sum_{p=1}^{M}\lambda_{p}=1$ having the following properties.
Whenever $|e_{p}-y_{p}|<\eta$ $[1\leq p\leq M]$
and we write
\[E=\{e_{1},e_{2},\dots,e_{M}\}
\ \text{and}\ \sigma=\sum_{p=1}^{M}\lambda_{p}\delta_{e_{p}}\]
we have
(i) $d_{\mathcal H}(E,F_{1})<\epsilon/3$,
(ii) $\rho(\sigma*\tau_{2},\tau_{1}*\tau_{2})<\epsilon/3$, and
(iii) $d_{P}(\sigma,\tau_{1})<\epsilon/3$.
\noindent Since the complement of $L$ is dense we can certainly
choose the $e_{p}\notin L$. Taking
\[E_{1}=E,\ E_{2}=F_{2},\ \mu_{1}=\sigma,\ \mu_{2}=\tau_{2},
\ f=\mu_{1}*\mu_{2}\]
we are done.
(ii) The argument is the same as for~(i) but the last but one
sentence must be replaced by `Provided $Q$ is suitably chosen,
we can choose the $e_{p}$ so that $|u(e_{p})-\chi_{Q}(e_{p})|\leq 1/n$'.
\end{proof}
\section{The Wiener--Wintner theorem} In a famous paper
Wiener and Wintner showed that there
exists a singular measure $\mu$
(that is to say a measure whose support has Lebesgue
measure zero) such
that $\mu*\mu$ is absolutely continuous
(that is to say $d(\mu*\mu)(t)=f(t)\,dt$ where $f$ is a
Lebesgue $L^{1}$ function). The measure of Wiener and
Wintner is very thick (for example high convolution powers
of $\mu$ correspond to continuous functions). We shall
produce other examples of such thick measures later.
First we shall produce examples of measures $\mu$
with extremely thin support such
that $\mu*\mu$ is absolutely continuous.
\begin{theorem}\label{T;Winter} Let
$A$ be a set of first category in
${\mathbb T}$. Then we can find a
probability measure $\mu$ such that
$\supp\mu\cap A=\emptyset$ but
$d(\mu*\mu)(t)=f(t)\,dt$ where $f$ is a
Lebesgue $L^{1}$ function.
\end{theorem}
It is easy to be lulled by the easy rhythm of
Baire category proof into a feeling that one
proof is very much like another.
You should note that Theorem~\ref{T;Winter}
implies the theorem of Debs and Saint-Raymond
(since $\hat{\mu}(n)^{2}=\widehat{\mu*\mu}(n)=\hat{f}(n)\rightarrow 0$
as $|n|\rightarrow\infty$)
so it cannot be trivial.
\begin{exercise} We have just used the Riemann--Lebesgue
lemma that if $f\in L^{1}$ (for Lebesgue measure),
then $\hat{f}(n)\rightarrow 0$ as $|n|\rightarrow\infty$.
Prove this (for example, by noting that the trigonometric
polynomials are $L^{1}$ dense).
\end{exercise}
\begin{exercise} (i) Show that if $E$ is a Kronecker set
and $\supp\mu\subseteq E$, then
\[\sup|\hat{\mu}(n)|=\|\mu\|.\]
(ii) If $f\in S({\mathbb T})$, show that there exist $n(j)$
with $|n(j)|\rightarrow\infty$ such that
\[\sup_{t\in E}|\chi_{n(j)}(t)-f(t)|\rightarrow 0\]
as $j\rightarrow\infty$.
(iii) Show that if $E$ is a Kronecker set
and $\supp\mu\subseteq E$ then
\[\limsup_{|n|\rightarrow\infty}|\hat{\mu}(n)|=\|\mu\|.\]
(iv) If $\mu_{1}$ and $\mu_{2}$ are the measures appearing
in Theorem~\ref{T;Kronecker two}, show that
\[\big((\mu_{1}+\mu_{2})*(\mu_{1}+\mu_{2})\big)\hat{\phantom a}(n)
=(\hat{\mu}_{1}(n))^{2}+2\hat{\mu}_{1}(n)\hat{\mu}_{2}(n)
+(\hat{\mu}_{2}(n))^{2}\nrightarrow 0.\]
Conclude that $(\mu_{1}+\mu_{2})*(\mu_{1}+\mu_{2})$
is not absolutely continuous.
\end{exercise}
Having issued this warning we continue along a standard path.
We first define a suitable metric space.
\begin{exercise} Let
${\mathcal Q}$ be the space
consisting of ordered pairs $(E,\mu)$ where $E$ is
a compact subset of ${\mathbb T}$
and $\mu$ is a probability measure with $\supp\mu\subseteq E$
and
\[d(\mu*\mu)(t)=f_{\mu}(t)\,dt\]
with $f_{\mu}\in L^{1}$.
If we set
\[d_{\mathcal Q}\big((E,\mu),(F,\sigma)\big)
=d_{\mathcal H}(E,F)+
\sup_{r\in{\mathbb Z}}2^{-|r|}|\hat{\mu}(r)-\hat{\sigma}(r)|
+\|f_{\mu}-f_{\sigma}\|_{1}\]
for all $(E,\mu),\,(F,\sigma)\in{\mathcal Q}$,
show that
$({\mathcal Q},d_{\mathcal Q})$ is a complete
non-empty
metric space.
\end{exercise}
We now state the Baire category version of our theorem.
\begin{theorem}\label{T;Winter Baire}
Let $A$ be a set of first category
in ${\mathbb T}$. Then quasi-all $(E,\mu)\in{\mathcal Q}$
have the property that $E\cap A=\emptyset$.
\end{theorem}
As usual, we deduce Theorem~\ref{T;Winter Baire}
from a simpler result.
\begin{lemma}\label{L;Winter Baire 2}
Let $L$ be a compact set in ${\mathbb T}$ with dense
complement. Then the set ${\mathcal Q}_{L}$ of
$(E,\mu)\in{\mathcal Q}$
with the property that $E\cap L=\emptyset$ is open
and dense.
\end{lemma}
\begin{exercise}\label{E;Winter Baire 3}
(i) Show that Theorem~\ref{T;Winter Baire}
follows from Lemma~\ref{L;Winter Baire 2}.
(ii) Show that the set ${\mathcal Q}_{L}$ in
Lemma~\ref{L;Winter Baire 2} is indeed open.
\end{exercise}
Exercise~\ref{E;Winter Baire 3} shows that the proof
of Theorem~\ref{T;Winter Baire} reduces to the following
lemma.
\begin{lemma}\label{L;Winter Baire 4}
Let $L$ be a compact set in ${\mathbb T}$ with dense
complement.
Given $(F,\sigma)\in{\mathcal Q}$ and $\epsilon>0$,
we can find an $(E,\mu)\in{\mathcal Q}$ with
\[E\cap L=\emptyset
\ \text{and}\ d_{\mathcal Q}\big((E,\mu),(F,\sigma)\big)\leq\epsilon.\]
\end{lemma}
We can make life somewhat easier for ourselves
by spreading out measures in the standard manner.
Let us write ${\mathcal Q}_{S}$ for the set of
$(E,\mu)\in{\mathcal Q}$ such that $d\mu(t)=h_{\mu}(t)\,dt$
with $h_{\mu}$ an infinitely differentiable function.
\begin{exercise} Show, by convolving with a function
$K_{n}$ of the type considered in Exercise~\ref{E;approximate unit},
or otherwise, that given $(F,\sigma)\in{\mathcal Q}$
and $\epsilon>0$ we can find an $(E,\mu)\in{\mathcal Q}_{S}$
with $d_{\mathcal Q}\big((E,\mu),(F,\sigma)\big)\leq\epsilon$.
\end{exercise}
Thus Lemma~\ref{L;Winter Baire 4} will follow from
the following modified version
\begin{lemma}\label{L;Winter Baire 5}
Let $L$ be a compact set in ${\mathbb T}$ with dense
complement.
Given $(F,\sigma)\in{\mathcal Q}_{S}$ and $\epsilon>0$
we can find an $(E,\mu)\in{\mathcal Q}_{S}$ with
\[E\cap L=\emptyset
\ \text{and}\ d_{\mathcal Q}\big((E,\mu),(F,\sigma)\big)\leq\epsilon.\]
\end{lemma}
It is at this point that the proof requires thought.
We obtain Lemma~\ref{L;Winter Baire 5} from the following
result.
\begin{lemma}\label{L;bit Wintner}
Let $L$ be a compact set in ${\mathbb T}$ with dense
complement and let $P\geq 2$.
Given
\[(F_{1},\sigma_{1}),\,(F_{2},\sigma_{2}),\,
\ldots,\,
(F_{P},\sigma_{P})\in{\mathcal Q}_{S}\]
and $\eta>0$,
we can find an $(E_{1},\mu_{1})\in{\mathcal Q}_{S}$
such that
$E_{1}\cap L=\emptyset$ with the following property.
If we write
\[F=\bigcup_{j=1}^{P}F_{j},
\ \sigma=P^{-1}\sum_{j=1}^{P}\sigma_{j},
\ E=E_{1}\cup\bigcup_{j=2}^{P}F_{j},
\ \mu=P^{-1}\left(\mu_{1}+\sum_{j=2}^{P}\sigma_{j}\right)
\]
then $d_{\mathcal Q}\big((E,\mu),(F,\sigma)\big)
\leq 2P^{-2}+\eta$.
\end{lemma}
\begin{proof} Let $d\sigma_{j}(t)=h_{j}(t)\,dt$
with $h_{j}$ an infinitely differentiable function.
We use Exercise~\ref{E;finite differentiate}
to tell us that we can find an $M\geq 1$, $\kappa>0$,
points $y_{m}\in{\mathbb T}$ and
real numbers $\lambda_{m}\geq 0$ $[1\leq m\leq M]$
with $\sum_{m=1}^{M}\lambda_{m}=1$ having the following properties.
Whenever $|e_{m}-y_{m}|<\kappa$ $[1\leq m\leq M]$
and we write
\[E'=\{e_{1},e_{2},\dots,e_{M}\}
\ \text{and}\ \tau=\sum_{m=1}^{M}\lambda_{m}\delta_{e_{m}}\]
we have
(i) $d_{\mathcal H}(E',F_{1})<\eta/6$,
(ii) $\sup_{r\in{\mathbb Z}}2^{-|r|}|\hat{\tau}(r)-\hat{\sigma}_{1}(r)|
<\eta/6$,
(iii) $\|\tau*h_{j}-h_{1}*h_{j}\|_{1}\leq\eta/4$ for $2\leq j\leq P$.
Since the complement of $L$ is dense, we can
choose the $e_{m}\notin L$. We now take
$\mu_{1}=\tau*K_{n}$ and $E_{1}=E'+\supp K_{n}$ where
$K_{n}$ is the function considered in
Exercise~\ref{E;approximate unit}. Provided that
$n$ is large enough, we have
(i) $d_{\mathcal H}(E_{1},F_{1})<\eta/3$,
(ii) $\sup_{r\in{\mathbb Z}}2^{-|r|}|\hat{\mu}_{1}(r)-\hat{\sigma}_{1}(r)|
<\eta/3$,
(iii) $\|\mu_{1}*h_{j}-h_{1}*h_{j}\|_{1}\leq\eta/3$ for $2\leq j\leq P$.
Condition~(iii) tells us that
\begin{align*}
\|\sigma*\sigma-\mu*\mu\|_{1}&
=P^{-2}\left\|2\sum_{j=2}^{P}(\sigma_{1}-\mu_{1})*\sigma_{j}+\sigma_{1}*\sigma_{1}
-\mu_{1}*\mu_{1}\right\|_{1}\\
&\leq 2P^{-2}\sum_{j=2}^{P}\|\sigma_{1}*h_{j}-\mu_{1}*h_{j}\|_{1}
+P^{-2}(\|\sigma_{1}*\sigma_{1}\|_{1}+\|\mu_{1}*\mu_{1}\|_{1})\\
&\leq 2P^{-2}+\eta/3.
\end{align*}
Combining this result with (i) and (ii), we get
\[d_{\mathcal Q}\big((E,\mu),(F,\sigma)\big)
\leq 2P^{-2}+\eta\]
as required.
\end{proof}
\begin{proof}[Proof of Lemma~\ref{L;Winter Baire 5}
from Lemma~\ref{L;bit Wintner}] Set
\[(F_{1},\sigma_{1})=(F_{2},\sigma_{2})=
\ldots
=(F_{P},\sigma_{P})=(F,\sigma).\]
By using Lemma~\ref{L;bit Wintner} $P$ times
we can find $(E_{j},\mu_{j})\in{\mathcal Q}_{S}$
such that
$E_{j}\cap L=\emptyset$ for $1\leq j\leq P$
with the following property.
If we write
\[E=\bigcup_{j=1}^{P}E_{j},
\ \mu=P^{-1}\sum_{j=1}^{P}\mu_{j},
\]
then $d_{\mathcal Q}\big((E,\mu),(F,\sigma)\big)
\leq P(P^{-2}+\eta)=P^{-1}+P\eta$.
If we choose $P$ and $\eta$ so that $P^{-1}+P\eta<\epsilon$,
then the required result follows.
\end{proof}
\section{Hausdorff dimension and measures}
The Hausdorff dimension provides a useful measure
of the thinness of a set.
\begin{definition} If $0<\kappa\leq 1$, write
$h_{\kappa}(t)=t^{\kappa}$.
We say that a set $E$ has \emph{Hausdorff
dimension} $\alpha$ if $E$ has
Hausdorff $h_{\kappa}$-measure
zero for all $\kappa>\alpha$ but
does not have Hausdorff $h_{\kappa}$-measure
zero for any $\kappa<\alpha$.
\end{definition}
\begin{exercise}\label{E;easy dimension}
(i) Let $1\geq\alpha>\beta>0$.
Show that, if a set $E$ has
Hausdorff $h_{\alpha}$-measure zero,
then it has Hausdorff
$h_{\beta}$-measure zero.
(ii) If $F\supseteq E$ show that the
Hausdorff dimension of $F$ is at least as large as that
of $E$.
(iii) If $E$ does not
have Hausdorff $h_{\alpha}$-measure zero,
show that there is a $\gamma>0$ such that,
for any sequence $I_{j}$ of intervals,
\[\bigcup_{j=1}^{\infty}I_{j}\supseteq E
\Rightarrow \sum_{j=1}^{\infty}|I_{j}|^{\alpha}\geq\gamma.\]
\end{exercise}
When we construct sets `by hand', it is often
easy to prove upper
bounds for the Hausdorff dimension of a set
by providing a suitable cover of
intervals, but not so simple to prove lower
bounds. We shall obtain lower bounds
by using the following well known result (the easy
part of a theorem of Frostman).
\begin{theorem}\label{T,Hausdorff up}
Let $E$ be a closed set in ${\mathbb T}$
and $1>\alpha\geq 0$. If we can find
a probability measure $\mu$ with support contained
in $E$ such that
\[\iint_{{\mathbb T}^{2}} \frac{d\mu(x)\,d\mu(y)}
{|x-y|^{\alpha}}<\infty,\]
then the Hausdorff dimension of $E$ is at least $\alpha$.
\end{theorem}
\begin{proof} Let $t>0$ and let $E_{t}$ be the set of
$y\in E$ such that
\[\int_{\mathbb T}\frac{d\mu(x)}{|x-y|^{\alpha}}\leq t.\]
We fix $t$ sufficiently large that $E_{t}$ has strictly
positive $\mu$ measure.
Consider a covering of $E_{t}$
by intervals $I_{j}$ of length $|I_{j}|$.
By choosing a subsequence if necessary,
we may suppose that $I_{j}\cap E_{t}\neq\emptyset$
for each $j$. Picking $y_{j}\in I_{j}$, we obtain
\[\mu(I_{j})=\int_{I_{j}}\,d\mu\leq
\int_{I_{j}}\frac{|I_{j}|^{\alpha}}{|x-y_{j}|^{\alpha}}\,d\mu
\leq t|I_{j}|^{\alpha}\]
whence
\[t\sum_{j=1}^{\infty}|I_{j}|^{\alpha}
\geq \sum_{j=1}^{\infty}\mu(I_{j})\geq \mu(E_{t})\]
and $\sum_{j=1}^{\infty}|I_{j}|^{\alpha}\geq t^{-1}\mu(E_{t})$. Thus
$E_{t}$ must have Hausdorff dimension at least $\alpha$
(see Exercise~\ref{E;easy dimension})
and so (since $E\supseteq E_{t}$) $E$ must have dimension at least $\alpha$.
\end{proof}
Although we shall not make any use of the
hard part of Frostman's theorem,
it seems a pity not to give it here. We return to the main
argument at the end of the proof.
\begin{theorem}
(i) Let $E$ be a closed set in ${\mathbb T}$
and $1>\alpha\geq 0$. If $E$ does not have zero
Hausdorff-$h_{\alpha}$ measure (where $h_{\alpha}(t)=t^{\alpha}$)
then
we can find a probability measure $\mu$
with support contained in $E$ and a constant $C>0$
such that
\[\mu(I)\leq C|I|^{\alpha}\]
for every interval $I$.
(ii) Let $E$ be a closed set in ${\mathbb T}$
and $1>\beta>\alpha\geq 0$. If $E$ has Hausdorff dimension
$\alpha$ we can find a probability measure $\mu$
with support contained in $E$
such that
\[\iint_{{\mathbb T}^{2}} \frac{d\mu(x)\,d\mu(y)}
{|x-y|^{\beta}}<\infty.\]
\end{theorem}
\begin{proof} (i) Since $E$ does not have zero
Hausdorff $h_{\alpha}$-measure,
Exercise~\ref{E;easy dimension}~(iii)
tells us that there exists a $\gamma>0$ such that
\[\bigcup_{j=1}^{\infty}I_{j}\supseteq E
\Rightarrow \sum_{j=1}^{\infty}|I_{j}|^{\alpha}\geq\gamma.\]
Let ${\mathcal I}_{n}$
be the collection of dyadic
intervals $[2\pi r2^{-n},2\pi (r+1)2^{-n})$.
If $m\geq 1$ define measures $\tau_{m,r}$
with $0\leq r\leq m$ as follows.
If $I\in {\mathcal I}_{m}$, then $\tau_{m,0}|_{I}$,
the restriction of $\tau_{m,0}$ to $I$, is the zero
measure if $E\cap I=\emptyset$ and the uniform measure
on $I$ with total mass $2^{-m\alpha}$ if
$E\cap I\neq\emptyset$. Once $\tau_{m,r-1}$
has been defined with $1\leq r\leq m$, we define $\tau_{m,r}$
as follows. If $I\in {\mathcal I}_{m-r}$
\[\tau_{m,r}|_{I}
=\begin{cases}
\tau_{m,r-1}|_{I}&\text{if $\tau_{m,r-1}(I)\leq 2^{-(m-r)\alpha}$,}\\
2^{-(m-r)\alpha}\big(\tau_{m,r-1}(I)\big)^{-1}\tau_{m,r-1}|_{I}
&\text{otherwise.}
\end{cases}\]
Finally we set $\tau_{m}=\tau_{m,m}$.
We observe that if $I$ is a dyadic interval with $I\cap E=\emptyset$,
then $\tau_{m}(I)=0$.
By construction $\tau_{m}(I)\leq (2\pi)^{-\alpha}|I|^{\alpha}$
for every dyadic interval of length at least $2\pi\ 2^{-m}$,
and so each $x\in E$ lies in some dyadic interval
$I_{x}$ of greatest length such that
\[\tau_{m}(I_{x})\leq (2\pi)^{-\alpha}|I_{x}|^{\alpha}.\]
Let
\[{\mathcal J}_{m}=\{I_{x}\,:\,x\in E\}.\]
Then ${\mathcal J}_{m}$ consists of a finite
set of disjoint intervals covering $E$ and
satisfying $\tau_{m}(J)\leq (2\pi)^{-\alpha}|J|^{\alpha}$
for each $J\in {\mathcal J}_{m}$. Automatically
\[\|\tau_{m}\|
=\tau_{m}\left(\bigcup_{J\in{\mathcal J}_{m}}J\right)
=\sum_{J\in{\mathcal J}_{m}}\tau_{m}(J)
=(2\pi)^{-\alpha}\sum_{J\in{\mathcal J}_{m}}|J|^{\alpha}
\geq (2\pi)^{-\alpha}\gamma.\]
If we now set $\mu_{m}=\|\tau_{m}\|^{-1}\tau_{m}$
we see that $\mu_{m}$ is a probability measure such that
\[\supp\mu_{m}\subseteq E+[-2\pi\ 2^{-m},2\pi\ 2^{-m}]\]
and
\[\mu_{m}(I)=\|\tau_{m}\|^{-1}\tau_{m}(I)
\leq \|\tau_{m}\|^{-1}(2\pi)^{-\alpha}|I|^{\alpha}
\leq\gamma^{-1}|I|^{\alpha}\]
for every dyadic interval
$I$ of length at least $2\pi\ 2^{-m}$.
By weak compactness we can extract a subsequence $\mu_{m(r)}$
tending weakly to some probability measure $\mu$.
Automatically
\[\supp\mu\subseteq E\]
and
\[\mu(I)
\leq\gamma^{-1}|I|^{\alpha}\]
for every dyadic interval $I$.
We now remark that every interval $I$ can be covered
by two dyadic intervals $I_{1}$ and $I_{2}$
of length no greater than $2|I|$, so
\[\mu(I)\leq \mu(I_{1})+\mu(I_{2})\leq
\gamma^{-1}(|I_{1}|^{\alpha}+|I_{2}|^{\alpha})
\leq 2^{1+\alpha}\gamma^{-1} |I|^{\alpha}\]
and we are done.
(ii) By part~(i) we can find a probability measure $\mu$
with support contained in $E$ and a constant $C>0$
such that
\[\mu(I)\leq C|I|^{\alpha}\]
for every interval $I$. By applying the measure
theoretic version of integration by parts,
\begin{align*}
\int_{\mathbb T}\frac{d\mu(x)}
{|x-y|^{\beta}}
&=-\int_{0}^{\pi}\mu\big([y-t,y+t)\big)
\frac{\partial \ }{\partial x}\frac{1}{(x-y)^{\beta}}
\bigg|_{x=y+t}\,dt\\
&=\int_{0}^{\pi}\mu\big([y-t,y+t)\big)
\frac{\beta}{t^{1+\beta}}\,dt\\
&\leq \int_{0}^{\pi}C(2t)^{\alpha}\frac{\beta}{t^{1+\beta}}\,dt
=2^{\alpha}C\beta\int_{0}^{\pi}\frac{1}{t^{1+\beta-\alpha}}\,dt
=A
\end{align*}
for some constant $A$. Since
$\int_{\mathbb T}|x-y|^{-\beta}\,d{\mu}(x)$
is uniformly bounded as a function of $y$, we have
\[\iint_{{\mathbb T}^{2}} \frac{d\mu(x)\,d\mu(y)}
{|x-y|^{\beta}}<\infty\]
so we are done.
\end{proof}
The link to Fourier series is established in a rather
pretty way.
\begin{lemma}\label{L;Frost and sum}
Suppose that $1>\alpha>0$
and we define $k:{\mathbb T}\rightarrow{\mathbb R}$
by $k(t)=|t|^{-\alpha}$ for $t\neq 0$, $k(0)=0$.
Then, if $\mu$ is a probability measure,
\[\iint_{{\mathbb T}^{2}}k(x-y)\,d\mu(x)\,d\mu(y)
=\sum_{r=-\infty}^{\infty}\hat{k}(r)|\hat{\mu}(r)|^{2}.\]
\end{lemma}
\begin{proof} By Lemma~\ref{L;weighted sum triangles},
we can find a continuous positive function
$g:(0,\pi]\rightarrow{\mathbb R}$ such that
\[k(t)=A+B\Delta_{\pi}(t)+\int_{0}^{\pi}g(x)\Delta_{x}(t)\,dx\]
for all $0<|t|\leq\pi$. Define $g_{n}:[0,\pi]\rightarrow{\mathbb R}$
by
\[g_{n}(x)=
\begin{cases}
g(x)&\text{if $x\geq n^{-1}$,}\\
g(1/n)&\text{otherwise.}
\end{cases}
\]
and set
\[k_{n}(t)=A+B\Delta_{\pi}(t)+\int_{0}^{\pi}g_{n}(x)\Delta_{x}(t)\,dx.\]
We observe that, for each fixed $t\neq 0$, $k_{n}(t)$
is an increasing sequence with $k_{n}(t)\rightarrow k(t)$
as $n\rightarrow\infty$. Using Fubini's theorem
\[\hat{k}_{n}(r)=B\hat{\Delta}_{\pi}(r)
+\int_{0}^{\pi}g_{n}(x)\hat{\Delta}_{x}(r)\,dx\]
for $r\neq 0$ and
\[\hat{k}_{n}(0)=A+B\hat{\Delta}_{\pi}(0)
+\int_{0}^{\pi}g_{n}(x)\hat{\Delta}_{x}(0)\,dx\]
so $\hat{k}_{n}(r)$ is a positive increasing
sequence. By dominated convergence,
$\hat{k}_{n}(r)\rightarrow\hat{k}(r)$.
By Exercise~\ref{E;Frost and snow}, we know that
\[\iint_{{\mathbb T}^{2}}k_{n}(x-y)\,d\mu(x)\,d\mu(y)
=\sum_{r=-\infty}^{\infty}\hat{k}_{n}(r)|\hat{\mu}(r)|^{2}\]
so, allowing $n\rightarrow\infty$ and applying the
monotone convergence theorem to both sides of the equation,
we have
\[\iint_{{\mathbb T}^{2}}k(x-y)\,d\mu(x)\,d\mu(y)
=\sum_{r=-\infty}^{\infty}\hat{k}(r)|\hat{\mu}(r)|^{2}\]
as required.
\end{proof}
In order to use Theorem~\ref{T,Hausdorff up}, we need to
know something about the behaviour of $\hat{k}(r)$ as
$|r|\rightarrow\infty$.
\begin{lemma}\label{L;long Fourier}
(i) If $1>\alpha>0$ and we set
\[A=\frac{1}{\pi}\int_{0}^{\infty}\frac{\cos t}{t^{\alpha}}\,dt,\]
then $A>0$.
(ii) Suppose that $1>\alpha>0$
and we define $k:{\mathbb T}\rightarrow{\mathbb R}$
by $k(t)=|t|^{-\alpha}$ for $t\neq 0$, $k(0)=0$. Then
\[|r|^{1-\alpha}\hat{k}(r)\rightarrow A\]
as $|r|\rightarrow\infty$.
\end{lemma}
\begin{proof} (i) We can write
\[A=\sum_{j=0}^{\infty}(-1)^{j}
\frac{1}{\pi}\int_{j\pi}^{(j+1)\pi}
\frac{|\cos t|}{t^{\alpha}}\,dt\]
so, since the error in evaluating an alternating sum
(of terms which decrease in absolute size)
is no greater than the modulus of the first term
neglected,
\[A\geq \frac{1}{\pi}\int_{0}^{\pi}\frac{|\cos t|}{t^{\alpha}}\,dt
-\frac{1}{\pi}\int_{\pi}^{2\pi}\frac{|\cos t|}{t^{\alpha}}\,dt>0.\]
(ii) Observe that
\begin{align*}
|r|^{1-\alpha}\hat{k}(r)
&=|r|^{1-\alpha}\frac{1}{2\pi}\int_{\mathbb T}e^{-irt}k(t)\,dt
=|r|^{1-\alpha}\frac{1}{\pi}\int_{0}^{\pi}\frac{\cos rt}{t^{\alpha}}\,dt\\
&=\frac{1}{\pi}\int_{0}^{|r|\pi}\frac{\cos s}{s^{\alpha}}\,ds
\rightarrow A
\end{align*}
as $|r|\rightarrow\infty$.
\end{proof}
Putting our results together, we obtain the following key theorem.
\begin{theorem}\label{T,Hausdorff up Fourier}
Let $E$ be a bounded closed set
and $1>\alpha\geq 0$. If we can find
a probability measure $\mu$ with support contained
in $E$ such that
\[\sum_{r\neq 0}\frac{|\hat{\mu}(r)|^{2}}{|r|^{1-\alpha}}
<\infty,\]
then the Hausdorff dimension of $E$ is at least $\alpha$.
\end{theorem}
\begin{proof} Set $k(t)=|t|^{-\alpha}$ for $t\neq 0$, $k(0)=0$.
If
\[\sum_{r\neq 0}\frac{|\hat{\mu}(r)|^{2}}{|r|^{1-\alpha}}
<\infty,\]
then Lemma~\ref{L;long Fourier} tells us that
\[\sum_{r=-\infty}^{\infty}\hat{k}(r)|\hat{\mu}(r)|^{2}
<\infty,\]
so Lemma~\ref{L;Frost and sum} gives us
\[\iint_{{\mathbb T}^{2}}k(x-y)\,d\mu(x)\,d\mu(y)
<\infty\]
and Theorem~\ref{T,Hausdorff up} tells us that
the Hausdorff dimension of $E$ is at least $\alpha$.
\end{proof}
Theorem~\ref{T,Hausdorff up Fourier} immediately yields
the following result of Salem~\cite{Salem}.
\begin{theorem} If $\mu$ is a probability measure whose support
has Hausdorff dimension $\alpha$ then
\[\limsup_{n\rightarrow\infty}|n|^{\beta/2}|\hat{\mu}(n)|=\infty\]
for all $\beta>\alpha$.
\end{theorem}
In particular, if $\mu$ is a probability measure whose support
has Hausdorff dimension zero, then
\[\limsup_{n\rightarrow\infty}|n|^{\alpha}|\hat{\mu}(n)|=\infty\]
for all $\alpha>0$.
\begin{exercise} It is an easy but rather lengthy
exercise to
modify the proof of Theorem~\ref{T;Baire Rudin}
to obtain the following result.
Suppose that $\phi:{\mathbb N}\rightarrow{\mathbb R}$
is a sequence of strictly positive numbers with
$r^{\alpha}\phi(r)\rightarrow \infty$ as $r\rightarrow\infty$
whenever $\alpha>0$.
Then quasi-all $(\mu,E)\in{\mathcal G}_{\phi}$ have the
property that $E$ is independent and has Hausdorff dimension
zero.
\end{exercise}
We can make the following observation
about Theorem~\ref{T;Winter}. (You should recall
Theorem~\ref{T;Musatov}, taking $h(t)=-\log t$.)
\begin{lemma} Let
$A$ be a set in
${\mathbb T}$ whose complement has Hausdorff dimension zero.
Then if $\mu$ is a
probability measure such that
$\supp\mu\cap A=\emptyset$ and
$d(\mu*\mu)(t)=f(t)\,dt$ where $f$ is a
Lebesgue $L^{1}$ function then
\[f_{m}=\underbrace{f*f*\ldots*f}_{m}\]
cannot be a Lebesgue $L^{2}$ function for any $m$.
\end{lemma}
\begin{proof} Suppose that $f_{m}\in L^{2}$.
By H{\"o}lder's inequality,
\begin{align*}
\sum_{r\neq 0}\frac{|\hat{\mu}(r)|^{2}}{|r|^{1-m^{-1}/4}}
&=\sum_{r\neq 0}\frac{|\hat{f}(r)|}{|r|^{1-m^{-1}/4}}\\
&\leq\left(\sum_{r\neq 0}|\hat{f}(r)|^{2m}\right)^{1/2m}
\left(\sum_{r\neq 0}
\frac{1}{(|r|^{1-m^{-1}/4})^{2m/(2m-1)}}\right)^{(2m-1)/(2m)}\\
&=\left(\sum_{r\neq 0}|\hat{f}_{m}(r)|^{2}\right)^{1/2m}
\left(\sum_{r\neq 0}
\frac{1}{(|r|^{1-m^{-1}/4})^{2m/(2m-1)}}\right)^{(2m-1)/(2m)}\\
&=\|f_{m}\|_{2}^{1/m}
\left(\sum_{r\neq 0}\frac{1}{|r|^{(4m-1)/(4m-2)}}\right)^{(2m-1)/(2m)}
<\infty
\end{align*}
By Theorem~\ref{T,Hausdorff up Fourier}, it follows that
$\supp\mu$ has Hausdorff dimension at least $1/(4m)$.
\end{proof}
Thus we may think of the measures in Theorem~\ref{T;Winter}
as rather thin.
\section{Thick Wiener--Wintner measures}
In ~\cite{WW}, Wiener and Wintner
constructed a singular measure $\mu$ whose convolution
square $\mu*\mu$ was an $L^{1}$ function.
In~\cite{Saeki}, Saeki constructed
a singular measure $\mu$ whose convolution
square $\mu*\mu$ was continuous. However, there are strong
constraints on how smooth $\mu*\mu$ can be depending
on the nature of the support of $\mu$.
\begin{definition}
If $0<\alpha\leq 1$, we
say that a function $f:{\mathbb T}\rightarrow{\mathbb C}$
lies in $\Lambda_{\alpha}$, the space of \emph{Lipschitz}
(or H{\"o}lder) $\alpha$ functions if
\[\sup_{t,h\in{\mathbb T},h\neq 0}
|h|^{-\alpha}|f(t+h)-f(t)|<\infty.\]
\end{definition}
\begin{exercise} Show that
\[\|f\|_{\alpha}=\|f\|_{\infty}+\sup_{t,h\in{\mathbb T},h\neq 0}
|h|^{-\alpha}|f(t+h)-f(t)|\]
defines a complete norm on $\Lambda_{\alpha}$.
\end{exercise}
Using Theorem~\ref{T,Hausdorff up Fourier}
and another result from Fourier analysis, we can relate
the Hausdorff dimension of $\supp\mu$ with the
possible Lipschitz smoothness of $\mu*\mu$.
\begin{lemma}\label{L;upper}
(i) There is a constant $C$ with the following property.
If $f:{\mathbb T}\rightarrow{\mathbb C}$ is
Lipschitz $\beta$, then
\[\sum_{n\leq |k|\leq 2n-1}|\hat{f}(k)|\leq C\|f\|_{\beta}
n^{(1-2\beta)/2}.\]
(ii) If $\mu$ is a measure whose support
has Hausdorff dimension
$\alpha$
and $d(\mu*\mu)(t)=f(t)\,dt$ where $f$ is
Lipschitz $\beta$, then $\alpha-\tfrac{1}{2}\geq\beta$.
(iii) If $\mu$ is a measure whose support
has Hausdorff dimension
$\alpha$
and $d(\mu*\mu)(t)=f(t)\,dt$ where $f$ is
continuous, then $\alpha\geq\tfrac{1}{2}$.
\end{lemma}
\begin{proof} (i) If $h\in{\mathbb R}$, set
\[g_{h}(t)=f(t+h)-f(t-h).\]
We have
\[\hat{g}_{h}(k)=(2\sin kh)\hat{f}(k),\]
so, by Parseval's equality,
\begin{align*} 4\sum_{k=-\infty}^{\infty}|\sin kh|^{2}|\hat{f}(k)|^{2}
&= \sum_{k=-\infty}^{\infty}|\hat{g}_{h}(k)|^{2}
=\frac{1}{2\pi}\int_{\mathbb T}
|g_{h}(t)|^{2}\,dt\\
&=\frac{1}{2\pi}\int_{\mathbb T}
|f(t+h)-f(t-h)|^{2}\,dt\\
&\leq \frac{1}{2\pi}\int_{\mathbb T}
\|f\|_{\beta}^{2}(2h)^{2\beta}\,dt
=\|f\|_{\beta}^{2}(2h)^{2\beta}
\end{align*}
If we set $h=\pi/(4n)$ and observe that, with this choice,
\[|\sin kh|^{2}\geq 1/2\]
for all $n\leq |k|\leq 2n-1$, we obtain
\[2\sum_{n\leq |k|\leq 2n-1}|\hat{f}(k)|^{2}
\leq \|f\|_{\beta}^{2}(2h)^{2\beta}= \|f\|_{\beta}^{2}
(4\pi)^{2\beta}n^{-2\beta}\]
Schwarz's inequality now gives
\[\sum_{n\leq |k|\leq 2n-1}|\hat{f}(k)|
\leq \left(\sum_{n\leq |k|\leq 2n-1}1^{2}\right)^{1/2}
\left(\sum_{n\leq |k|\leq 2n-1}|\hat{f}(k)|^{2}\right)^{1/2}
\leq C\|f\|_{\beta}n^{(1-2\beta)/2}\]
for an appropriate constant $C$.
(ii) Since $f$ is Lipschitz $\beta$, it follows from part~(i) that
\[\sum_{n\leq |k|\leq 2n-1}|\hat{f}(k)|\leq C_{1}n^{(1-2\beta)/2}\]
for some constant $C_{1}$ depending on $f$.
Since $|\hat{f}(k)|=|\hat{\mu}(k)|^{2}$, we have
\[\sum_{n\leq |k|\leq 2n-1}|\hat{\mu}(k)|^{2}\leq C_{1}n^{(1-2\beta)/2}\]
and so, if $\eta>0$,
\[\sum_{k=n}^{2n-1}\frac{|\hat{\mu}(k)|^{2}}{|k|^{1-\eta}}
\leq C_{2}n^{-(1+2\beta-2\eta)/2}\]
for all $n\geq 1$ and some constant $C_{2}$.
By Cauchy's condensation test,
\[\sum_{k\neq 0}\frac{|\hat{\mu}(k)|^{2}}{|k|^{1-\eta}}
\ \text{converges}\]
whenever $(1+2\beta)/2>\eta$.
We know, by Theorem~\ref{T,Hausdorff up Fourier},
that if $\sigma$ is a positive, non-zero measure with
\[\sum_{k\neq 0}\frac{|\hat{\sigma}(k)|^{2}}{|k|^{1-\eta}}
\ \text{convergent}\]
for some $0<\eta<1$, it follows that the Hausdorff
dimension of $\supp\sigma$ must be at least $\eta$.
Thus the Hausdorff dimension of $\supp\mu$ must be
at least $\eta$ for each $\eta$ with $(1+2\beta)/2>\eta$.
We conclude that the Hausdorff dimension of $\supp\mu$ must be at
least $(1+2\beta)/2$.
(iii) This follows from the proof of~(ii) with $\beta=0$.
\end{proof}
We shall show that these constraints are best possible.
\begin{theorem}\label{T;main Hausdorff}
If $1>\alpha>1/2$, then there exists
a probability measure $\mu$ such that the Hausdorff dimension
of the support of $\mu$ is $\alpha$ and $d(\mu*\mu)(t)=f(t)\,dt$
where $f$ is Lipschitz $\alpha-\tfrac{1}{2}$.
\end{theorem}
(In~\cite{Wintner} I give appropriate versions of this result
for $\alpha=1$ and $\alpha=1/2$.)
The reader will probably be able to guess the framework
of our proof.
\begin{exercise}\label{E;Holder,Hausdorff}
If $1/2>\beta>0$, let ${\mathcal L}_{\beta}$
consist of the pairs $(E,\mu)$ where $E$ is compact and
$\mu$ is a probability measure with $\supp\mu\subseteq E$
and $d(\mu*\mu)(t)=f_{\mu}(t)\,dt$ where $f_{\mu}\in \Lambda_{\beta}$.
If $(E,\mu),\,(F,\sigma)\in {\mathcal L}_{\beta}$
with $d(\mu*\mu)(t)=f_{\mu}(t)\,dt$
and $d(\sigma*\sigma)(t)=f_{\sigma}(t)\,dt$, let
\[d_{\beta}\big((E,\mu),(F,\sigma)\big)
=d_{\mathcal H}(E,F)+\sup_{r\in{\mathbb Z}}|\hat{\mu}(r)-\hat{\sigma}(r)|
+\|f_{\mu}-f_{\sigma}\|_{\beta}.\]
Show that $({\mathcal L}_{\beta},d_{\beta})$ is a complete
metric space.
\end{exercise}
We shall prove the Baire category version of
Theorem~\ref{T;main Hausdorff} for the space
$({\mathcal L}_{\beta},d_{\beta})$
defined in Exercise~\ref{E;Holder,Hausdorff}.
\begin{theorem}\label{T;Baire Hausdorff}
If $1>\alpha>1/2$ and $\beta=\alpha-\tfrac{1}{2}$,
then quasi-all $(E,\mu)\in{\mathcal L}_{\beta}$
are such that $E$ has Hausdorff dimension $\alpha$.
\end{theorem}
As usual, we can reduce this result to a simpler
one.
\begin{lemma}\label{L;small Hausdorff}
Let ${\mathcal H}_{\alpha,n}$ be the subset
consisting of those
$(E,\mu)\in {\mathcal L}_{\beta}$
such that we can find a finite collection of intervals
${\mathcal I}$ with
\[\bigcup_{I\in{\mathcal I}}I\supseteq E
\ \text{and}
\ \sum_{I\in{\mathcal I}}|I|^{\alpha+1/n}<1/n.\]
Then ${\mathcal H}_{\alpha,n}$ is open
and dense in $({\mathcal L}_{\beta},d_{\beta})$.
\end{lemma}
\begin{exercise}\label{E;prepare Hausdorff 1}
(i) Deduce Theorem~\ref{T;Baire Hausdorff}
from Lemma~\ref{L;small Hausdorff}.
(ii) Show that, using the notation
of Lemma~\ref{L;small Hausdorff}, ${\mathcal H}_{\alpha,n}$
is open in $({\mathcal L}_{\beta},d_{\beta})$.
\end{exercise}
Let us write ${\mathcal L}_{S,\beta}$ for the set
of $(E,\mu)\in {\mathcal L}_{\beta}$ with $f_{\mu}$
infinitely differentiable.
\begin{exercise}\label{E;prepare Hausdorff 2}
Show, by our usual method of convolving with
a suitable $K_{n}$, or otherwise, that,
given $(F,\sigma)\in{\mathcal L}_{\beta}$
and $\epsilon>0$, we can find an
$(E,\mu)\in{\mathcal L}_{S,\beta}$
with $d_{\beta}\big((F,\sigma),(E,\mu)\big)<\epsilon$.
\end{exercise}
\begin{exercise} Explain why Exercises~\ref{E;prepare Hausdorff 1}~(ii)
and~\ref{E;prepare Hausdorff 2} enable us to reduce the
proof of Lemma~\ref{L;small Hausdorff} to the proof
of the next lemma (Lemma~\ref{L;dense Hausdorff}).
\end{exercise}
\begin{lemma}\label{L;dense Hausdorff} Let
$1>\alpha>1/2$ and $\beta=\alpha-\tfrac{1}{2}$.
Given $(F,\sigma)\in{\mathcal L}_{S,\beta}$
and $\epsilon>0$, we can find an $(E,\mu)\in {\mathcal H}_{\alpha,n}$
with $d_{\beta}\big((F,\sigma),(E,\mu)\big)\leq\epsilon$.
\end{lemma}
Of course, Lemma~\ref{L;dense Hausdorff} is the heart
of the matter. The next two sections are devoted to its proof.
\section{More probability} The proof of Lemma~\ref{L;dense Hausdorff}
depends on the following central step.
\begin{lemma}\label{L;key}
If $1>\gamma>\kappa>0$ and $\epsilon>0$,
there exist an $M(\kappa)$
and $n_{0}(\kappa,\gamma)\geq 1$ with
the following property.
If $n\geq n_{0}$, $n$ is odd and
$n^{\kappa}\geq N>n^{\kappa}-1$
we can find $N$ points
\[x_{j}\in\{r/n\,:\,r\in{\mathbb Z}\}\]
(not necessarily distinct) such that, writing
\[\mu=N^{-1}\sum_{j=1}^{N}\delta_{x_{j}}\]
we have
\[|\mu*\mu(\{k/n\})-n^{-1}|\leq n^{\gamma-1/2} \]
and
\[\mu(\{k/n\})\leq \frac{M(\kappa)}{N}\]
for all $1\leq k\leq n$.
\end{lemma}
Since any event with positive probability must
have at least one instance, Lemma~\ref{L;key}
follows from its probabilistic version.
\begin{lemma}\label{L;key probability}
If $1>\gamma>\kappa>0$,
there exist an $M(\kappa)$
and $n_{0}(\kappa,\gamma)\geq 1$ with
the following property.
Suppose $n\geq n_{0}$, $n$ is odd,
$n^{\kappa}\geq N>n^{\kappa}-1$
and
$X_{1}$, $X_{2}$, \dots, $X_{N}$ are independent random variables
each uniformly distributed on
\[\Gamma_{n}=\{r/n\in{\mathbb T}\,:\,1\leq r\leq n\}.\]
Then, if we write,
$\sigma=N^{-1}\sum_{j=1}^{N}\delta_{X_{j}}$, we have
\[|\sigma*\sigma(\{k/n\})-n^{-1}|\leq
n^{\gamma-1/2} \]
and
\[\sigma(\{k/n\})\leq \frac{M(\kappa)}{N}\]
for all $1\leq k\leq n$
with probability at least $1/2$
\end{lemma}
\begin{exercise} How do you expect $\sigma*\sigma(\{k/n\})$
to behave, assuming that the random variables $X_{j}+X_{k}$
$[j\leq k]$ behave as though they are independent?
Are they in fact independent? Why?
\end{exercise}
The reader may be inclined to ask three questions.
(1) Why do we take $n$ odd? This is merely a technical convenience.
It will be helpful to know that $(k/n)+(k/n)=0$ only if $k/n=0$.
(2) Why do we distribute the $X_{j}$ uniformly on the $n$th roots of unity
rather than uniformly over ${\mathbb T}$? I think (though I have not checked
the details) that the arguments would transfer but (at least for me)
the details seem messier.
(3) Can we strengthen Lemma~\ref{L;key} somewhat? Yes we can
(see~\cite{Wintner}), but (at least in my argument) we lose
any extra sharpness when we come to the proof of
Lemma~\ref{L;dense Hausdorff 2}.
We start our proof of Lemma~\ref{L;key probability}
with a simple observation.
\begin{lemma}\label{L;not many}
Suppose that $1>\kappa>0$,
there exists an $M(\kappa)$
with
the following property.
If
$n^{\kappa}\geq N>n^{\kappa}-1$
and
$X_{1}$, $X_{2}$, \dots, $X_{N}$ are independent random variables
each uniformly distributed on
\[\Gamma_{n}=\{r/n\in{\mathbb T}\,:\,1\leq r\leq n\}.\]
Then, if we write
$\sigma=N^{-1}\sum_{j=1}^{N}\delta_{X_{j}}$, we have
\[\sigma(\{k/n\})\leq \frac{M(\kappa)}{N}\]
for all $1\leq k\leq n$
with probability at least $1-(4n)^{-1}$.
\end{lemma}
\begin{proof} Take $M(\kappa)=3(1-\kappa)^{-1}$.
It is sufficient to look at the case when $n^{\kappa}\geq 8$
and so $N\geq 2$. Fix $r$ for the time being and set
\[Y_{j}=\delta_{X_{j}}(\{r/n\}).\]
We observe that
$Y_{1}$, $Y_{2}$, \dots, $Y_{N}$ are independent random variables
with
\[\Pr(Y_{j}=1)=n^{-1},\ \Pr(Y_{j}=0)=1-n^{-1}.\]
By Lemma~\ref{L;near Poisson}, it follows that
\begin{align*}
\Pr\bigg(\sum_{j=1}^{N}&\delta_{X_{j}}(\{r/n\})
\geq M(\kappa)\bigg)
=\Pr\bigg(\sum_{j=1}^{N}Y_{j}\geq M(\kappa)\bigg)\\
&\leq \frac{2(Nn^{-1})^{M(\kappa)}}{M(\kappa)!}
\leq 2n^{-(1-\kappa)M(\kappa)}\leq 2n^{-3}<\frac{1}{4n^{2}}.
\end{align*}
Thus
\[
\Pr\bigg(\sum_{j=1}^{N}\delta_{X_{j}}(\{r/n\})
\geq M(\kappa)\ \text{for some $0\leq r\leq n-1$}\bigg)
\leq n\times\frac{1}{4n^{2}}=\frac{1}{4n}\]
and the result follows.
\end{proof}
% NOTE(review): some text appears to be missing from the source here.
\begin{lemma}\label{L;martingale}
(i) Let $\delta>0$ and let
$W_{r}$ be a martingale
with respect to a sequence $X_{r}$ of random variables.
Write $Y_{r+1}=W_{r+1}-W_{r}$. Suppose that
\[{\mathbb E}(e^{\lambda |Y_{r+1}|}\,|\,X_{0},\,X_{1},\dots X_{r})
\leq e^{a_{r+1}\lambda^{2}/2}\]
for all $0<\lambda<\delta$ and some $a_{r+1}\geq 0$.
Then
\[{\mathbb E}(e^{\lambda (W_{N}-W_{0})})
\leq e^{A\lambda^{2}/2}\]
where $A=\sum_{r=1}^{N}a_{r}$.
(ii) Suppose $W$ is a random variable
with
\[{\mathbb E}(e^{\lambda W})\leq e^{A\lambda^{2}/2}\]
for all $0<\lambda<\delta$.
Then, provided that $0\leq x<\delta A$,
we have
\[\Pr\big(|W|\geq x\big)\leq
2\exp(-x^{2}/A).\]
\end{lemma}
\begin{proof} (i) Observe that, if $0<\lambda<\delta$,
\[{\mathbb E}(e^{\lambda (W_{N}-W_{0})})
={\mathbb E}(e^{\lambda (Y_{1}+Y_{2}+\dots+Y_{N})})
\leq {\mathbb E}\prod_{r=1}^{N}(e^{\lambda |Y_{r}|})
\leq \prod_{r=1}^{N}e^{a_{r}\lambda^{2}/2}
=e^{A\lambda^{2}/2}\]
where $A=\sum_{r=1}^{N}a_{r}$.
(ii) Use the argument of Lemma~\ref{L;Bernstein}.
\end{proof}
We can now embark on the proof of Lemma~\ref{L;key probability}.
\begin{proof}[Proof of Lemma~\ref{L;key probability}]
Let $M(\kappa)$ be as in Lemma~\ref{L;not many}.
Fix $r$ for the time being and define $Y_{1}$,
$Y_{2}$, \dots, $Y_{N}$ as follows.
If $\sum_{v=1}^{j-1}\delta_{X_{v}}(\{u/n\})\dots$
% NOTE(review): a substantial chunk of text appears to be missing from the
% source here: the remainder of this proof and the statement of the next
% lemma (Lemma L;linear key) have been lost.
\end{proof}
\begin{lemma}\label{L;linear key}
If $1>\gamma>\kappa>0$,
there exist an $M(\kappa)$
and $n_{0}(\kappa,\gamma)\geq 1$ with
the following property.
Suppose $n\geq n_{0}$, $n$ is odd and
$n^{\kappa}\geq N>n^{\kappa}-1$.
Then
we can find
\[x_{j}\in\{r/n:r\in{\mathbb Z}\}\]
(not necessarily distinct) such that, writing
\[g=\frac{n}{N}\sum_{j=1}^{N}
{\mathbb I}_{[x_{j}-(2n)^{-1},x_{j}+(2n)^{-1})},\]
we have $g*g$ continuous and
(i) $\|g*g-1\|_{\infty}\leq n^{1/2-\gamma}$,
(ii) $|h|^{-1}|g*g(t+h)-g*g(t)|\leq
2n^{3/2-\gamma}$ for
all $t,h\in{\mathbb T}$, $h\neq 0$,
(iii) $|g(t)|\leq M(\kappa)n^{1-\kappa}+1$ for
all $t\in{\mathbb T}$.
\end{lemma}
\begin{proof} Let $x_{j}$ and $\mu$ be as in Lemma~\ref{L;key}.
Then $g=\mu*n{\mathbb I}_{[-(2n)^{-1},(2n)^{-1})}$
and so
\[g*g=\mu*\mu*
\big(n{\mathbb I}_{[-(2n)^{-1},(2n)^{-1})}
*n{\mathbb I}_{[-(2n)^{-1},(2n)^{-1})}\big)
=\mu*\mu*n^{2}\triangle_{n}\]
where
\[\triangle_{n}(t)=\max(0,1-n|t|).\]
Thus $g*g$ is the simplest piecewise linear function
with
\[g*g(r/n)=n\mu*\mu(\{r/n\}).\]
By inspection, $g*g$ is continuous everywhere
and linear on
each interval $[r/n,(r+1)/n]$. Since
\[|g*g(r/n)-1|\leq n^{-\gamma+1/2},\]
conclusions (i) to (iii) follow at once.
\end{proof}
Condition~(iii) is not very important, but it is helpful
to have some bound on $\|g\|_{\infty}$.
We now smooth $g$ by convolving with a suitable function.
\begin{lemma}\label{L;smooth key}
If $1>\gamma>\kappa>0$,
there exist an $M_{1}(\kappa)$
and $n_{0}(\kappa,\gamma)\geq 1$ with
the following property.
Suppose $n\geq n_{0}$ and
$n^{\kappa}\geq N>n^{\kappa}-1$.
Then
we can find a
positive infinitely differentiable function $f$
such that
(i) $\|f*f-1\|_{\infty}\leq n^{1/2-\gamma}$.
(ii) $\|(f*f)'\|_{\infty}\leq
n^{3/2-\gamma}$.
(iii) $\|f\|_\infty\leq M_{1}n^{1-\kappa}$.
(iv) $\|f'\|_{\infty}\leq M_{1}n^{2-\kappa}$.
(v) $\int_{\mathbb T}f(t)\,dt=1$.
(vi) $\supp f$ can be covered by $n^{\kappa}$ intervals of length
$2/n$.
\end{lemma}
\begin{proof} (By considering $1>\gamma>\gamma'>\kappa'>\kappa>0$
if necessary we can now drop the restriction $n$ odd.)
The result follows by considering $g*K_{n}$ where $g$
is chosen as in Lemma~\ref{L;linear key}
and $K_{n}$ as in Exercise~\ref{E;approximate unit}.
\end{proof}\begin{lemma}\label{L;continuous key start}
Suppose that
$\alpha-\tfrac{1}{2}>\beta>0$.
If $\epsilon>0$
there exists an $n_{1}(\alpha,\beta,\epsilon)\geq 1$ with
the following property.
If $n>n_{1}(\alpha,\beta,\epsilon)$
we can find a
positive infinitely differentiable function $f$
with the following properties.
(i) $\|f*f-1\|_{\infty}\leq\epsilon$.
(ii) $\|(f*f)'\|\leq \epsilon n^{1-\beta}$.
(iii) $\|f\|_\infty\leq \epsilon n$.
(iv) $\|f'\|_{\infty}\leq \epsilon n^{2}$.
(v) $\int_{\mathbb T}f(t)\,dt=1$.
(vi) $\supp f$ can be covered by less than
$\epsilon n^{\alpha}/2$
intervals
of length $2/n$.
(vii) $|h|^{-\beta}|f*f(t+h)-f*f(t)|\leq \epsilon$ for
all $t,h\in{\mathbb T}$ with $h\neq 0$.
(viii) $|\hat{f}(r)|\leq\epsilon$ for all $r\neq 0$.
\end{lemma}
\begin{proof} Choose $\kappa=(1/3)\alpha+(2/3)(\beta+1/2)$
and $\gamma=(2/3)\alpha+(1/3)(\beta+1/2)$
and take $N=[n^{\kappa}]$.
Provided that $n$ is large enough,
Lemma~\ref{L;smooth key},
tells us that we can find a
positive infinitely differentiable function $f$
with the following properties.
(i) $\|f*f-1\|_{\infty}\leq
\epsilon n^{-\beta}/2$.
(ii) $\|(f*f)'\|\leq \epsilon n^{1-\beta}$.
(iii) $\|f\|_\infty\leq \epsilon n$.
(iv) $\|f'\|_{\infty}\leq \epsilon n^{2}$.
(v) $\int_{\mathbb T}f(t)\,dt=1$.
(vi) $\supp f$ can be covered by less than $\epsilon n^{\alpha}/2$
intervals of length
$2n^{-1}$.
By the mean value theorem, condition (ii)
gives
\[|h|^{-1}|f*f(t+h)-f*f(t)|\leq \epsilon n^{1-\beta}\]
for
all $t,h\in{\mathbb T}$ with $h\neq 0$.
In particular,
\begin{align*}
|h|^{-\beta}|f*f(t+h)-f*f(t)|&=|h|^{1-\beta}
|h|^{-1}|f*f(t+h)-f*f(t)|\\
&\leq \epsilon |h|^{1-\beta}n^{1-\beta}\leq\epsilon
\end{align*}
for $|h|\leq n^{-1}$. However, if $|h|\geq n^{-1}$,
then using (i),
\[
|h|^{-\beta}|f*f(t+h)-f*f(t)|\leq |h|^{-\beta}2\|f*f-1\|_{\infty}\\
\leq \epsilon |h|^{-\beta}n^{-\beta}\leq\epsilon.\]
\end{proof}
\begin{lemma}\label{L;continuous key}
Suppose that $\alpha-\tfrac{1}{2}>\beta>0$.
Then there exists an integer $k_{0}(\alpha,\beta)$
such that, given any $\epsilon$,
there exists an $m_{1}(k,\alpha,\beta,\epsilon)\geq 1$ with
the following property.
If $m>m_{1}(k,\alpha,\beta,\epsilon)$,
we can find a
positive infinitely differentiable function $F$
which is periodic with period $1/m$ and obeys
the following conditions.
(i) $\|F*F-1\|_{\infty}\leq\epsilon$.
(ii) $\|(F*F)'\|\leq m^{k(1-\beta)}$.
(iii) $\|F\|_\infty\leq m^{k}$.
(iv) $\|F'\|_{\infty}\leq m^{2k+1}$.
(v) $\int_{\mathbb T}F(t)\,dt=1$.
(vi) We can find a finite collection of intervals ${\mathcal I}$
such that
\[\bigcup_{I\in{\mathcal I}}I\supseteq \supp F
\ \text{and}
\ \sum_{I\in{\mathcal I}}|I|^{\alpha}<\epsilon.\]
(vii) $|h|^{-\beta}|F*F(t+h)-F*F(t)|\leq \epsilon$ for
all $t,\,h\in{\mathbb T}$ with $h\neq 0$.
(viii) $|\hat{F}(r)|\leq\epsilon$ for all $r\neq 0$.
\end{lemma}
\begin{proof} Let
\[\alpha_{1}=\frac{3}{4}\alpha+\frac{1}{4}\left(\beta+\frac{1}{2}\right),
\ \beta_{1}=\frac{1}{4}\left(\alpha-\frac{1}{2}\right)+
\frac{3}{4}\beta.\]
By Lemma~\ref{L;continuous key start} with $n=m^{k}$
we know that, provided only that $m$ is large enough,
we can find a
positive infinitely differentiable function $f$
with the following properties
(i$'$) $\|f*f-1\|_{\infty}\leq\epsilon$.
(ii$'$) $\|(f*f)'\|\leq \epsilon m^{k(1-\beta_{1})}$
(iii$'$) $\|f\|_\infty\leq \epsilon m^{k}$,
(iv$'$) $\|f'\|_{\infty}\leq \epsilon m^{2k}$
(v$'$) $\int_{\mathbb T}f(t)\,dt=1$.
(vi$'$) $\supp f$ can be covered by less than $m^{k\alpha_{1}}/2$
intervals
of length $2m^{-k}$.
(vii$'$) $|h|^{\beta_{1}}|f*f(t+h)-f*f(t)|\leq \epsilon$ for
all $t,h\in{\mathbb T}$ with $h\neq 0$.
(viii$'$) $|\hat{f}(r)|\leq\epsilon$ for all $r\neq 0$.
If we set $F(t)=f(mt)$, we see, at once, that $F$ is
positive infinitely differentiable function
such that
(i) $\|F*F-1\|_{\infty}\leq\epsilon$.
(ii) $\|(F*F)'\|\leq \epsilon m^{k(1-\beta_{1})+1}$
(iii) $\|F\|_\infty\leq \epsilon m^{k}$.
(iv) $\|F'\|_{\infty}\leq \epsilon m^{2k+1}$.
(v) $\int_{\mathbb T}F(t)\,dt=1$.
(vi) We can find a collection ${\mathcal I}$
of at most $m^{1+k\alpha_{1}}/2$
intervals
each of length $2m^{-k-1}$ such that
\[\bigcup_{I\in{\mathcal I}}I\supseteq \supp f.\]
Provided that $k$ is large enough (depending only on $\alpha$
and $\beta$) the result follows.
\end{proof}
We shall need some results on $\Lambda_{\beta}$ which are obtained
in much the same way as the corresponding results on differentiation.
\begin{exercise} If $f\in\Lambda_{\beta}$ let us write
\[\omega_{\beta}(f)=\sup_{t,h\in{\mathbb T},h\neq 0}|h|^{-\beta}
|f(t+h)-f(t)|\]
so that $\|f\|_{\beta}=\|f\|_{\infty}+\omega_{\beta}(f)$.
Prove the following results.
(i) If $f,\,g\in \Lambda_{\beta}$, then $f+g,\,fg\in\Lambda_{\beta}$
and
\[\omega_{\beta}(fg)\leq
\omega_{\beta}(f)\|g\|_{\infty}+\omega_{\beta}(g)\|f\|_{\infty}.\]
(ii) If $f\in \Lambda_{\beta}$ and $g\in L^{1}({\mathbb T})$, then
$f*g\in \Lambda_{\beta}$ and
\[\omega_{\beta}(f*g)\leq
\omega_{\beta}(f)\|g\|_{1}.\]
(iii) If $f:{\mathbb T}\rightarrow{\mathbb C}$ has continuous derivative,
then $f\in\Lambda_{\beta}$ and
\[\omega_{\beta}(f)\leq\|f'\|_{\infty}.\]
\end{exercise}
We now prove Lemma~\ref{L;dense Hausdorff}
and conclude the proof. In fact we shall prove
the result in a slightly more concrete form.
\begin{lemma}\label{L;dense Hausdorff 2}
Let $1>\alpha>1/2$ and $\beta=\alpha-\tfrac{1}{2}$.
Suppose that
$g:{\mathbb T}\rightarrow{\mathbb R}$
is an infinitely differentiable positive function
with
\[\int_{\mathbb T}g(t)\,dt=1\]
and $H$ is a closed set with $H\supseteq\supp g$.
Then, given $\epsilon>0$, we can find
an infinitely differentiable
positive function $f:{\mathbb T}\rightarrow{\mathbb R}$
with
\[\int_{\mathbb T}f(t)\,dt=1\]
and a closed set $E\supseteq\supp f$ such that,
writing $d\sigma(t)=g(t)\,dt$, $d\mu(t)=f(t)\,dt$,
we have
$(E,\mu)\in{\mathcal H}_{n}$ and
\[d_{\beta}\big((E,\mu),(H,\sigma)\big)<\epsilon.\]
\end{lemma}
\begin{proof}
Since ${\mathcal H}_{n}\supseteq{\mathcal H}_{n+1}$,
we may restrict ourselves to the case when $\alpha+1/n<1$.
Lemma~\ref{L;continuous key} tells us that we can find a positive
integer $k$ with the property described in the next sentence.
Let $\eta>0$, then, when $m$ is sufficiently large,
we can find a
positive infinitely differentiable function $F_{m}$
which is periodic with period $1/(2m+1)$ satisfying
the following conditions.
(The result corresponding to (ii)$_{m}$ is not required.)
(i)$_{m}$ $\|F_{m}*F_{m}-1\|_{\infty}\leq\eta$.
(iii)$_{m}$ $\|F_{m}\|_\infty\leq 4^{2k}m^{2k}$.
(iv)$_{m}$ $\|F_{m}'\|_{\infty}\leq 4^{2k+1} m^{2k+1}$.
(v)$_{m}$ $\int_{\mathbb T}F_{m}(t)\,dt=1$.
(vi)$_{m}$ We can find a finite collection of intervals ${\mathcal I}_{m}$
such that
\[\bigcup_{I\in{\mathcal I}_{m}}I\supseteq \supp F_{m}
\ \text{and}
\ \sum_{I\in{\mathcal I}_{m}}|I|^{\alpha+1/n}<\frac{1}{n}.\]
(vii)$_{m}$ $\omega_{\beta}(F_{m}*F_{m})\leq \eta$.
(viii)$_{m}$ $|\hat{F}_{m}(r)|\leq\eta$ for all $r\neq 0$.
Since $g$ is infinitely differentiable, repeated integration
by parts shows that there exists a constant $C_{1}$
such that
\[|\hat{g}(r)|\leq C_{1}|r|^{-(2k+4)}\]
for $r\neq 0$
and so there exists a constant $C$ such that
\[\tag*{$\bigstar$}\sum_{|r|\geq m}|r||\hat{g}(r)|\leq C|m|^{-(2k+2)}\]
for all $m\geq 1$.
If we set $G_{m}(t)=g(t)F_{m}(t)$ and
\[f(t)=\left(\int_{\mathbb T}G_{m}(s)\,ds\right)^{-1}G_{m}(t),\]
then, automatically,
\[\supp f\subseteq H\cap\supp F_{m}.\]
Thus, by choosing an appropriate finite set $A$
and setting $E=A\cup \supp f$, we can ensure that
$(E,\mu)\in{\mathcal L}_{\beta}$,
\[d_{\beta}\big((E,\mu),(H,\sigma)\big)<\epsilon/4\]
and we can find a finite collection of intervals ${\mathcal I}$
such that
\[\bigcup_{I\in{\mathcal I}}I\supseteq E
\ \text{and}
\ \sum_{I\in{\mathcal I}}|I|^{\alpha+1/n}<\frac{1}{n}.\]
We have shown that (setting $d\mu(t)=f(t)\,dt$) $(E,\mu)\in{\mathcal H}_{n}$
and all we need to do is to show that, for
appropriate choices of $\eta$ and $m$ we have
\[\sup_{r\in{\mathbb Z}}|\hat{f}(r)-\hat{g}(r)|<\epsilon/4,
\ \|f*f-g*g\|_{\infty}<\epsilon/4
\ \text{and}\ \omega_{\beta}(f*f-g*g)<\epsilon/4.\]
Without loss of generality we may suppose $\epsilon<1$,
so simple calculations show that it is sufficient
to prove
\[\sup_{r\in{\mathbb Z}}|\hat{g}(r)-\hat{G}_{m}(r)|<\epsilon/8,
\ \|g*g-G_{m}*G_{m}\|_{\infty}<\epsilon/8
\ \text{and}\ \omega_{\beta}(g*g-G_{m}*G_{m})<\epsilon/8.\]
Using (viii)$_{m}$, we have
\begin{align*}
|\hat{g}(r)-\hat{G}_{m}(r)|&=
\left|\hat{g}(r)-\sum_{j=-\infty}^{\infty}
\hat{g}(r-j)\hat{F}_{m}(j)\right|\\
&=\left|\sum_{u\neq 0}
\hat{g}(r-u)\hat{F}_{m}(u)\right|
\leq \sum_{u\neq 0}|\hat{g}(r-u)||\hat{F}_{m}(u)|\\
&\leq \sum_{u\neq 0}|\hat{g}(r-u)|\eta
\leq \eta\sum_{j=-\infty}^{\infty}|\hat{g}(j)|<\epsilon/8
\end{align*}
for all $r$ provided only that $\eta$ is small enough.
We now fix $\eta$ once and for all so that the inequality
just stated holds and
\[\eta\big((1+\|g\|_{\infty})^{2}
+\omega_{\beta}(g*g)+2\big)<\epsilon/12\]
but
leave $m$ free.
We have now arrived at the central estimates
of the proof which show that
\[\|g*g-G_{m}*G_{m}\|_{\infty}<\epsilon/8
\ \text{and}\ \omega_{\beta}(g*g-G_{m}*G_{m})<\epsilon/8,\]
provided only that $m$ is large enough.
The proofs of the two inequalities are similar.
We start with the first which is slightly easier.
We write
\[P_{m}(t)=\sum_{|r|\leq m}\hat{g}(r)\exp(irt)
\ \text{and}
\ g_{m}(t)=g(t)-P_{m}(t).\]
By $\bigstar$, we see that, if $m\geq 1$,
\[\tag*{$\bigstar\bigstar$}
\|g-P_{m}\|_{\infty},\|g'-P_{m}'\|_{\infty}\leq C|m|^{-(2k+2)}.\]
We shall take $m$ sufficiently large that
\[
\|g-P_{m}\|_{\infty},\|g'-P_{m}'\|_{\infty}\leq 1.\]
Now since $F_{m}$ is periodic with period $1/(2m+1)$
and $P_{m}$ is a trigonometric polynomial of degree at most $m$,
\[\widehat{P_{m}F_{m}}\big((2m+1)u+v\big)
=\hat{F}_{m}\big((2m+1)u\big)\hat{P}_{m}(v)\]
for all $u$ and $v$
so that
\begin{align*}
\big((P_{m}F_{m})*(P_{m}F_{m})\big)\hat{\ }&\big((2m+1)u+v\big)
=\big(\hat{F}_{m}\big((2m+1)u\big)\hat{P}_{m}(v)\big)^{2}\\
&=\bigg((P_{m}F_{m})\hat{\ }\big((2m+1)u+v\big)\bigg)^{2}\\
&=\big((P_{m}*P_{m})(F_{m}*F_{m})\big)\hat{\ }\big((2m+1)u+v\big)
\end{align*}
and
\[(P_{m}F_{m})*(P_{m}F_{m})(t)=(P_{m}*P_{m})(t)(F_{m}*F_{m})(t).\]
Using this equality, we obtain
\begin{align*}
\|g*g&-G_{m}*G_{m}\|_{\infty}
=\|g*g-(gF_{m})*(gF_{m})\|_{\infty}\\
&\leq\|g*g-P_{m}*P_{m}\|_{\infty}
+\|P_{m}*P_{m}-(P_{m}F_{m})*(P_{m}F_{m})\|_{\infty}\\
&\qquad\qquad+\|(P_{m}F_{m})*(P_{m}F_{m})-(gF_{m})*(gF_{m})\|_{\infty}\\
&=\|g*g-P_{m}*P_{m}\|_{\infty}
+\|P_{m}*P_{m}-(P_{m}*P_{m})(F_{m}*F_{m})\|_{\infty}\\
&\qquad\qquad+\|(P_{m}F_{m})*(P_{m}F_{m})-(gF_{m})*(gF_{m})\|_{\infty}.
\end{align*}
We estimate the three terms separately.
First we observe that
\begin{align*}
\|g*g-P_{m}*P_{m}\|_{\infty}
&=\|(g-P_{m})*(g-P_{m})+2(g-P_{m})*P_{m}\|_{\infty}\\
&\leq \|(g-P_{m})*(g-P_{m})\|_{\infty}+2\|(g-P_{m})*P_{m}\|_{\infty}\\
&\leq \|g-P_{m}\|_{\infty}^{2}+ 2\|g-P_{m}\|_{\infty}\|P_{m}\|_{\infty}\\
&\leq \|g-P_{m}\|_{\infty}^{2}+ 2\|g-P_{m}\|_{\infty}(1+\|g\|_{\infty})
<\epsilon/12,
\end{align*}
provided only that $m$ is large enough.
Next we observe that
\begin{align*}
\|(&P_{m}F_{m})*(P_{m}F_{m})-(gF_{m})*(gF_{m})\|_{\infty}\\
&\leq
\big\|\big((g-P_{m})F_{m}\big)*\big((g-P_{m})F_{m}\big)\big\|_{\infty}
+2\big\|\big((g-P_{m})F_{m}\big)*(gP_{m})\big\|_{\infty}\\
&\leq \|(g-P_{m})F_{m}\|_{\infty}^{2}
+2\|(g-P_{m})F_{m}\|_{\infty}\|gP_{m}\|_{\infty}\\
&\leq (\|g-P_{m}\|_{\infty}\|F_{m}\|_{\infty})^{2}
+2\|g-P_{m}\|_{\infty}\|F_{m}\|_{\infty}\|g\|_{\infty}\|P_{m}\|_{\infty}\\
&\leq (\|g-P_{m}\|_{\infty}\|F_{m}\|_{\infty})^{2}
+2\|g-P_{m}\|_{\infty}\|F_{m}\|_{\infty}\|g\|_{\infty}(1+\|g\|_{\infty})\\
&\leq \left(\frac{C}{m^{2k+2}}4^{2k}m^{2k}\right)^{2}
+\frac{2C}{m^{2k+2}}4^{2k}m^{2k}\|g\|_{\infty}(1+\|g\|_{\infty})
<\epsilon/12,
\end{align*}
provided only that $m$ is large enough.
Finally we note that
\begin{align*}
\|P_{m}*P_{m}-(P_{m}*P_{m})(F_{m}*F_{m})\|_{\infty}
&=\|(P_{m}*P_{m})(1-F_{m}*F_{m})\|_{\infty}\\
&\leq\|P_{m}*P_{m}\|_{\infty}\|1-F_{m}*F_{m}\|_{\infty}\\
&\leq \|P_{m}\|_{\infty}^{2}\|1-F_{m}*F_{m}\|_{\infty}\\
&\leq (1+\|g\|_{\infty})^{2}\eta<\epsilon/12.
\end{align*}
Combining our estimates we obtain
\[\|g*g-G_{m}*G_{m}\|_{\infty}<\epsilon/4\]
as required.
We turn now to the second inequality. Much as before,
\begin{align*}
\omega_{\beta}(g*g&-G_{m}*G_{m})\\
&\leq\omega_{\beta}(g*g-P_{m}*P_{m})
+\omega_{\beta}\big(P_{m}*P_{m}-(P_{m}*P_{m})(F_{m}*F_{m})\big)\\
&\qquad\qquad+
\omega_{\beta}\big((P_{m}F_{m})*(P_{m}F_{m})-(gF_{m})*(gF_{m})\big).
\end{align*}
We bound the first term.
\begin{align*}
\omega_{\beta}(g*g-&P_{m}*P_{m})
\leq \omega_{\beta}\big((g-P_{m})*(g-P_{m})\big)+
2\omega_{\beta}\big((g-P_{m})*P_{m}\big)\\
&\leq \big\|\big((g-P_{m})*(g-P_{m})\big)'\big\|_{\infty}
+2\big\|\big((g-P_{m})*P_{m}\big)'\big\|_{\infty}\\
&=\|(g-P_{m})'*(g-P_{m})\|_{\infty}
+2\|(g-P_{m})'*P_{m}\|_{\infty}\\
&\leq \|(g-P_{m})'\|_{\infty}\|g-P_{m}\|_{\infty}
+ 2\|(g-P_{m})'\|_{\infty}\|P_{m}\|_{\infty}\\
&\leq \|(g-P_{m})'\|_{\infty}\|g-P_{m}\|_{\infty}
+ 2\|(g-P_{m})'\|_{\infty}(1+\|g\|_{\infty})
<\epsilon/12,
\end{align*}
provided only that $m$ is large enough.
Next we bound the third term.
\begin{align*}
&\omega_{\beta}\big((P_{m}F_{m})*(P_{m}F_{m})-(gF_{m})*(gF_{m})\big)\\
&\leq
\omega_{\beta}\bigg(\big((g-P_{m})F_{m}\big)*\big((g-P_{m})F_{m}\big)\bigg)
+2\omega_{\beta}\bigg(\big((g-P_{m})F_{m}\big)*(gP_{m})\bigg)\\
&\leq\Big\|\Big(\big((g-P_{m})F_{m}\big)
*\big((g-P_{m})F_{m}\big)\Big)'\Big\|_{\infty}
+2\Big\|\Big(\big((g-P_{m})F_{m}\big)
*(gP_{m})\Big)'\Big\|_{\infty}\\
&=\big\|\big((g-P_{m})F_{m}\big)'*\big((g-P_{m})F_{m}\big)\big\|_{\infty}
+2\big\|\big((g-P_{m})F_{m}\big)'
*(gP_{m})\big\|_{\infty}\\
&\leq\|((g-P_{m})F_{m})'\|_{\infty}\|(g-P_{m})F_{m}\|_{\infty}
+2\|((g-P_{m})F_{m})'\|_{\infty}\|gP_{m}\|_{\infty}\\
&\leq (\|(g-P_{m})'\|_{\infty}\|F_{m}\|_{\infty}
+\|g-P_{m}\|_{\infty}\|F_{m}'\|_{\infty})
\|g-P_{m}\|_{\infty}\|F_{m}\|_{\infty}\\
&\qquad+2(\|(g-P_{m})'\|_{\infty}\|F_{m}\|_{\infty}
+\|g-P_{m}\|_{\infty}\|F_{m}'\|_{\infty})
\|g\|_{\infty}\|P_{m}\|_{\infty}\\
&\leq\left(
\frac{C}{m^{2k+2}}4^{2k}m^{2k}+\frac{C}{m^{2k+2}}4^{2k+1}m^{2k+1}\right)
\frac{C}{m^{2k+2}}4^{2k}m^{2k}\\
&\qquad\qquad+2
\left(
\frac{C}{m^{2k+2}}4^{2k}m^{2k}+\frac{C}{m^{2k+2}}4^{2k+1}m^{2k+1}\right)
\|g\|_{\infty}(1+\|g\|_{\infty})\\
&<\frac{\epsilon}{12},
\end{align*}
provided only that $m$ is large enough.
Finally we estimate the second term
\begin{align*}
\omega_{\beta}&\big(P_{m}*P_{m}-(P_{m}*P_{m})(F_{m}*F_{m})\big)
=\omega_{\beta}\big((P_{m}*P_{m})(1-F_{m}*F_{m})\big)\\
&\leq \|P_{m}*P_{m}\|_{\infty}\omega_{\beta}(1-F_{m}*F_{m})
+\omega_{\beta}(P_{m}*P_{m})\|1-F_{m}*F_{m}\|_{\infty}\\
&\leq \|P_{m}*P_{m}\|_{\infty}\omega_{\beta}(F_{m}*F_{m})
+\omega_{\beta}(P_{m}*P_{m})\|1-F_{m}*F_{m}\|_{\infty}\\
&\leq \|P_{m}*P_{m}\|_{\infty}\eta+\omega_{\beta}(P_{m}*P_{m})\eta.
\end{align*}
Estimates of a familiar kind show that
\begin{align*}
\omega_{\beta}(P_{m}*P_{m})
&\leq \omega_{\beta}(g*g)+\omega_{\beta}(P_{m}*P_{m}-g*g)\\
&\leq \omega_{\beta}(g*g)+\omega_{\beta}\big((P_{m}-g)*(P_{m}-g)\big)
+2\omega_{\beta}\big((P_{m}-g)*g\big)\\
&\leq \omega_{\beta}(g*g)+\|(g-P_{m})'\|_{\infty}\|g-P_{m}\|_{\infty}
+\|(g-P_{m})'\|_{\infty}\|g\|_{\infty}\\
&\leq \omega_{\beta}(g*g)+1
\end{align*}
and, similarly,
\[\|P_{m}*P_{m}\|_{\infty}\leq\|g*g\|_{\infty}+1
\leq \|g\|_{\infty}^{2}+1,\]
provided only that $m$ is large enough.
Thus
\begin{align*}
\omega_{\beta}&\big(P_{m}*P_{m}-(P_{m}*P_{m})(F_{m}*F_{m})\big)\\
&\leq (\omega_{\beta}(g*g)+\|g\|_{\infty}^{2}+2)\eta
<\epsilon/12,
\end{align*}
provided only that $m$ is large enough.
Combining our estimates we obtain
\[\omega_{\beta}(g*g-G_{m}*G_{m})<\epsilon/4\]
and this completes the proof.
\end{proof}
The estimates which occupy the last two pages of the previous proof
are nowhere delicate and I strongly suspect that there is a
much shorter direct argument which does not use Fourier series.
\section{Hausdorff dimension and sums} We have concluded the
main business
of these notes, but I cannot resist including one further
result on
sums and Hausdorff dimension.
\begin{theorem}\label{T;main sum}
Given a sequence $\alpha_{j}$ with
$0\leq\alpha_{j}\leq\alpha_{j+1}<1$, we can find
a closed set $E$ such that
\[E_{[j]}=\underbrace{E+E+\ldots+E}_{j}\]
has
Hausdorff dimension $\alpha_{j}$ for each $j\geq 1$.
\end{theorem}
(See also Exercise~\ref{E:one dimension}.)
We shall use Theorem~\ref{T;Hausdorff up} which the reader
is invited to reread together with
a couple of elementary observations.
\begin{lemma}\label{L;holes} Let $1>\beta>\alpha\geq 0$.
Let $g$ be a piecewise continuous positive
function. If we define $g_{n}$, for $n^{(1/\beta)-1}\geq 2$,
by the conditions
\[g_{n}(x)=
\begin{cases}a_{r,n}&\text{if $|x-rn^{-1}|\leq n^{-1/\beta}$,
$r\in{\mathbb Z}$},\\
0&\text{otherwise,}
\end{cases}\]
where
\[a_{r,n}=\int_{(r-1/2)/n}^{(r+1/2)/n}g(x)\,dx,\]
then
\[\iint_{{\mathbb T}^{2}}\frac{g_{n}(x)g_{n}(y)}
{|x-y|^{\alpha}}\,dx\,dy
\rightarrow \iint_{{\mathbb T}^{2}} \frac{g(x)g(y)}
{|x-y|^{\alpha}}\,dx\,dy\]
as $n\rightarrow \infty$.
\end{lemma}
\begin{proof} We show that, in fact,
\[\int_{\mathbb T}
\frac{g_{n}(x)}{|x-y|^{\alpha}}\,dx\rightarrow
\int_{\mathbb T} \frac{g(x)}{|x-y|^{\alpha}}\,dx,\]
uniformly in $y$.
To this end, observe that, if $10^{-1}>\delta>0$,
\[\int_{|x-y|\geq \delta}
\frac{g_{n}(x)}{|x-y|^{\alpha}}\,dx\rightarrow
\int_{|x-y|\geq\delta} \frac{g(x)}
{|x-y|^{\alpha}}\,dx\]
uniformly as $n\rightarrow \infty$.
Next note that
\[\int_{|x-y|\leq\delta} \frac{g(x)}{|x-y|^{\alpha}}\,dx
\leq\|g\|_{\infty}\int_{|x|\leq\delta}
\frac{1}{|x|^{\alpha}}\,dx=\frac{2\|g\|_{\infty}}{1-\alpha}\delta^{1-\alpha}
\rightarrow 0\]
as $\delta\rightarrow 0$. Finally observe that simple estimates give
$|a_{r,n}|\leq 2n^{(1/\beta)-1}\|g\|_{\infty}$ and
\begin{align*}
\int_{|x-y|\leq \delta}
\frac{g_{n}(x)}{|x-y|^{\alpha}}\,dx
&\leq 2\|g\|_{\infty}
n^{(1/\beta)-1}
\int_{|x|\leq 8n^{-1/\beta}}\frac{1}{|x|^{\alpha}}\,dx
+2\|g\|_{\infty}\sum_{1\leq r\leq n\delta}\frac{n^{\alpha}}{|r|^{\alpha}}\\
&\leq 2\|g\|_{\infty}n^{(1/\beta)-1}
\frac{8^{1-\alpha}}{1-\alpha}n^{-(1-\alpha)/\beta}
+\frac{4\|g\|_{\infty}}{1-\alpha}\delta^{1-\alpha}\\
&\leq\frac{16\|g\|_{\infty}}{1-\alpha}n^{(\alpha/\beta)-1}
+\frac{4\|g\|_{\infty}}{1-\alpha}\delta^{1-\alpha}
\rightarrow 0
\end{align*}
as $\delta\rightarrow 0$ and $n\rightarrow\infty$.
\end{proof}
\begin{lemma}\label{L;one close} Let $j$ be a strictly positive integer
and let $K>0$. Suppose that $E(n)$ is a closed
subset of ${\mathbb T}$ such that there exists a probability
measure $\mu_{n}$ with
\[\supp\mu_{n}\subseteq E(n)_{[j]}
\ \text{and}
\ \iint_{{\mathbb T}^{2}} \frac{d\mu_{n}(x)\,d\mu_{n}(y)}
{|x-y|^{\alpha}}\leq K.\]
Then, if $E\in{\mathcal F}$ and $d_{\mathcal F}(E(n),E)\rightarrow 0$
as $n\rightarrow\infty$, there exists a probability
measure $\mu$ with
\[\supp\mu\subseteq E_{[j]}
\ \text{and}
\ \iint_{{\mathbb T}^{2}} \frac{d\mu(x)\,d\mu(y)}
{|x-y|^{\alpha}}\leq K.\]
\end{lemma}
\begin{proof} Since the set of probability measures is
weak-star compact, we may suppose, by extracting a subsequence,
that $\mu_{n}\rightarrow\mu$ weakly as $n\rightarrow\infty$.
Since $d_{\mathcal F}(E(n),E)\rightarrow 0$
we have $d_{\mathcal F}(E(n)_{[j]},E_{[j]})\rightarrow 0$
and $\supp\mu\subseteq E_{[j]}$. Since
\[\iint_{{\mathbb T}^{2}} \frac{d\mu(x)\,d\mu(y)}
{|x-y|^{\alpha}}\leq \liminf_{n\rightarrow\infty}
\iint_{{\mathbb T}^{2}} \frac{d\mu_{n}(x)\,d\mu_{n}(y)}
{|x-y|^{\alpha}}
\leq K\]
we are done.
\end{proof}
\begin{lemma}\label{L;closed}
We work in $({\mathcal E},d_{\mathcal E})$ the space of compact
subsets of ${\mathbb T}$ with the usual Hausdorff metric
$d_{\mathcal E}$.
Let
$0\leq\alpha_{j}\leq\alpha_{j+1}<1$ and $K_{j}>0$.
Let ${\mathcal G}$
be the collection of compact sets $E$ such that,
for each $j\geq 1$,
there exists a probability
measure $\mu_{j}$ with
\[\supp\mu_{j}\subseteq E_{[j]}
\ \text{and}
\ \iint_{{\mathbb T}^{2}} \frac{d\mu_{j}(x)\,d\mu_{j}(y)}
{|x-y|^{\alpha_{j}}}\leq K_{j}.\]
Then ${\mathcal G}$ is a closed subset of $({\mathcal E},d_{\mathcal E})$.
\end{lemma}
As matters stand, ${\mathcal G}$ could be empty.
However, if $E$ is the union of a finite collection of closed
intervals (for example if $E={\mathbb T}$), then, if we take
$\tau$ to be the uniform probability measure
on $E$ and set
\[K_{j}=1+\iint_{{\mathbb T}^{2}} \frac{d\tau(x)\,d\tau(y)}
{|x-y|^{\alpha_{j}}},\]
we will have $E\in{\mathcal G}$.
From now on, the $\alpha_{j}$ will form a fixed sequence
satisfying the conditions of Lemma~\ref{L;closed}
and the $K_{j}$ will be a fixed sequence chosen so that
\[K_{j}>\iint_{{\mathbb T}^{2}} \frac{1}
{|x-y|^{\alpha_{j}}}dx\,dy.\]
If $d_{\mathcal G}$ is the restriction
of the metric $d_{\mathcal E}$ to the space ${\mathcal G}$,
we now know that $({\mathcal G},d_{\mathcal G})$ is
complete and non-empty.
Theorem~\ref{T;main sum} thus follows from its
Baire category version.
\begin{theorem}\label{T;Baire sum} The set of $E\in{\mathcal G}$
such that $E_{[j]}$ has Hausdorff dimension $\alpha_{j}$
for all $j\geq 1$ is of second category in $({\mathcal G},d_{\mathcal G})$.
\end{theorem}
We can now reduce the proof of Theorem~\ref{T;Baire sum}
to the following lemma in our usual manner.
\begin{lemma}\label{L;sum lemma} Let $\eta>0$
and $n\geq 1$. Then the set ${\mathcal J}$
of $E\in{\mathcal G}$ such that there exist
a finite collection ${\mathcal I}$ of closed intervals
with
\[\bigcup_{I\in{\mathcal I}}I\supseteq E_{[n]}
\ \text{and}
\ \sum_{I\in{\mathcal I}}|I|^{\alpha_{n}+\eta}<\eta\]
is dense in $({\mathcal G},d_{\mathcal G})$.
\end{lemma}
\begin{proof}[Proof of Theorem~\ref{T;Baire sum}
from Lemma~\ref{L;sum lemma}]
We first observe that if
\[\bigcup_{I\in{\mathcal I}}I\supseteq E_{[n]}
\ \text{and}
\ \sum_{I\in{\mathcal I}}|I|^{\alpha_{n}+\eta}<\eta\]
then, if $\theta>0$
is small enough,
\[\sum_{I\in{\mathcal I}}
\big|(I+[-\theta,\theta])\big|^{\alpha_{n}+\eta}<\eta\]
and
\[\bigcup_{I\in{\mathcal I}}(I+[-\theta,\theta])\supseteq F_{[n]}\]
whenever $d(F,E)<\theta/n$. Thus ${\mathcal J}$ is open.
Let us write ${\mathcal E}(j,m)$ for the set of
$E\in{\mathcal G}$ such that there exist
a finite collection ${\mathcal I}(j,m)$ of closed intervals
with
\[\bigcup_{I\in{\mathcal I}(j,m)}I\supseteq E_{[j]}
\ \text{and}
\ \sum_{I\in{\mathcal I}(j,m)}|I|^{\alpha_{j}+1/m}<1/m.\tag*{$\bigstar$}\]
By the first paragraph and the conclusion of
Lemma~\ref{L;sum lemma}, ${\mathcal E}(j,m)$
is open and dense, so the complement of
\[{\mathcal H}=\bigcap_{j=1}^{\infty}\bigcap_{m=1}^{\infty}
{\mathcal E}(j,m)\]
is of first category in $({\mathcal G},d_{\mathcal G})$.
If $E\in{\mathcal H}$ and $j\geq 1$,
then the definition of ${\mathcal G}$
together with Theorem~\ref{T;Hausdorff up}
tells us that $E_{[j]}$ has Hausdorff dimension
at least $\alpha_{j}$. However, $E_{[j]}$ also obeys the
conditions given in $\bigstar$ so $E_{[j]}$ has Hausdorff dimension
at most $\alpha_{j}$ and we are done.
\end{proof}
\section{The final construction}
The central step in our
proof of Lemma~\ref{L;sum lemma} is laid out in the next lemma.
\begin{lemma}\label{L;split start}
Let $\delta,\,\eta>0$, and $n,m\geq 1$.
Write
\[\Lambda=\{r\,:\,n+m\geq r\geq 1\}\]
Suppose that
$E_{1}$, $E_{2}$, \dots, $E_{n+m}$
are each the finite union of non-trivial closed
intervals
such that, whenever $L\subseteq\Lambda$,
and $L$ contains $j$ elements with $n\geq j\geq 1$,
there exists a piecewise
continuous positive $g_{L}:{\mathbb T}\rightarrow{\mathbb R}$
with
\[\supp g_{L}\subseteq \left(\bigcup_{r\in L}E_{r}\right)_{[j]},
\ \int_{\mathbb T}g_{L}(x)\,dx=1
\ \text{and}
\ \iint_{{\mathbb T}^{2}} \frac{g_{L}(x)g_{L}(y)}
{|x-y|^{\alpha_{j}}}\,dx\,dy<K_{j}.\]
% NOTE(review): some text appears to have been lost here (the conclusion
% of Lemma~\ref{L;split start} and the opening of the next lemma); the
% following lines restate the hypotheses. TODO: recover from the original.
Let $\delta,\,\eta>0$, and $n,m\geq 1$.
Write
\[\Lambda=\{r\,:\,n+m\geq r\geq 1\}\]
Suppose
$E_{1}$, $E_{2}$, \dots, $E_{n+m}$
are each the finite union of non-trivial closed
intervals
such that, whenever $L\subseteq\Lambda$,
and $L$ contains $j$ elements with $n\geq j\geq 1$,
there exists a piecewise
continuous positive $g_{L}:{\mathbb T}\rightarrow{\mathbb R}$
with
\[\supp g_{L}\subseteq \left(\bigcup_{r\in L}E_{r}\right)_{[j]},
\ \int_{\mathbb T}g_{L}(x)\,dx=1
\ \text{and}
\ \iint_{{\mathbb T}^{2}} \frac{g_{L}(x)g_{L}(y)}
{|x-y|^{\alpha_{j}}}\,dx\,dy<K_{j}.\]
% NOTE(review): some text appears to have been lost here (the conclusion
% of the preceding lemma and the statement opening of
% Lemma~\ref{L;further}). TODO: recover from the original.
Let $1>\delta>0$ and $E\in{\mathcal G}$.
Then we can find $F\in{\mathcal G}$
with $d_{\mathcal G}(E,F)<\delta$ such that
$F$ is the finite union of non-trivial closed
intervals and there exist piecewise
continuous positive $g_{j}:{\mathbb T}\rightarrow{\mathbb R}$
such that
\[\int_{{\mathbb T}} g_{j}(x)\,dx=1,
\ \supp g_{j}\subseteq F_{[j]}
\ \text{and}
\ \iint_{{\mathbb T}^{2}}\frac{g_{j}(x)g_{j}(y)}
{|x-y|^{\alpha_{j}}}\,dx\,dy<
\iint_{{\mathbb T}^{2}}\frac{1}
{|x-y|^{\alpha_{j}}}\,dx\,dy.\]
Thus, if we set $F=E+[-\delta/2,\delta/2]$
and $g_{j}=\Delta*\mu_{j}$, we have the required result.
\end{proof}
\begin{proof}[Proof of Lemma~\ref{L;sum lemma}]
By Lemma~\ref{L;further}, it suffices to show that,
given $n\geq 1$, $\delta,\,\eta>0$ and $E$ satisfying the conclusions
of Lemma~\ref{L;further}, we can find an $F\in{\mathcal J}$
with $d(F,E)<\delta$.
Since $E$ contains non-trivial intervals, we can
find an $m\geq 1$ such that $E_{[n+m]}={\mathbb T}$.
Write $E_{r}=E$ for $1\leq r\leq n+m$,
\[\Lambda=\{r\,:\,n+m\geq r\geq 1\}\]
and, if $L\subseteq\Lambda$
contains $j$ elements with $n\geq j\geq 1$,
set $g_{L}=g_{j}$.
Now choose $\tilde{E}_{r}$ and $\tilde{g}_{L}$
so that the conclusions of Lemma~\ref{L;split}
hold. We set $F=\bigcup_{r=1}^{n+m}\tilde{E}_{r}$.
By Lemma~\ref{L;split}~(i),
\[d_{\mathcal F}(E,\tilde{E}_{r})<\delta\]
for all $r$ and so $d_{\mathcal F}(E,F)<\delta$.
If we write $\Gamma$ for the collection
of subsets of $\Lambda$ with exactly $n$ elements,
then
\[\bigcup_{P\in{\Gamma}}\left(\bigcup_{r\in P}\tilde{E}_{r}\right)_{[n]}
=\left(\bigcup_{r=1}^{n+m}\tilde{E}_{r}\right)_{[n]}
=F_{[n]}\]
so, by Lemma~\ref{L;split}~(iii),
\[\bigcup_{P\in{\Gamma}}\bigcup_{I\in{\mathcal I}(P)}I\supseteq F_{[n]}\]
and
\[\sum_{P\in{\Gamma}}\sum_{I\in{\mathcal I}(P)}|I|^{\alpha_{n}+\eta}
<\delta.\]
Thus, if $F\in{\mathcal G}$, then $F\in{\mathcal J}$.
In order to show that $F\in{\mathcal G}$ we shall find
piecewise continuous positive
functions $f_{j}:{\mathbb T}\rightarrow{\mathbb R}$
such that
\[\int_{{\mathbb T}} f_{j}(x)\,dx=1,
\ \supp f_{j}\subseteq F_{[j]}
\ \text{and}
\ \iint_{{\mathbb T}^{2}}\frac{f_{j}(x)f_{j}(y)}
{|x-y|^{\alpha_{j}}}\,dx\,dy<K_{j}.\]
% NOTE(review): some text appears to have been lost here (the end of this
% proof and the opening of the exercise whose \end{exercise} appears
% below); the exercise opening is reconstructed minimally. TODO: recover.
\begin{exercise} Let $1>\alpha>0$ and
\[K>\iint_{{\mathbb T}^{2}}\frac{dx\,dy}{|x-y|^{\alpha}}.\]
Let ${\mathcal G}(\alpha)$
be the collection of compact sets $E$ such that
there exists a probability
measure $\mu$ with
\[\supp\mu\subseteq E
\ \text{and}
\ \iint_{{\mathbb T}^{2}} \frac{d\mu(x)\,d\mu(y)}
{|x-y|^{\alpha}}\leq K.\]
Show that
${\mathcal G}(\alpha)$ is a closed subset of $({\mathcal E},d_{\mathcal E})$
Show that, if we use the restriction metric,
quasi-all subsets of ${\mathcal G}(\alpha)$ are Kronecker
sets with Hausdorff dimension exactly $\alpha$. (The existence
of Kronecker sets with specified Hausdorff dimension was
proved, in a much neater manner, by Kaufman
in~\cite{Kaufman2}.)
\end{exercise}
\begin{exercise}\label{E:one dimension}
(This is a very long exercise and is really just included
for the reader's information.) Given a sequence $\alpha_{j}$ with
$0\leq\alpha_{j}\leq\alpha_{j+1}\leq 1$,
show that we can find
a closed set $E$ such that $E_{[j]}$ has
Hausdorff dimension $\alpha_{j}$ for each $j\geq 1$.
If $\alpha_{k+1}=1$, show that we can choose $E$ so that, in addition,
$E_{[k+1]}={\mathbb T}$ but $E_{[k]}$ has Lebesgue measure zero
or we can choose $E$ so that $E_{[j]}$
has Lebesgue measure zero
for all $j$.
\end{exercise}
\section{Remarks} The history of the use of probabilistic methods
to prove results outside probability theory remains to be written,
but I suspect that the diligent historian will be able to
trace an uninterrupted path back to Borel. (This does not
exclude the possibility of isolated examples before Borel
and repeated independent discoveries afterwards.) I discovered
the usefulness of throwing delta measures down at random
from~\cite{Kaufman2}. The reader in search of further inspiration
cannot do better than read Kahane's beautiful book~\cite{Kahane}.
The kind of coin tossing estimates we have used are pretty crude
(but, correspondingly robust). The first chapter
of Bollob{\'a}s's book~\cite{Bollobas} shows what clever and determined
mathematicians can do with coin tossing.
Baire category arguments go back to Baire (and as a set
of related tools much earlier). They were powerfully exploited
by Banach and his school. Kaufman introduced
category methods
into harmonic analysis in~\cite{Kaufman1} and they were
further exploited by Kahane in~\cite{Kahane 3}.
It must be said that, whilst category methods are a useful
tool, probabilistic methods constitute an entire programme.
Debs and Saint-Reymond obtained their famous theorem in~\cite{DS}
by the methods
of descriptive set theory. The book~\cite{KL} discusses this
and other applications of descriptive set theory. In
particular, as we noted earlier,
Matheron and Zelen{\'y} have used these methods
to obtain Theorem~\ref{T;how} independently (see~\cite{MZ}).
\begin{thebibliography}{10}
\bibitem{Bari}
N.~K.~Bari, A treatise on Trigonometric
Series, (English translation by M.~F.~Mullins).
Pergamon Press, Oxford, 1964.
\bibitem{Besicovitch} A.~S.~Besicovitch, \emph{On Kakeya's problem
and a similar one}, Mat. Zeit. {\bf 27}, 1928, 312--320.
\bibitem{Bollobas} B. Bollob{\'a}s,
Random Graphs, CUP, Cambridge (2nd edition), 2001.
\bibitem{Falconer} K.~J.~Falconer, The Geometry of Fractal Sets, CUP,
Cambridge, 1986.
\bibitem{DS}
G.~Debs and J.~Saint-Raymond,
\emph{Ensembles bor{\'e}liens d'unicit{\'e} et d'unicit{\'e} au sens large},
Ann. Inst. Fourier {\bf 37} (1987), no. 3, p. 217-239.
\bibitem{Gupta} S.~K.~Gupta and K.~E.~Hare,
\emph{On convolution squares of singular measures}
Colloq. Math. {\bf 100} (2004) 9--16.
\bibitem{Hausdorff} F.~Hausdorff,
Set theory. Second edition.
Translated from the German by John R. Aumann et al,
Chelsea Publishing Co., New York, 1957.
\bibitem{IV} O.~S.~Iva\v{s}ev-Musatov, \emph{M-sets and Hausdorff
measure}, Izv. Akad. Nauk SSSR Ser. Mat. {\bf 21} (1957), 559--578;
Amer. Math. Soc. Transl. (2) {\bf 14} (1960), 289--310.
\bibitem{Kahane}
J.~P.~Kahane,
Some random series of functions. Second edition.
Cambridge Studies in Advanced Mathematics, {\bf 5}.
CUP, Cambridge, 1985.
\bibitem{Kahane 2} J.-P. Kahane,
\emph{Trois notes sur les ensembles parfaits lin\'{e}aires},
Enseignement Math. (2) {\bf 15}, 1969, 185--192.
\bibitem{Kahane 3} J.-P.~Kahane,
\emph{Sur les r{\'e}arrangements de fonctions de la classe {$A$},}
Studia Math, {\bf 31}, 1968, 287--293.
\bibitem{KS} J.-P.~Kahane and R. Salem,
Ensembles parfaits et s{\'e}ries trigonom{\'e}triques.
Hermann, Paris, 1963.
\bibitem{Kaufman1} R. Kaufman \emph{A functional method
for linear sets I}\/ Israel J. Math. {\bf 5} (1967) 185--187.
\bibitem{Kaufman2}
R.~Kaufman,
\emph{Small subsets of finite Abelian groups}
Annales de l'Institut Fourier, {\bf 18} (1968) 99--102.
\bibitem{KL} A.~S.~Kechris and A.~Louveau,
Descriptive set theory and the structure of sets of uniqueness,
LMS Lecture Notes {\bf 128}, CUP, Cambridge, 1987.
\bibitem{Korner,Helson}
T.~W.~K{\"o}rner,
\emph{Kahane's Helson curve.}
Proceedings of the Conference in Honor of Jean-Pierre Kahane (Orsay, 1993).
J. Fourier Anal. Appl. 1995, Special Issue, 325--346.
\bibitem{Korner,Besicovitch}
T.~W.~K{\"o}rner,
\emph{Besicovitch via Baire,}
Studia Math. {\bf 158} (2003), no. 1, 65--78.
\bibitem{Korner,Rudin}
T.~W.~K{\"o}rner,
\emph{Measures on independent sets,
a quantitative version of Rudin's theorem,}
Proc. Amer. Math. Soc. {\bf 135} (2007), no. 12, 3823--3832
\bibitem{Wintner}
T.~W.~K{\"o}rner,
\emph{On a theorem of Saeki concerning
convolution squares of singular measures,}
Bull. Soc. Math. France {\bf 136} (2008), no. 3, 439--464.
\bibitem{Korner,Sum}
T.~W.~K{\"o}rner,
\emph{Hausdorff dimension of sums of sets with themselves,}
Studia Math. {\bf 188} (2008), no. 3, 287--295.
\bibitem{Korner,Debs}
T.~W.~K{\"o}rner,
\emph{Variations on a theme de Debs and Saint Raymond,}
J. Lond. Math. Soc. (2) 79 (2009), no. 1, 33--52.
\bibitem{Korner,zero}
T.~W.~K{\"o}rner,
\emph{Baire category and zero sets,}
C. R. Math. Acad. Sci. Paris 346 (2008), no. 13-14, 741--743.
\bibitem{Extra Rudin} T.~W.~K{\"o}rner
\emph{Fourier transforms of measures and algebraic
relations on their supports,}
Annales de l'Institut Fourier, {\bf 59}
(2009), no. 4, 1291-1319.
\bibitem{MZ}
E.~Matheron, and M.~Zelen{\'y},
\emph{Descriptive set theory of families of small sets,}
Bull. Symbolic Logic, {\bf 13} (2007) no 4, 482--537
\bibitem{Rudin}
W.~Rudin
\emph{Fourier--Stieltjes transforms of measures on independent sets,}
Bull. Amer. Math. Soc. {\bf 66} (1960) 199--202.
\bibitem{Saeki}
S.~Saeki,
\emph{On convolution squares of singular measures,}
Illinois J. Math. {\bf 24} (1980), 225--232.
\bibitem{Salem} R.~Salem,
\emph{On singular monotonic functions whose
spectrum has a given Hausdorff dimension,}
Ark. Mat. {\bf 1}, (1951) 353--365.
\bibitem{WW} N.~Wiener and A.~Wintner,
\emph{Fourier--Stieltjes transforms and singular infinite
convolutions,} Amer. J. Math. {\bf 60} (1938), 513--22.
\end{thebibliography}
\end{document}