% Copyright 2017-2024 Ian Jauslin
%
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
\documentclass{ian}
\usepackage{largearray}
\usepackage{dsfont}

\begin{document}

\hbox{}
\hfil{\bf\LARGE
{\tt nstrophy}
}
\vfill

\tableofcontents

\vfill
\eject
\setcounter{page}{1}
\pagestyle{plain}
\section{Description of the computation}
\subsection{Irreversible equation}
\indent Consider the incompressible Navier-Stokes equation in 2 dimensions
\begin{equation}
  \partial_tU=\nu\Delta U+G-(U\cdot\nabla)U
  ,\quad
  \nabla\cdot U=0
  \label{ins}
\end{equation}
in which $G$ is the forcing term.
We take periodic boundary conditions, so, at every given time, $U(t,\cdot)$ is a function on the torus $\mathbb T^2:=\mathbb R^2/(L\mathbb Z)^2$. We represent $U(t,\cdot)$ using its Fourier series
\begin{equation}
  \hat U_k(t):=\frac1{L^2}\int_{\mathbb T^2}dx\ e^{i\frac{2\pi}Lk\cdot x}U(t,x)
\end{equation}
for $k\in\mathbb Z^2$, and rewrite\-~(\ref{ins}) as
\begin{equation}
  \partial_t\hat U_k=
  -\frac{4\pi^2}{L^2}\nu k^2\hat U_k+\hat G_k
  -i\frac{2\pi}L\sum_{\displaystyle\mathop{\scriptstyle p,q\in\mathbb Z^2}_{p+q=k}}
  (q\cdot\hat U_p)\hat U_q
  ,\quad
  k\cdot\hat U_k=0
  .
  \label{ins_k}
\end{equation}
We then reduce the equation to a scalar one, by writing
\begin{equation}
  \hat U_k=\frac{i2\pi k^\perp}{L|k|}\hat u_k\equiv\frac{i2\pi}{L|k|}(-k_y\hat u_k,k_x\hat u_k)
  \label{udef}
\end{equation}
in terms of which, multiplying both sides of the equation by $\frac L{i2\pi}\frac{k^\perp}{|k|}$,
\begin{equation}
  \partial_t\hat u_k=
  -\frac{4\pi^2}{L^2}\nu k^2\hat u_k
  +\hat g_k
  +\frac{4\pi^2}{L^2|k|}\sum_{\displaystyle\mathop{\scriptstyle p,q\in\mathbb Z^2}_{p+q=k}}
  \frac{(q\cdot p^\perp)(k^\perp\cdot q^\perp)}{|q||p|}\hat u_p\hat u_q
\end{equation}
with
\begin{equation}
  \hat g_k:=\frac{Lk^\perp}{2i\pi|k|}\cdot\hat G_k
  .
  \label{gdef}
\end{equation}
Furthermore,
\begin{equation}
  (q\cdot p^\perp)(k^\perp\cdot q^\perp)
  =
  (q\cdot p^\perp)(q^2+p\cdot q)
\end{equation}
and $q\cdot p^\perp$ is antisymmetric under the exchange of $q$ and $p$, so the symmetric term $p\cdot q$ drops out of the sum over $p+q=k$. Therefore,
\begin{equation}
  \partial_t\hat u_k=
  -\frac{4\pi^2}{L^2}\nu k^2\hat u_k+\hat g_k
  +\frac{4\pi^2}{L^2|k|}T(\hat u,k)
  =:\mathfrak F_k(\hat u)
\end{equation}
with
\begin{equation}
  T(\hat u,k):=
  \sum_{\displaystyle\mathop{\scriptstyle p,q\in\mathbb Z^2}_{p+q=k}}
  \frac{(q\cdot p^\perp)|q|}{|p|}\hat u_p\hat u_q
  .
  \label{T}
\end{equation}
We truncate the Fourier modes and assume that $\hat u_k=0$ if $|k_1|>K_1$ or $|k_2|>K_2$. Let
\begin{equation}
  \mathcal K:=\{(k_1,k_2),\ |k_1|\leqslant K_1,\ |k_2|\leqslant K_2\}
  .
\end{equation}
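For orientation, the truncated sum\-~(\ref{T}) can be evaluated directly by looping over all pairs $p,q\in\mathcal K$ with $p+q=k$. The following is a minimal Python sketch of that direct evaluation (it is not the {\tt nstrophy} implementation, and the dictionary storage of the modes is an assumption made only for the illustration):
\begin{verbatim}
import numpy as np

def T_direct(uhat, K1, K2, k):
    # Direct evaluation of T(uhat,k) = sum_{p+q=k} (q.p^perp) |q|/|p| uhat_p uhat_q.
    # uhat: dict mapping (k1,k2) -> complex mode; modes absent from the dict are zero.
    kx, ky = k
    total = 0j
    for px in range(-K1, K1 + 1):
        for py in range(-K2, K2 + 1):
            qx, qy = kx - px, ky - py            # q = k - p
            if abs(qx) > K1 or abs(qy) > K2:
                continue
            if (px, py) == (0, 0) or (qx, qy) == (0, 0):
                continue                          # the p=0 and q=0 modes do not contribute
            q_dot_pperp = -qx * py + qy * px      # q . p^perp, with p^perp = (-p_y, p_x)
            total += q_dot_pperp * np.hypot(qx, qy) / np.hypot(px, py) \
                     * uhat.get((px, py), 0) * uhat.get((qx, qy), 0)
    return total
\end{verbatim}
This direct evaluation costs $O(K_1K_2)$ operations per mode, which is what motivates the FFT-based evaluation described below.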
\subsubsection{Runge-Kutta methods}.
To solve the equation numerically, we will use Runge-Kutta methods, which compute an approximate value $\hat u_k^{(n)}$ for $\hat u_k(t_n)$.
{\tt nstrophy} supports the 4th order Runge-Kutta ({\tt RK4}) and 2nd order Runge-Kutta ({\tt RK2}) algorithms.
In addition, several variable step methods are implemented:
\begin{itemize}
  \item the Runge-Kutta-Dormand-Prince method ({\tt RKDP54}), which is of 5th order, and adjusts the step by comparing to a 4th order method;
  \item the Runge-Kutta-Fehlberg method ({\tt RKF45}), which is of 4th order, and adjusts the step by comparing to a 5th order method;
  \item the Runge-Kutta-Bogacki-Shampine method ({\tt RKBS32}), which is of 3rd order, and adjusts the step by comparing to a 2nd order method.
\end{itemize}
In these adaptive step methods, two approximations of the step are computed at different orders, $\hat u_k^{(n)}$ and $\hat U_k^{(n)}$, and the step size is adjusted at every step in such a way that the error is small enough:
\begin{equation}
  \|\hat u^{(n)}-\hat U^{(n)}\|
  <\epsilon_{\mathrm{target}}
\end{equation}
for some given $\epsilon_{\mathrm{target}}$, set using the {\tt adaptive\_tolerance} parameter.
The choice of the norm matters, and will be discussed below.
If the error is larger than the target, then the step size is decreased.
How this is done depends on the order of the algorithm.
If the order is $q$ (here we mean the smaller of the two orders, so 4 for {\tt RKDP54} and {\tt RKF45} and 2 for {\tt RKBS32}), then we expect
\begin{equation}
  \|\hat u^{(n)}-\hat U^{(n)}\|=\delta_n^qC_n
  .
\end{equation}
We wish to set $\delta_{n+1}$ so that
\begin{equation}
  \delta_{n+1}^qC_n=\epsilon_{\mathrm{target}}
\end{equation}
so
\begin{equation}
  \delta_{n+1}
  =\left(\frac{\epsilon_{\mathrm{target}}}{C_n}\right)^{\frac1q}
  =\delta_n\left(\frac{\epsilon_{\mathrm{target}}}{\|\hat u^{(n)}-\hat U^{(n)}\|}\right)^{\frac1q}
  .
  \label{adaptive_delta}
\end{equation}
(Actually, to be safe and ensure that $\delta$ decreases sufficiently, we multiply this by a safety factor that can be set using the {\tt adaptive\_factor} parameter.)
If the error is smaller than the target, we increase $\delta$ using\-~(\ref{adaptive_delta}) (without the safety factor).
To be safe, we also set a maximal value for $\delta$ via the {\tt max\_delta} parameter.
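In pseudo-code, the step-size update then looks as follows (a schematic Python sketch; the function and argument names are ours, with {\tt err} standing for $\|\hat u^{(n)}-\hat U^{(n)}\|$, {\tt safety} for the {\tt adaptive\_factor} parameter, and {\tt max\_delta} for the {\tt max\_delta} parameter):
\begin{verbatim}
def next_step_size(delta, err, epsilon_target, q, safety, max_delta):
    # delta: current step size; err: ||uhat^(n) - Uhat^(n)|| in the adaptive norm
    # q: order of the lower-order method (4 for RKDP54/RKF45, 2 for RKBS32)
    new_delta = delta * (epsilon_target / err) ** (1.0 / q)
    if err > epsilon_target:
        new_delta *= safety      # shrink further by the safety factor and redo the step
    return min(new_delta, max_delta)
\end{verbatim}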
\bigskip

\indent
The choice of the norm $\|\cdot\|$ matters.
It can be made by specifying the parameter {\tt adaptive\_norm}.
\begin{itemize}
  \item A naive choice is to take $\|\cdot\|$ to be the normalized $L_1$ norm:
  \begin{equation}
    \|f\|:=
    \frac1{\mathcal N}\sum_k|f_k|
    ,\quad
    \mathcal N:=\sum_k|\hat u_k^{(n)}-\hat u_k^{(n-1)}|
    .
  \end{equation}
  This norm is selected by choosing {\tt adaptive\_norm=L1}.
  \item Empirically, we have found that $|\hat u-\hat U|$ behaves like $k^{-3}$ for {\tt RKDP54} and {\tt RKF45}, and like $k^{-\frac32}$ for {\tt RKBS32}, so a norm of the form
  \begin{equation}
    \|f\|:=\frac1{\mathcal N}\sum_k|f_k|k^{-3}
    ,\quad
    \mathcal N:=\sum_k|\hat u_k^{(n)}-\hat u_k^{(n-1)}|k^{-3}
  \end{equation}
  or
  \begin{equation}
    \|f\|:=\frac1{\mathcal N}\sum_k|f_k|k^{-\frac32}
    ,\quad
    \mathcal N:=\sum_k|\hat u_k^{(n)}-\hat u_k^{(n-1)}|k^{-\frac32}
  \end{equation}
  are sensible choices.
  These norms are selected by choosing {\tt adaptive\_norm=k3} and {\tt adaptive\_norm=k32} respectively.
  \item
  Another option is to define a norm based on the expression of the enstrophy\-~(\ref{enstrophy}):
  \begin{equation}
    \|f\|:=\frac1{\mathcal N}\sqrt{\sum_k k^2|f_k|^2}
    ,\quad
    \mathcal N:=\frac{\sqrt{\sum_k k^2|\hat u_k^{(n)}|^2}+\sqrt{\sum_k k^2|\hat U_k^{(n)}|^2}}{\sum_k k^2|\hat u_k^{(n)}|^2}
    .
  \end{equation}
  Doing so controls the error of the enstrophy through
  \begin{equation}
    \frac1{\mathcal N^2}|\mathcal En(\hat u)-\mathcal En(\hat U)|\equiv|\|\hat u\|^2-\|\hat U\|^2|\leqslant\|\hat u-\hat U\|(\|\hat u\|+\|\hat U\|)
  \end{equation}
  so
  \begin{equation}
    \frac1{\mathcal N^2}
    |\mathcal En(\hat u)-\mathcal En(\hat U)|\leqslant
    \|\hat u-\hat U\|\frac1{\mathcal N}\left(\sqrt{\sum_k k^2|\hat u_k|^2}+\sqrt{\sum_k k^2|\hat U_k|^2}\right)
  \end{equation}
  and thus
  \begin{equation}
    \frac{|\mathcal En(\hat u)-\mathcal En(\hat U)|}{\mathcal En(\hat u)}\leqslant
    \|\hat u-\hat U\|
    .
  \end{equation}
  This norm is selected by choosing {\tt adaptive\_norm=enstrophy}.
\end{itemize}
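As an illustration, these norms amount to the following (a Python sketch in which the modes are assumed to be stored in flat {\tt numpy} arrays, with {\tt knorm} the corresponding array of $|k|$'s and the $k=0$ mode excluded; this is not the {\tt nstrophy} data layout):
\begin{verbatim}
import numpy as np

# f = uhat^(n) - Uhat^(n), du = uhat^(n) - uhat^(n-1), knorm = |k| (floats) per stored mode

def norm_L1(f, du):
    return np.sum(np.abs(f)) / np.sum(np.abs(du))

def norm_k(f, du, knorm, power):             # power = 3 for "k3", 1.5 for "k32"
    w = knorm ** (-power)
    return np.sum(np.abs(f) * w) / np.sum(np.abs(du) * w)

def norm_enstrophy(uhat, Uhat, knorm):
    a = np.sqrt(np.sum(knorm**2 * np.abs(uhat)**2))
    b = np.sqrt(np.sum(knorm**2 * np.abs(Uhat)**2))
    N = (a + b) / np.sum(knorm**2 * np.abs(uhat)**2)
    return np.sqrt(np.sum(knorm**2 * np.abs(uhat - Uhat)**2)) / N
\end{verbatim}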
\subsubsection{Reality}.
Since $U$ is real, $\hat U_{-k}=\hat U_k^*$, and so
\begin{equation}
  \hat u_{-k}=\hat u_k^*
  .
  \label{realu}
\end{equation}
Similarly,
\begin{equation}
  \hat g_{-k}=\hat g_k^*
  .
  \label{realg}
\end{equation}
Thus,
\begin{equation}
  T(\hat u,-k)
  =
  T(\hat u,k)^*
  .
  \label{realT}
\end{equation}
In order to keep the computation as quick as possible, we only compute and store the values for $k_1\geqslant0$.
\subsubsection{FFT}. We compute $T$ using a fast Fourier transform, defined as
\begin{equation}
  \mathcal F(f)(n):=\sum_{m\in\mathcal N}e^{-\frac{2i\pi}{N_1}m_1n_1-\frac{2i\pi}{N_2}m_2n_2}f(m_1,m_2)
\end{equation}
where
\begin{equation}
  \mathcal N:=\{(n_1,n_2),\ 0\leqslant n_1<N_1,\ 0\leqslant n_2<N_2\}
\end{equation}
for some fixed $N_1,N_2$. The transform is inverted by
\begin{equation}
  \frac1{N_1N_2}\mathcal F^*(\mathcal F(f))(n)=f(n)
\end{equation}
in which $\mathcal F^*$ is defined like $\mathcal F$ but with the opposite phase.
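With these conventions, $\mathcal F$ coincides with the standard discrete Fourier transform (negative phase) and $\frac1{N_1N_2}\mathcal F^*$ with its inverse. For instance, in terms of {\tt numpy} (an illustration of the conventions only; the actual implementation may use a different FFT library):
\begin{verbatim}
import numpy as np

N1, N2 = 8, 6
f = np.random.standard_normal((N1, N2)) + 1j * np.random.standard_normal((N1, N2))

F = np.fft.fft2(f)                    # F(f)(n) = sum_m exp(-2i pi(m1 n1/N1 + m2 n2/N2)) f(m)
Fstar = np.fft.ifft2(F) * (N1 * N2)   # F^*(g)(n): the same sum with the opposite phase

assert np.allclose(Fstar / (N1 * N2), f)   # (1/(N1 N2)) F^*(F(f)) = f
\end{verbatim}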
\bigskip
\indent The condition $p+q=k$ can be rewritten as
\begin{equation}
  T(\hat u,k)
  =
  \sum_{p,q\in\mathcal K}
  \frac1{N_1N_2}
  \sum_{n\in\mathcal N}e^{-\frac{2i\pi}{N_1}n_1(p_1+q_1-k_1)-\frac{2i\pi}{N_2}n_2(p_2+q_2-k_2)}
  (q\cdot p^\perp)\frac{|q|}{|p|}\hat u_q\hat u_p
\end{equation}
provided
\begin{equation}
  N_i>3K_i.
\end{equation}
Indeed, $\sum_{n_i=0}^{N_i-1}e^{-\frac{2i\pi}{N_i}n_im_i}$ vanishes unless $m_i=0\%N_i$ (where $\%N_i$ means `modulo $N_i$'), and, if $p,q,k\in\mathcal K$, then $|p_i+q_i-k_i|\leqslant3K_i$, so, as long as $N_i>3K_i$, $(p_i+q_i-k_i)=0\%N_i$ implies $p_i+q_i=k_i$.
Therefore,
\begin{equation}
  T(\hat u,k)
  =
  \textstyle
  \frac1{N_1N_2}
  \mathcal F^*\left(
  \mathcal F\left(\frac{p_x\hat u_p}{|p|}\right)(n)
  \mathcal F\left(q_y|q|\hat u_q\right)(n)
  -
  \mathcal F\left(\frac{p_y\hat u_p}{|p|}\right)(n)
  \mathcal F\left(q_x|q|\hat u_q\right)(n)
  \right)(k)
  .
\end{equation}
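Schematically, this pseudo-spectral evaluation can be written as follows (a minimal Python/{\tt numpy} sketch, again with the modes stored in a dictionary for the sake of the illustration; wavenumbers are placed on the $N_1\times N_2$ grid modulo $N_i$, which is exactly the wrap-around used by the discrete transform):
\begin{verbatim}
import numpy as np

def T_fft(uhat, K1, K2):
    # uhat: dict (k1,k2) -> complex mode, nonzero only for |k1|<=K1, |k2|<=K2, k != 0
    N1, N2 = 3 * K1 + 1, 3 * K2 + 1           # N_i > 3 K_i: no aliasing in p+q=k
    A = np.zeros((N1, N2), dtype=complex)     # p_x uhat_p / |p|
    B = np.zeros((N1, N2), dtype=complex)     # p_y uhat_p / |p|
    C = np.zeros((N1, N2), dtype=complex)     # q_x |q| uhat_q
    D = np.zeros((N1, N2), dtype=complex)     # q_y |q| uhat_q
    for (kx, ky), u in uhat.items():
        r = np.hypot(kx, ky)
        A[kx % N1, ky % N2] = kx * u / r
        B[kx % N1, ky % N2] = ky * u / r
        C[kx % N1, ky % N2] = kx * r * u
        D[kx % N1, ky % N2] = ky * r * u
    # (1/(N1 N2)) F^*( F(A)F(D) - F(B)F(C) ); ifft2 already carries the 1/(N1 N2) factor
    Tgrid = np.fft.ifft2(np.fft.fft2(A) * np.fft.fft2(D) - np.fft.fft2(B) * np.fft.fft2(C))
    return {(kx, ky): Tgrid[kx % N1, ky % N2] for (kx, ky) in uhat}
\end{verbatim}
Each call costs $O(N_1N_2\log(N_1N_2))$ operations instead of the $O((K_1K_2)^2)$ of the direct sum.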
\subsubsection{Energy}.
We define the energy as
\begin{equation}
  E(t)=\frac12\int\frac{dx}{L^2}\ U^2(t,x)=\frac12\sum_{k\in\mathbb Z^2}|\hat U_k|^2
  .
\end{equation}
We have
\begin{equation}
  \partial_tE=\int\frac{dx}{L^2}\ U\partial_tU
  =
  \nu\int\frac{dx}{L^2}\ U\Delta U
  +\int\frac{dx}{L^2}\ UG
  -\int\frac{dx}{L^2}\ U(U\cdot\nabla)U
  .
\end{equation}
Since we have periodic boundary conditions,
\begin{equation}
  \int dx\ U\Delta U=-\int dx\ |\nabla U|^2
  .
\end{equation}
Furthermore,
\begin{equation}
  I:=\int dx\ U(U\cdot\nabla)U
  =\sum_{i,j=1,2}\int dx\ U_iU_j\partial_jU_i
  =
  -\sum_{i,j=1,2}\int dx\ (\partial_jU_i)U_jU_i
  -\sum_{i,j=1,2}\int dx\ U_i(\partial_jU_j)U_i
\end{equation}
and since $\nabla\cdot U=0$,
\begin{equation}
  I
  =
  -I
\end{equation}
and so $I=0$.
Thus,
\begin{equation}
  \partial_tE=
  \int\frac{dx}{L^2}\ \left(-\nu|\nabla U|^2+UG\right)
  =
  \sum_{k\in\mathbb Z^2}\left(-\frac{4\pi^2}{L^2}\nu k^2|\hat U_k|^2+\hat U_{-k}\hat G_k\right)
  .
\end{equation}
Furthermore,
\begin{equation}
  \sum_{k\in\mathbb Z^2}k^2|\hat U_k|^2\geqslant
  \sum_{k\in\mathbb Z^2}|\hat U_k|^2-|\hat U_0|^2
  =2E-|\hat U_0|^2
\end{equation}
so
\begin{equation}
  \partial_tE\leqslant-\frac{8\pi^2}{L^2}\nu E+\frac{4\pi^2}{L^2}\nu\hat U_0^2+\sum_{k\in\mathbb Z^2}\hat U_{-k}\hat G_k
  \leqslant
  -\frac{8\pi^2}{L^2}\nu E+\frac{4\pi^2}{L^2}\nu\hat U_0^2+
  \|\hat G\|_2\sqrt{2E}
  .
\end{equation}
In particular, if $\hat U_0=0$ (which corresponds to keeping the center of mass fixed),
\begin{equation}
  \partial_tE\leqslant-\frac{8\pi^2}{L^2}\nu E+\|\hat G\|_2\sqrt{2E}
  .
\end{equation}
Now, if $\frac{8\pi^2}{L^2}\nu\sqrt E<\sqrt2\|\hat G\|_2$, then
\begin{equation}
  \frac{\partial_tE}{-\frac{8\pi^2}{L^2}\nu E+\|\hat G\|_2\sqrt{2E}}\leqslant1
\end{equation}
and so
\begin{equation}
  \frac{\log(1-\frac{8\pi^2\nu}{L^2\sqrt2\|\hat G\|_2}\sqrt{E(t)})}{-\frac{4\pi^2}{L^2}\nu}\leqslant t+
  \frac{\log(1-\frac{8\pi^2\nu}{L^2\sqrt2\|\hat G\|_2}\sqrt{E(0)})}{-\frac{4\pi^2}{L^2}\nu}
\end{equation}
and
\begin{equation}
  E(t)
  \leqslant
  \left(
  \frac{L^2\sqrt2\|\hat G\|_2}{8\pi^2\nu}(1-e^{-\frac{4\pi^2}{L^2}\nu t})
  +e^{-\frac{4\pi^2}{L^2}\nu t}\sqrt{E(0)}
  \right)^2
  .
\end{equation}
If $\frac{8\pi^2}{L^2}\nu\sqrt E>\sqrt2\|\hat G\|_2$,
\begin{equation}
  \frac{\partial_tE}{-\frac{8\pi^2}{L^2}\nu E+\|\hat G\|_2\sqrt{2E}}\geqslant1
\end{equation}
and so
\begin{equation}
  \frac{\log(\frac{8\pi^2\nu}{L^2\sqrt2\|\hat G\|_2}\sqrt{E(t)}-1)}{-\frac{4\pi^2}{L^2}\nu}\geqslant t+
  \frac{\log(\frac{8\pi^2\nu}{L^2\sqrt2\|\hat G\|_2}\sqrt{E(0)}-1)}{-\frac{4\pi^2}{L^2}\nu}
\end{equation}
and
\begin{equation}
  E(t)
  \leqslant
  \left(
  \frac{L^2\sqrt2\|\hat G\|_2}{8\pi^2\nu}(1-e^{-\frac{4\pi^2}{L^2}\nu t})
  +e^{-\frac{4\pi^2}{L^2}\nu t}\sqrt{E(0)}
  \right)^2
  .
\end{equation}
\subsubsection{Enstrophy}.
The enstrophy is defined as
\begin{equation}
  \mathcal En(t)=\int\frac{dx}{L^2}\ |\nabla U|^2
  =\frac{4\pi^2}{L^2}\sum_{k\in\mathbb Z^2}k^2|\hat U_k|^2
  .
  \label{enstrophy}
\end{equation}
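Both the energy and the enstrophy are plain weighted sums over the retained modes. For instance (a Python sketch, assuming the full set of vector modes $\hat U_k$, including $k_1<0$, is available in a dictionary; the actual code works with the scalar modes $\hat u_k$):
\begin{verbatim}
import numpy as np

def energy_enstrophy(Uhat, L):
    # Uhat: dict (k1,k2) -> hat U_k as a length-2 complex array, with hat U_{-k} = conj(hat U_k)
    E = 0.5 * sum(np.sum(np.abs(U)**2) for U in Uhat.values())
    En = (4 * np.pi**2 / L**2) * sum((kx**2 + ky**2) * np.sum(np.abs(U)**2)
                                     for (kx, ky), U in Uhat.items())
    return E, En
\end{verbatim}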
\subsubsection{Lyapunov exponents}
\indent
To compute the Lyapunov exponents, we must first compute the Jacobian of the map $\hat u^{(n)}\mapsto\hat u^{(n+1)}$.
This map is always of Runge-Kutta type, that is,
\begin{equation}
  \hat u(t_{n+1})=\mathfrak F_{t_n}(\hat u(t_n))
  .
\end{equation}
Let $D\mathfrak F_{t_n}$ be the Jacobian of this map, in which we split the real and imaginary parts: if
\begin{equation}
  \hat u_k(t_n)=:\rho_k+i\iota_k
  ,\quad
  \mathfrak F_{t_n}(\hat u(t_n))_k=:\phi_k+i\psi_k
\end{equation}
then
\begin{equation}
  (D\mathfrak F_{t_n})_{k,p}:=\left(\begin{array}{cc}
    \partial_{\rho_p}\phi_k&\partial_{\iota_p}\phi_k\\
    \partial_{\rho_p}\psi_k&\partial_{\iota_p}\psi_k
  \end{array}\right)
  .
\end{equation}
We compute this Jacobian numerically using a finite difference:
\begin{equation}
  (D\mathfrak F_{t_n})_{k,p}:=\frac1\epsilon
  \left(\begin{array}{cc}
    \phi_k(\hat u+\epsilon\delta_p)-\phi_k(\hat u)&\phi_k(\hat u+i\epsilon\delta_p)-\phi_k(\hat u)\\
    \psi_k(\hat u+\epsilon\delta_p)-\psi_k(\hat u)&\psi_k(\hat u+i\epsilon\delta_p)-\psi_k(\hat u)
  \end{array}\right)
\end{equation}
where $\delta_p$ denotes the configuration whose only non-vanishing mode is $\hat u_p=1$.
The value of $\epsilon$ can be set using the {\tt D\_epsilon} parameter.
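A schematic version of this finite difference (a Python sketch, with the step map $\mathfrak F_{t_n}$ given as a function {\tt F} acting on a flat array of complex modes; the splitting into real and imaginary parts follows the definition above):
\begin{verbatim}
import numpy as np

def jacobian_fd(F, uhat, eps):
    # F: map from an array of complex modes to the next step; eps plays the role of D_epsilon
    # Returns the 2M x 2M real Jacobian, rows = (phi_k, psi_k), columns = (rho_p, iota_p)
    M = uhat.size
    F0 = F(uhat)
    J = np.empty((2 * M, 2 * M))
    for p in range(M):
        for phase, col in ((1.0, 2 * p), (1j, 2 * p + 1)):   # perturb Re then Im of mode p
            pert = uhat.copy()
            pert[p] += eps * phase
            dF = (F(pert) - F0) / eps
            J[0::2, col] = dF.real
            J[1::2, col] = dF.imag
    return J
\end{verbatim}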
%, so
%\begin{equation}
% D\hat u^{(n+1)}=\mathds 1+\delta\sum_{i=1}^q w_iD\mathfrak F(\hat u^{(n)})
% .
%\end{equation}
%We then compute
%\begin{equation}
% (D\mathfrak F(\hat u))_{k,\ell}
% =
% -\frac{4\pi^2}{L^2}\nu k^2\delta_{k,\ell}
% +\frac{4\pi^2}{L^2|k|}\partial_{\hat u_\ell}T(\hat u,k)
%\end{equation}
%and, by\-~(\ref{T}),
%\begin{equation}
% \partial_{\hat u_\ell}T(\hat u,k)
% =
% \sum_{\displaystyle\mathop{\scriptstyle q\in\mathbb Z^2}_{\ell+q=k}}
% \left(
% \frac{(q\cdot \ell^\perp)|q|}{|\ell|}
% +
% \frac{(\ell\cdot q^\perp)|\ell|}{|q|}
% \right)\hat u_q
% =
% (k\cdot \ell^\perp)\left(
% \frac{|k-\ell|}{|\ell|}
% -
% \frac{|\ell|}{|k-\ell|}
% \right)\hat u_{k-\ell}
% .
%\end{equation}
\bigskip

\indent
The Lyapunov exponents are then defined as
\begin{equation}
  \frac1{t_n}\log\mathrm{spec}(\pi_{t_n})
  ,\quad
  \pi_{t_n}:=\prod_{i=1}^nD\mathfrak F_{t_{i-1}}
  .
\end{equation}
However, the product of the $D\mathfrak F_{t_i}$ may become quite large or quite small (if the exponents are not all $0$).
To avoid this, we periodically rescale the product.
We set $\mathfrak L_r>0$ (set by adjusting the {\tt lyanpunov\_reset} parameter), and, when $t_n$ crosses a multiple of $\mathfrak L_r$, we rescale the eigenvalues of $\pi_{t_n}$ to 1.
To do so, we perform a $QR$ decomposition:
\begin{equation}
  \pi_{\alpha\mathfrak L_r}
  =Q^{(\alpha)}R^{(\alpha)}
\end{equation}
where $Q^{(\alpha)}$ is orthogonal and $R^{(\alpha)}$ is upper triangular, and we divide by $R^{(\alpha)}$ (thus only keeping $Q^{(\alpha)}$).
The Lyapunov exponents at time $\alpha\mathfrak L_r$ are then
\begin{equation}
  \frac1{\alpha\mathfrak L_r}\sum_{\beta=1}^\alpha\log\mathrm{spec}(R^{(\beta)})
  .
\end{equation}
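Schematically, the rescaling and the accumulation of the exponents go as follows (a Python sketch based on the standard $QR$ factorization as provided by {\tt numpy}, not the actual implementation; {\tt blocks} stands for the Jacobians $D\mathfrak F_{t_i}$ grouped by intervals of length $\mathfrak L_r$):
\begin{verbatim}
import numpy as np

def lyapunov_estimate(blocks, reset_length):
    # blocks: list of lists of Jacobian matrices, one list per interval of length reset_length
    dim = blocks[0][0].shape[0]
    Q = np.eye(dim)
    logs = np.zeros(dim)
    for block in blocks:
        P = Q
        for D in block:
            P = D @ P                  # accumulate the product of Jacobians
        Q, R = np.linalg.qr(P)         # rescale: keep the orthogonal factor
        logs += np.log(np.abs(np.diag(R)))
    return logs / (len(blocks) * reset_length)
\end{verbatim}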
\subsubsection{Numerical instability}.
In order to prevent the algorithm from blowing up, it is necessary to impose the reality of $u(x)$ by hand; otherwise, truncation errors build up and lead to divergences.
It is sufficient to ensure that the convolution term $T(\hat u,k)$ satisfies $T(\hat u,-k)=T(\hat u,k)^*$.
After imposing this condition, the algorithm no longer blows up, but it is still unstable (for instance, increasing $K_1$ or $K_2$ leads to very different results).
\subsection{Reversible equation}
\indent The reversible equation is similar to\-~(\ref{ins}) but, instead of fixing the viscosity, we fix the enstrophy\-~\cite{Ga22}.
It is defined directly in Fourier space:
\begin{equation}
  \partial_t\hat U_k=
  -\frac{4\pi^2}{L^2}\alpha(\hat U)k^2\hat U_k+\hat G_k
  -i\frac{2\pi}L\sum_{\displaystyle\mathop{\scriptstyle p,q\in\mathbb Z^2}_{p+q=k}}
  (q\cdot\hat U_p)\hat U_q
  ,\quad
  k\cdot\hat U_k=0
\end{equation}
where $\alpha$ is chosen such that the enstrophy is constant.
In terms of $\hat u$\-~(\ref{udef}), (\ref{gdef}), (\ref{T}):
\begin{equation}
  \partial_t\hat u_k=
  -\frac{4\pi^2}{L^2}\alpha(\hat u)k^2\hat u_k
  +\hat g_k
  +\frac{4\pi^2}{L^2|k|}T(\hat u,k)
  .
  \label{rns_k}
\end{equation}
To compute $\alpha$, we use the constancy of the enstrophy:
\begin{equation}
  \sum_{k\in\mathbb Z^2}k^2\hat U_k\cdot\partial_t\hat U_k
  =0
\end{equation}
which, in terms of $\hat u$, is
\begin{equation}
  \sum_{k\in\mathbb Z^2}k^2\hat u_k^*\partial_t\hat u_k
  =0
\end{equation}
that is,
\begin{equation}
  \frac{4\pi^2}{L^2}\alpha(\hat u)\sum_{k\in\mathbb Z^2}k^4|\hat u_k|^2
  =
  \sum_{k\in\mathbb Z^2}k^2\hat u_k^*\hat g_k
  +\frac{4\pi^2}{L^2}\sum_{k\in\mathbb Z^2}|k|\hat u_k^*T(\hat u,k)
\end{equation}
and so
\begin{equation}
  \alpha(\hat u)
  =\frac{\frac{L^2}{4\pi^2}\sum_kk^2\hat u_k^*\hat g_k+\sum_k|k|\hat u_k^*T(\hat u,k)}{\sum_kk^4|\hat u_k|^2}
  .
\end{equation}
Note that, by\-~(\ref{realu})-(\ref{realT}),
\begin{equation}
  \alpha(\hat u)\in\mathbb R
  .
\end{equation}
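For instance, $\alpha$ can be evaluated from the modes and from $T(\hat u,\cdot)$ as follows (a Python sketch over a dictionary of modes; when summing over all retained $k$, the imaginary parts cancel by\-~(\ref{realu})-(\ref{realT}), so the real part is kept explicitly):
\begin{verbatim}
import numpy as np

def alpha(uhat, ghat, T, L):
    # uhat, ghat, T: dicts (k1,k2) -> complex, over the same set of retained modes (k != 0)
    num = 0j
    den = 0.0
    for k, u in uhat.items():
        k2 = k[0]**2 + k[1]**2
        num += (L**2 / (4 * np.pi**2)) * k2 * np.conj(u) * ghat.get(k, 0) \
               + np.sqrt(k2) * np.conj(u) * T[k]
        den += k2**2 * abs(u)**2
    return (num / den).real
\end{verbatim}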
\vfill
\eject

\begin{thebibliography}{WWW99}
\small
\IfFileExists{bibliography/bibliography.tex}{\input bibliography/bibliography.tex}{}
\end{thebibliography}

\end{document}