% Copyright 2017-2024 Ian Jauslin
%
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
%     http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
\documentclass{ian}

\usepackage{largearray}
\usepackage{dsfont}

\begin{document}

\hbox{}
\hfil{\bf\LARGE
{\tt nstrophy}
}
\vfill

\tableofcontents

\vfill
\eject

\setcounter{page}1
\pagestyle{plain}
\section{Description of the computation}
\subsection{Irreversible equation}
\indent Consider the incompressible Navier-Stokes equation in 2 dimensions
\begin{equation}
  \partial_tU=\nu\Delta U+G-(U\cdot\nabla)U,\quad
  \nabla\cdot U=0
  \label{ins}
\end{equation}
in which $G$ is the forcing term.
We take periodic boundary conditions, so, at every given time, $U(t,\cdot)$ is a function on the torus $\mathbb T^2:=\mathbb R^2/(L\mathbb Z)^2$. We represent $U(t,\cdot)$ using its Fourier series
\begin{equation}
  \hat U_k(t):=\frac1{L^2}\int_{\mathbb T^2}dx\ e^{i\frac{2\pi}L kx}U(t,x)
\end{equation}
for $k\in\mathbb Z^2$, and rewrite\-~(\ref{ins}) as
\begin{equation}
  \partial_t\hat U_k=
  -\frac{4\pi^2}{L^2}\nu k^2\hat U_k+\hat G_k
  -i\frac{2\pi}L\sum_{\displaystyle\mathop{\scriptstyle p,q\in\mathbb Z^2}_{p+q=k}}
  (q\cdot\hat U_p)\hat U_q
  ,\quad
  k\cdot\hat U_k=0
  \label{ins_k}
\end{equation}
We then reduce the equation to a scalar one, by writing
\begin{equation}
  \hat U_k=\frac{i2\pi k^\perp}{L|k|}\hat u_k\equiv\frac{i2\pi}{L|k|}(-k_y\hat u_k,k_x\hat u_k)
  \label{udef}
\end{equation}
in terms of which, taking the scalar product of both sides of the equation with $\frac L{i2\pi}\frac{k^\perp}{|k|}$,
\begin{equation}
  \partial_t\hat u_k=
  -\frac{4\pi^2}{L^2}\nu k^2\hat u_k
  +\hat g_k
  +\frac{4\pi^2}{L^2|k|}\sum_{\displaystyle\mathop{\scriptstyle p,q\in\mathbb Z^2}_{p+q=k}}
  \frac{(q\cdot p^\perp)(k^\perp\cdot q^\perp)}{|q||p|}\hat u_p\hat u_q
  \label{ins_k_u}
\end{equation}
with
\begin{equation}
  \hat g_k:=\frac{Lk^\perp}{2i\pi|k|}\cdot\hat G_k
  .
  \label{gdef}
\end{equation}
Furthermore
\begin{equation}
  (q\cdot p^\perp)(k^\perp\cdot q^\perp)
  =
  (q\cdot p^\perp)(q^2+p\cdot q)
\end{equation}
and $q\cdot p^\perp$ is antisymmetric under the exchange of $q$ and $p$, so the contribution of the symmetric term $p\cdot q$ vanishes when summed over $p+q=k$. Therefore,
\begin{equation}
  \partial_t\hat u_k=
  -\frac{4\pi^2}{L^2}\nu k^2\hat u_k+\hat g_k
  +\frac{4\pi^2}{L^2|k|}T(\hat u,k)
  =:\mathfrak F_k(\hat u)
  \label{ins_k_T}
\end{equation}
with
\begin{equation}
  T(\hat u,k):=
  \sum_{\displaystyle\mathop{\scriptstyle p,q\in\mathbb Z^2}_{p+q=k}}
  \frac{(q\cdot p^\perp)|q|}{|p|}\hat u_p\hat u_q
  .
  \label{T}
\end{equation}
We truncate the Fourier modes and assume that $\hat u_k=0$ if $|k_1|>K_1$ or $|k_2|>K_2$. Let
\begin{equation}
  \mathcal K:=\{(k_1,k_2),\ |k_1|\leqslant K_1,\ |k_2|\leqslant K_2\}
  .
\end{equation}
\subsubsection{Runge-Kutta methods}.
To solve the equation numerically, we will use Runge-Kutta methods, which compute an approximate value $\hat u_k^{(n)}$ for $\hat u_k(t_n)$.
{\tt nstrophy} supports the 4th order Runge-Kutta ({\tt RK4}) and 2nd order Runge-Kutta ({\tt RK2}) algorithms.
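For concreteness, a single fixed-step fourth-order Runge-Kutta update for $\partial_t\hat u=\mathfrak F(\hat u)$ can be sketched as follows (this is a generic textbook {\tt RK4} step written in Python, not a transcription of the {\tt nstrophy} source):
\begin{verbatim}
def rk4_step(u_hat, t, delta, F):
    # One 4th order Runge-Kutta step for du/dt = F(t, u), where u_hat is
    # the array of Fourier coefficients and F is the right-hand side.
    k1 = F(t, u_hat)
    k2 = F(t + delta / 2, u_hat + delta / 2 * k1)
    k3 = F(t + delta / 2, u_hat + delta / 2 * k2)
    k4 = F(t + delta, u_hat + delta * k3)
    return u_hat + delta / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
\end{verbatim}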
In addition, several variable step methods are implemented:
\begin{itemize}
  \item the Runge-Kutta-Dormand-Prince method ({\tt RKDP54}), which is of 5th order, and adjusts the step by comparing to a 4th order method;
  \item the Runge-Kutta-Fehlberg method ({\tt RKF45}), which is of 4th order, and adjusts the step by comparing to a 5th order method;
  \item the Runge-Kutta-Bogacki-Shampine method ({\tt RKBS32}), which is of 3rd order, and adjusts the step by comparing to a 2nd order method.
\end{itemize}
In these adaptive step methods, two estimates, $\hat u_k^{(n)}$ and $\hat U_k^{(n)}$, are computed at different orders, and the step size is adjusted at every step in such a way that the error is small enough:
\begin{equation}
  \|\hat u^{(n)}-\hat U^{(n)}\|
  <\epsilon_{\mathrm{target}}
\end{equation}
for some given $\epsilon_{\mathrm{target}}$, set using the {\tt adaptive\_tolerance} parameter.
The choice of the norm matters, and will be discussed below.
If the error is larger than the target, then the step size is decreased.
How this is done depends on the order of the algorithm.
If the order is $q$ (here we mean the smaller of the two orders, so 4 for {\tt RKDP54} and {\tt RKF45} and 2 for {\tt RKBS32}), then we expect
\begin{equation}
  \|\hat u^{(n)}-\hat U^{(n)}\|=\delta_n^qC_n
  .
\end{equation}
We wish to set $\delta_{n+1}$ so that
\begin{equation}
  \delta_{n+1}^qC_n=\epsilon_{\mathrm{target}}
\end{equation}
so
\begin{equation}
  \delta_{n+1}
  =\left(\frac{\epsilon_{\mathrm{target}}}{C_n}\right)^{\frac1q}
  =\delta_n\left(\frac{\epsilon_{\mathrm{target}}}{\|\hat u^{(n)}-\hat U^{(n)}\|}\right)^{\frac1q}
  .
  \label{adaptive_delta}
\end{equation}
(Actually, to be safe and ensure that $\delta$ decreases sufficiently, we multiply this by a safety factor that can be set using the {\tt adaptive\_factor} parameter.)
If the error is smaller than the target, we increase $\delta$ using\-~(\ref{adaptive_delta}) (without the safety factor).
To be safe, we also set a maximal value for $\delta$ via the {\tt max\_delta} parameter.
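\bigskip

\indent As an illustration, the step-size update described above can be sketched in Python as follows (the argument names correspond to the {\tt adaptive\_tolerance}, {\tt adaptive\_factor} and {\tt max\_delta} parameters, but the function itself is a simplified sketch, not the actual implementation):
\begin{verbatim}
def adjust_step(delta, error, tolerance, order, safety_factor, max_delta):
    # error : norm of u^(n) - U^(n) for the step that was just taken
    # order : the lower of the two orders (4 for RKDP54/RKF45, 2 for RKBS32)
    # implements delta_{n+1} = delta_n (tolerance / error)^(1/order)
    new_delta = delta * (tolerance / error) ** (1.0 / order)
    if error > tolerance:
        # the step is rejected: shrink further by the safety factor
        new_delta *= safety_factor
    return min(new_delta, max_delta)
\end{verbatim}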
\bigskip

\indent
The choice of the norm $\|\cdot\|$ matters.
It can be made by specifying the parameter {\tt adaptive\_norm}; a short sketch of the corresponding computations follows the list below.
\begin{itemize}
  \item A naive choice is to take $\|\cdot\|$ to be the normalized $L_1$ norm:
  \begin{equation}
    \|f\|:=
    \frac1{\mathcal N}\sum_k|f_k|
    ,\quad
    \mathcal N:=\sum_k|\hat u_k^{(n)}-\hat u_k^{(n-1)}|
    .
  \end{equation}
  This norm is selected by choosing {\tt adaptive\_norm=L1}.

  \item Empirically, we have found that $|\hat u-\hat U|$ behaves like $k^{-3}$ for {\tt RKDP54} and {\tt RKF45}, and like $k^{-\frac32}$ for {\tt RKBS32}, so a norm of the form
  \begin{equation}
    \|f\|:=\frac1{\mathcal N}\sum_k|f_k|k^{-3}
    ,\quad
    \mathcal N:=\sum_k|\hat u_k^{(n)}-\hat u_k^{(n-1)}|k^{-3}
  \end{equation}
  or
  \begin{equation}
    \|f\|:=\frac1{\mathcal N}\sum_k|f_k|k^{-\frac32}
    ,\quad
    \mathcal N:=\sum_k|\hat u_k^{(n)}-\hat u_k^{(n-1)}|k^{-\frac32}
  \end{equation}
  are sensible choices.
  These norms are selected by choosing {\tt adaptive\_norm=k3} and {\tt adaptive\_norm=k32} respectively.
  \item
  Another option is to define a norm based on the expression of the enstrophy\-~(\ref{enstrophy}):
  \begin{equation}
    \|f\|:=\frac1{\mathcal N}\sqrt{\sum_k k^2|f_k|^2}
    ,\quad
    \mathcal N:=\frac{\sum_k k^2|\hat u_k^{(n)}|^2}{\sqrt{\sum_k k^2|\hat u_k^{(n)}|^2}+\sqrt{\sum_k k^2|\hat U_k^{(n)}|^2}}
    .
  \end{equation}
  Doing so controls the error of the enstrophy through
  \begin{equation}
    \frac1{\mathcal N^2}|\mathcal En(\hat u)-\mathcal En(\hat U)|\equiv|\|\hat u\|^2-\|\hat U\|^2|\leqslant \|\hat u-\hat U\|(\|\hat u\|+\|\hat U\|)
  \end{equation}
  so
  \begin{equation}
    \frac1{\mathcal N^2}
    |\mathcal En(\hat u)-\mathcal En(\hat U)|\leqslant
    \|\hat u-\hat U\|\frac1{\mathcal N}\left(\sqrt{\sum_k k^2|\hat u_k|^2}+\sqrt{\sum_k k^2|\hat U_k|^2}\right)
  \end{equation}
  and thus
  \begin{equation}
    \frac{|\mathcal En(\hat u)-\mathcal En(\hat U)|}{\mathcal En(\hat u)}\leqslant
    \|\hat u-\hat U\|
    .
  \end{equation}
  This norm is selected by choosing {\tt adaptive\_norm=enstrophy}.
\end{itemize}
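\bigskip

\indent The norms listed above can be sketched as follows (the storage of the modes as flat arrays, and the function and argument names, are illustrative assumptions of this sketch; only the values {\tt L1}, {\tt k3}, {\tt k32} and {\tt enstrophy} of {\tt adaptive\_norm} are taken from the definitions above):
\begin{verbatim}
import numpy as np

def step_error(name, u, U, du_prev, kabs):
    # u, U    : the two embedded Runge-Kutta estimates u^(n) and U^(n)
    # du_prev : u^(n) - u^(n-1), used to normalize the L1/k3/k32 norms
    # kabs    : |k| for each stored mode (the k = 0 mode is excluded)
    diff = u - U
    if name == "L1":
        return np.sum(np.abs(diff)) / np.sum(np.abs(du_prev))
    if name in ("k3", "k32"):
        w = kabs ** (-3.0 if name == "k3" else -1.5)
        return np.sum(np.abs(diff) * w) / np.sum(np.abs(du_prev) * w)
    if name == "enstrophy":
        su = np.sqrt(np.sum(kabs**2 * np.abs(u) ** 2))
        sU = np.sqrt(np.sum(kabs**2 * np.abs(U) ** 2))
        N = su**2 / (su + sU)  # the normalization N defined above
        return np.sqrt(np.sum(kabs**2 * np.abs(diff) ** 2)) / N
    raise ValueError("unknown norm: " + name)
\end{verbatim}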
\subsubsection{Reality}.
Since $U$ is real, $\hat U_{-k}=\hat U_k^*$, and so
\begin{equation}
  \hat u_{-k}=\hat u_k^*
  .
  \label{realu}
\end{equation}
Similarly,
\begin{equation}
  \hat g_{-k}=\hat g_k^*
  .
  \label{realg}
\end{equation}
Thus,
\begin{equation}
  T(\hat u,-k)
  =
  T(\hat u,k)^*
  .
  \label{realT}
\end{equation}
In order to keep the computation as quick as possible, we only compute and store the values for $k_1\geqslant 0$.
\subsubsection{FFT}. We compute $T(\hat u,k)$ using a fast Fourier transform, defined as
\begin{equation}
  \mathcal F(f)(n):=\sum_{m\in\mathcal N}e^{-\frac{2i\pi}{N_1}m_1n_1-\frac{2i\pi}{N_2}m_2n_2}f(m_1,m_2)
\end{equation}
where
\begin{equation}
  \mathcal N:=\{(n_1,n_2),\ 0\leqslant n_1< N_1,\ 0\leqslant n_2< N_2\}
\end{equation}
for some fixed $N_1,N_2$. The transform is inverted by
\begin{equation}
  \frac1{N_1N_2}\mathcal F^*(\mathcal F(f))(n)=f(n)
\end{equation}
in which $\mathcal F^*$ is defined like $\mathcal F$ but with the opposite phase.
\bigskip
\indent The condition $p+q=k$ can be rewritten as
\begin{equation}
  T(\hat u,k)
  =
  \sum_{p,q\in\mathcal K}
  \frac1{N_1N_2}
  \sum_{n\in\mathcal N}e^{-\frac{2i\pi}{N_1}n_1(p_1+q_1-k_1)-\frac{2i\pi}{N_2}n_2(p_2+q_2-k_2)}
  (q\cdot p^\perp)\frac{|q|}{|p|}\hat u_q\hat u_p
\end{equation}
provided
\begin{equation}
  N_i>3K_i.
\end{equation}
Indeed, $\sum_{n_i=0}^{N_i-1}e^{-\frac{2i\pi}{N_i}n_im_i}$ vanishes unless $m_i=0\%N_i$ (in which $\%N_i$ means `modulo $N_i$'), and, if $p,q,k\in\mathcal K$, then $|p_i+q_i-k_i|\leqslant3K_i$, so, as long as $N_i>3K_i$, then $(p_i+q_i-k_i)=0\%N_i$ implies $p_i+q_i=k_i$.
Therefore,
\begin{equation}
  T(\hat u,k)
  =
  \textstyle
  \frac1{N_1N_2}
  \mathcal F^*\left(
  \mathcal F\left(\frac{p_x\hat u_p}{|p|}\right)(n)
  \mathcal F\left(q_y|q|\hat u_q\right)(n)
  -
  \mathcal F\left(\frac{p_y\hat u_p}{|p|}\right)(n)
  \mathcal F\left(q_x|q|\hat u_q\right)(n)
  \right)(k)
\end{equation}
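\bigskip

\indent In NumPy notation, with the convention that $\mathcal F$ is the unnormalized forward FFT and $\frac1{N_1N_2}\mathcal F^*$ is the inverse FFT, this formula can be transcribed as follows (the layout of $\hat u$ on the wrapped $N_1\times N_2$ grid and the helper names are assumptions of this sketch, not the data layout of the actual code):
\begin{verbatim}
import numpy as np

def compute_T(u_hat, K1, K2):
    # u_hat[k1 % N1, k2 % N2] holds the coefficient u_k, which vanishes
    # outside |k1| <= K1, |k2| <= K2; reality (u_{-k} = u_k^*) is assumed.
    N1, N2 = u_hat.shape
    assert N1 > 3 * K1 and N2 > 3 * K2           # no aliasing
    kx = np.fft.fftfreq(N1, 1.0 / N1)[:, None]   # k_1 on the wrapped grid
    ky = np.fft.fftfreq(N2, 1.0 / N2)[None, :]   # k_2 on the wrapped grid
    kabs = np.sqrt(kx**2 + ky**2)
    inv_kabs = np.zeros_like(kabs)
    inv_kabs[kabs > 0] = 1.0 / kabs[kabs > 0]
    # the four Fourier transforms appearing in the formula
    a = np.fft.fft2(kx * inv_kabs * u_hat)   # F(p_x u_p / |p|)
    b = np.fft.fft2(ky * kabs * u_hat)       # F(q_y |q| u_q)
    c = np.fft.fft2(ky * inv_kabs * u_hat)   # F(p_y u_p / |p|)
    d = np.fft.fft2(kx * kabs * u_hat)       # F(q_x |q| u_q)
    # (1 / (N1 N2)) F^* is numpy's ifft2
    return np.fft.ifft2(a * b - c * d)
\end{verbatim}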
\subsubsection{Energy}.
We define the energy as
\begin{equation}
  E(t)=\frac12\int\frac{dx}{L^2}\ U^2(t,x)=\frac12\sum_{k\in\mathbb Z^2}|\hat U_k|^2
  .
\end{equation}
We have
\begin{equation}
  \partial_t E=\int\frac{dx}{L^2}\ U\partial_tU
  =
  \nu\int\frac{dx}{L^2}\ U\Delta U
  +\int\frac{dx}{L^2}\ UG
  -\int\frac{dx}{L^2}\ U(U\cdot\nabla)U
  .
\end{equation}
Since we have periodic boundary conditions,
\begin{equation}
  \int dx\ U\Delta U=-\int dx\ |\nabla U|^2
  .
\end{equation}
Furthermore,
\begin{equation}
  I:=\int dx\ U(U\cdot\nabla)U
  =\sum_{i,j=1,2}\int dx\ U_iU_j\partial_jU_i
  =
  -\sum_{i,j=1,2}\int dx\ (\partial_jU_i)U_jU_i
  -\sum_{i,j=1,2}\int dx\ U_i(\partial_jU_j)U_i
\end{equation}
and since $\nabla\cdot U=0$,
\begin{equation}
  I
  =
  -I
\end{equation}
and so $I=0$.
Thus,
\begin{equation}
  \partial_t E=
  \int\frac{dx}{L^2}\ \left(-\nu|\nabla U|^2+UG\right)
  =
  \sum_{k\in\mathbb Z^2}\left(-\frac{4\pi^2}{L^2}\nu k^2|\hat U_k|^2+\hat U_{-k}\hat G_k\right)
  .
\end{equation}
Furthermore,
\begin{equation}
  \sum_{k\in\mathbb Z^2}k^2|\hat U_k|^2\geqslant
  \sum_{k\in\mathbb Z^2}|\hat U_k|^2-|\hat U_0|^2
  =2E-|\hat U_0|^2
\end{equation}
so
\begin{equation}
  \partial_t E\leqslant -\frac{8\pi^2}{L^2}\nu E+\frac{4\pi^2}{L^2}\nu\hat U_0^2+\sum_{k\in\mathbb Z^2}\hat U_{-k}\hat G_k
  \leqslant
  -\frac{8\pi^2}{L^2}\nu E+\frac{4\pi^2}{L^2}\nu\hat U_0^2+
  \|\hat G\|_2\sqrt{2E}
  .
\end{equation}
In particular, if $\hat U_0=0$ (which corresponds to keeping the center of mass fixed),
\begin{equation}
  \partial_t E\leqslant -\frac{8\pi^2}{L^2}\nu E+\|\hat G\|_2\sqrt{2E}
  .
\end{equation}
Now, if $\frac{8\pi^2}{L^2}\nu\sqrt E<\sqrt2\|\hat G\|_2$, then
\begin{equation}
  \frac{\partial_t E}{-\frac{8\pi^2}{L^2}\nu E+\|\hat G\|_2\sqrt{2E}}\leqslant1
\end{equation}
and so
\begin{equation}
  \frac{\log(1-\frac{8\pi^2\nu}{L^2\sqrt2\|\hat G\|_2}\sqrt{E(t)})}{-\frac{4\pi^2}{L^2}\nu}\leqslant t+
  \frac{\log(1-\frac{8\pi^2\nu}{L^2\sqrt2\|\hat G\|_2}\sqrt{E(0)})}{-\frac{4\pi^2}{L^2}\nu}
\end{equation}
and
\begin{equation}
  E(t)
  \leqslant
  \left(
  \frac{L^2\sqrt2\|\hat G\|_2}{8\pi^2\nu}(1-e^{-\frac{4\pi^2}{L^2}\nu t})
  +e^{-\frac{4\pi^2}{L^2}\nu t}\sqrt{E(0)}
  \right)^2
  .
\end{equation}
If $\frac{8\pi^2}{L^2}\nu\sqrt E>\sqrt2\|\hat G\|_2$,
\begin{equation}
  \frac{\partial_t E}{-\frac{8\pi^2}{L^2}\nu E+\|\hat G\|_2\sqrt{2E}}\geqslant1
\end{equation}
and so
\begin{equation}
  \frac{\log(\frac{8\pi^2\nu}{L^2\sqrt2\|\hat G\|_2}\sqrt{E(t)}-1)}{-\frac{4\pi^2}{L^2}\nu}\geqslant t+
  \frac{\log(\frac{8\pi^2\nu}{L^2\sqrt2\|\hat G\|_2}\sqrt{E(0)}-1)}{-\frac{4\pi^2}{L^2}\nu}
\end{equation}
and
\begin{equation}
  E(t)
  \leqslant
  \left(
  \frac{L^2\sqrt2\|\hat G\|_2}{8\pi^2\nu}(1-e^{-\frac{4\pi^2}{L^2}\nu t})
  +e^{-\frac{4\pi^2}{L^2}\nu t}\sqrt{E(0)}
  \right)^2
  .
\end{equation}
\subsubsection{Enstrophy}.
The enstrophy is defined as
\begin{equation}
  \mathcal En(t)=\int\frac{dx}{L^2}\ |\nabla U|^2
  =\frac{4\pi^2}{L^2}\sum_{k\in\mathbb Z^2}k^2|\hat U_k|^2
  .
  \label{enstrophy}
\end{equation}
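\bigskip

\indent Using\-~(\ref{udef}), $|\hat U_k|^2=\frac{4\pi^2}{L^2}|\hat u_k|^2$, so both the energy and the enstrophy are easily evaluated from the scalar coefficients; a small sketch (the array layout is an assumption):
\begin{verbatim}
import numpy as np

def energy_enstrophy(u_hat, kabs, L):
    # u_hat : scalar coefficients u_k over all modes, kabs : |k|
    # uses |U_k|^2 = (4 pi^2 / L^2) |u_k|^2
    U2 = (4 * np.pi**2 / L**2) * np.abs(u_hat) ** 2
    energy = 0.5 * np.sum(U2)
    enstrophy = (4 * np.pi**2 / L**2) * np.sum(kabs**2 * U2)
    return energy, enstrophy
\end{verbatim}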
\subsubsection{Lyapunov exponents}
\indent
To compute the Lyapunov exponents, we must first compute the Jacobian of $\hat u^{(n)}\mapsto\hat u^{(n+1)}$.
This map is always of Runge-Kutta type, that is,
\begin{equation}
  \hat u(t_{n+1})=\mathfrak F_{t_n}(\hat u(t_n))
  .
\end{equation}
Let $D\mathfrak F_{t_{n}}$ be the Jacobian of this map, in which we split the real and imaginary parts: if
\begin{equation}
  \hat u_k(t_n)=:\rho_k+i\iota_k
  ,\quad
  \mathfrak F_{t_n}(\hat u(t_n))_k=:\phi_k+i\psi_k
\end{equation}
then
\begin{equation}
  (D\mathfrak F_{t_n})_{k,p}:=\left(\begin{array}{cc}
    \partial_{\rho_p}\phi_k&\partial_{\iota_p}\phi_k\\
    \partial_{\rho_p}\psi_k&\partial_{\iota_p}\psi_k
  \end{array}\right)
\end{equation}
We compute this Jacobian numerically using a finite difference, by computing
\begin{equation}
  (D\mathfrak F_{t_n})_{k,p}:=\frac1\epsilon
  \left(\begin{array}{cc}
    \phi_k(\hat u+\epsilon\delta_p)-\phi_k(\hat u)&\phi_k(\hat u+i\epsilon\delta_p)-\phi_k(\hat u)\\
    \psi_k(\hat u+\epsilon\delta_p)-\psi_k(\hat u)&\psi_k(\hat u+i\epsilon\delta_p)-\psi_k(\hat u)
  \end{array}\right)
  .
\end{equation}
The parameter $\epsilon$ can be set using {\tt D\_epsilon}.
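A minimal sketch of this finite-difference Jacobian, with the real and imaginary parts stored in interleaved rows and columns (the ordering is an assumption of the sketch), reads:
\begin{verbatim}
import numpy as np

def jacobian_fd(F, u_hat, eps):
    # F   : the map u_hat -> F(u_hat) (one Runge-Kutta step)
    # eps : plays the role of the D_epsilon parameter
    # returns a (2M) x (2M) real matrix, M = number of stored modes
    M = u_hat.size
    F0 = F(u_hat)
    D = np.empty((2 * M, 2 * M))
    for p in range(M):
        for shift, col in ((eps, 0), (1j * eps, 1)):
            up = u_hat.copy()
            up[p] += shift                  # perturb Re (col 0) or Im (col 1) of mode p
            dF = (F(up) - F0) / eps
            D[0::2, 2 * p + col] = dF.real  # rows 2k   : phi_k
            D[1::2, 2 * p + col] = dF.imag  # rows 2k+1 : psi_k
    return D
\end{verbatim}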
%, so
%\begin{equation}
% D\hat u^{(n+1)}=\mathds 1+\delta\sum_{i=1}^q w_iD\mathfrak F(\hat u^{(n)})
% .
%\end{equation}
%We then compute
%\begin{equation}
% (D\mathfrak F(\hat u))_{k,\ell}
% =
% -\frac{4\pi^2}{L^2}\nu k^2\delta_{k,\ell}
% +\frac{4\pi^2}{L^2|k|}\partial_{\hat u_\ell}T(\hat u,k)
%\end{equation}
%and, by\-~(\ref{T}),
%\begin{equation}
% \partial_{\hat u_\ell}T(\hat u,k)
% =
% \sum_{\displaystyle\mathop{\scriptstyle q\in\mathbb Z^2}_{\ell+q=k}}
% \left(
% \frac{(q\cdot \ell^\perp)|q|}{|\ell|}
% +
% \frac{(\ell\cdot q^\perp)|\ell|}{|q|}
% \right)\hat u_q
% =
% (k\cdot \ell^\perp)\left(
% \frac{|k-\ell|}{|\ell|}
% -
% \frac{|\ell|}{|k-\ell|}
% \right)\hat u_{k-\ell}
% .
%\end{equation}
\bigskip
\indent
The Lyapunov exponents are then defined as
\begin{equation}
  \frac1{t_n}\log\mathrm{spec}(\pi_{t_n})
  ,\quad
  \pi_{t_n}:=\prod_{i=1}^nD\hat u(t_i)
  .
\end{equation}
However, the product of $D\hat u$ may become quite large or quite small (if the Lyapunov exponents are not all 0).
To avoid this, we periodically rescale the product.
We set $\mathfrak L_r>0$ (set by adjusting the {\tt lyanpunov\_reset} parameter), and, when $t_n$ crosses a multiple of $\mathfrak L_r$, we rescale the eigenvalues of $\pi_{t_n}$ to 1.
To do so, we perform a $QR$ decomposition:
\begin{equation}
  \pi_{\alpha\mathfrak L_r}
  =R^{(\alpha)}Q^{(\alpha)}
\end{equation}
where $Q^{(\alpha)}$ is orthogonal and $R^{(\alpha)}$ is upper triangular, and we divide by $R^{(\alpha)}$ (thus only keeping $Q^{(\alpha)}$).
The Lyapunov exponents at time $\alpha\mathfrak L_r$ are then
\begin{equation}
  \frac1{\alpha\mathfrak L_r}\sum_{\beta=1}^\alpha\log\mathrm{spec}(R^{(\beta)})
  .
\end{equation}
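\bigskip

\indent A sketch of this procedure, using the standard QR-based accumulation of the logarithms (here the factorization is performed at every step rather than once every $\mathfrak L_r$, which is a simplification of this sketch, not necessarily the bookkeeping used in the code):
\begin{verbatim}
import numpy as np

def lyapunov_exponents(jacobians, dt):
    # jacobians : iterable of the Jacobians of the successive steps
    # dt        : the time covered by each Jacobian
    Q = None
    logs = 0.0
    steps = 0
    for D in jacobians:
        M = D if Q is None else D @ Q
        Q, R = np.linalg.qr(M)              # re-orthonormalize the frame
        logs = logs + np.log(np.abs(np.diag(R)))
        steps += 1
    return logs / (steps * dt)              # one exponent per direction
\end{verbatim}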
\subsubsection{Numerical instability}.
In order to prevent the algorithm from blowing up, it is necessary to impose the reality of $u(x)$ by hand; otherwise, truncation errors build up and lead to divergences.
It is sufficient to ensure that the convolution term $T(\hat u,k)$ satisfies $T(\hat u,-k)=T(\hat u,k)^*$.
After imposing this condition, the algorithm no longer blows up, but it is still unstable (for instance, increasing $K_1$ or $K_2$ leads to very different results).
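\bigskip

\indent One possible way of imposing the condition $T(\hat u,-k)=T(\hat u,k)^*$ on a wrapped $N_1\times N_2$ array (the actual implementation may enforce it differently, for instance by only storing the modes with $k_1\geqslant0$) is to average each mode with the conjugate of its mirror image:
\begin{verbatim}
import numpy as np

def enforce_reality(T):
    # T[(-k1) % N1, (-k2) % N2] is the mirror of T[k1 % N1, k2 % N2]
    mirror = np.conj(T[::-1, ::-1])
    mirror = np.roll(np.roll(mirror, 1, axis=0), 1, axis=1)
    return 0.5 * (T + mirror)
\end{verbatim}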
\subsection{Reversible equation}
\indent The reversible equation is similar to\-~(\ref{ins}) but instead of fixing the viscosity, we fix the enstrophy\-~\cite{Ga22}.
It is defined directly in Fourier space:
\begin{equation}
  \partial_t\hat U_k=
  -\frac{4\pi^2}{L^2}\alpha(\hat U) k^2\hat U_k+\hat G_k
  -i\frac{2\pi}L\sum_{\displaystyle\mathop{\scriptstyle p,q\in\mathbb Z^2}_{p+q=k}}
  (q\cdot\hat U_p)\hat U_q
  ,\quad
  k\cdot\hat U_k=0
\end{equation}
where $\alpha$ is chosen such that the enstrophy is constant.
In terms of $\hat u$\-~(\ref{udef}), (\ref{gdef}), (\ref{T}):
\begin{equation}
  \partial_t\hat u_k=
  -\frac{4\pi^2}{L^2}\alpha(\hat u) k^2\hat u_k
  +\hat g_k
  +\frac{4\pi^2}{L^2|k|}T(\hat u,k)
  .
  \label{rns_k}
\end{equation}
To compute $\alpha$, we use the constancy of the enstrophy:
\begin{equation}
  \sum_{k\in\mathbb Z^2}k^2\hat U_k^*\cdot\partial_t\hat U_k
  =0
\end{equation}
which, in terms of $\hat u$, is
\begin{equation}
  \sum_{k\in\mathbb Z^2}k^2\hat u_k^*\partial_t\hat u_k
  =0
\end{equation}
that is
\begin{equation}
  \frac{4\pi^2}{L^2}\alpha(\hat u)\sum_{k\in\mathbb Z^2}k^4|\hat u_k|^2
  =
  \sum_{k\in\mathbb Z^2}k^2\hat u_k^*\hat g_k
  +\frac{4\pi^2}{L^2}\sum_{k\in\mathbb Z^2}|k|\hat u_k^*T(\hat u,k)
\end{equation}
and so
\begin{equation}
  \alpha(\hat u)
  =\frac{\frac{L^2}{4\pi^2}\sum_k k^2\hat u_k^*\hat g_k+\sum_k|k|\hat u_k^*T(\hat u,k)}{\sum_kk^4|\hat u_k|^2}
  .
\end{equation}
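This expression translates directly into a sum over the modes; a sketch (the array layout is an assumption, and the sums must run over all of $\mathcal K$, that is, over both $k$ and $-k$):
\begin{verbatim}
import numpy as np

def alpha(u_hat, g_hat, T_hat, kabs, L):
    # u_hat, g_hat, T_hat : coefficients u_k, g_k and T(u,k) over all modes
    # kabs                : |k| for each mode
    k2 = kabs**2
    num = (L**2 / (4 * np.pi**2)) * np.sum(k2 * np.conj(u_hat) * g_hat) \
        + np.sum(kabs * np.conj(u_hat) * T_hat)
    den = np.sum(k2**2 * np.abs(u_hat) ** 2)
    return num.real / den   # alpha is real, see the remark below
\end{verbatim}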
Note that, by\-~(\ref{realu})-(\ref{realT}),
\begin{equation}
  \alpha(\hat u)\in\mathbb R
  .
\end{equation}

\vfill
\eject

\begin{thebibliography}{WWW99}
\small
\IfFileExists{bibliography/bibliography.tex}{\input bibliography/bibliography.tex}{}
\end{thebibliography}

\end{document}