|
| 1 | +% main.tex |
| 2 | +\documentclass[a4paper,9pt]{article} |
| 3 | +\usepackage{amsmath,amssymb,amsthm,amsbsy,amsfonts} |
| 4 | +\usepackage{todonotes} |
| 5 | +\usepackage{systeme} |
| 6 | +\usepackage{physics} |
| 7 | +\usepackage{cleveref} |
| 8 | +\newcommand{\correspondsto}{\;\widehat{=}\;} |
| 9 | +\usepackage{bm} |
| 10 | +\usepackage{enumitem} % label enumerate |
| 11 | +\newtheorem{theorem}{Theorem} |
| 12 | +\theoremstyle{definition} |
| 13 | +\newtheorem{definition}{Definition}[section] |
| 14 | +\theoremstyle{remark} |
| 15 | +\newtheorem*{remark}{Remark} |
| 16 | +% change the Q.E.D. symbol
| 17 | +\renewcommand\qedsymbol{$\hfill \mbox{\raggedright \rule{0.1in}{0.2in}}$} |
| 18 | + |
| 19 | +\begin{document} |
| 20 | + |
| 21 | +\author{Yingbo Ma\\ |
| 22 | +  \texttt{mayingbo5@gmail.com}}
| 23 | +\title{Efficient Implicit Runge-Kutta Implementation} |
| 24 | +\date{April, 2020} |
| 25 | + |
| 26 | +\maketitle |
| 27 | + |
| 28 | +\section{Notation} |
| 29 | +\todo{Is this necessary?} |
| 30 | + |
| 31 | +\section{Runge-Kutta Methods} |
| 32 | +Runge-Kutta methods can numerically solve differential-algebraic equations (DAEs) |
| 33 | +that are written in the form of |
| 34 | +\begin{equation} |
| 35 | + M \dv{u}{t} = f(u, t),\quad u(a)=u_a \in \mathbb{R}^m, \quad t\in [a, b]. |
| 36 | +\end{equation} |
| 37 | + |
| 38 | +\begin{definition} \label{def:rk} |
| 39 | +  An \emph{$s$-stage Runge--Kutta method} has the coefficients $a_{ij}, b_i,$ and $c_j$
| 40 | + for $i=1,2,\dots,s$ and $j=1,2,\dots,s$. One can also denote the coefficients |
| 41 | + simply by $\bm{A}, \bm{b},$ and $\bm{c}$. A step of the method is |
| 42 | + \begin{equation} \label{eq:rk_sum} |
| 43 | + u_{n+1} = u_n + \sum_{i=1}^s b_i k_i, |
| 44 | + \end{equation} |
| 45 | + where |
| 46 | + \begin{align} \label{eq:rk_lin} |
| 47 | + M z_i &= \sum_{j=1}^s a_{ij}k_j\qq{or} (I_s \otimes M) \bm{z} = |
| 48 | + (\bm{A}\otimes I_m)\bm{k}, \quad g_i = u_n + z_i \\ |
| 49 | + k_i &= hf(g_i, t+c_ih). |
| 50 | + \end{align} |
| 51 | +\end{definition} |
| 52 | + |
| 53 | +We observe that when $a_{ij} = 0$ for $j\ge i$, \cref{eq:rk_lin} can be computed
| 54 | +without solving a system of algebraic equations, and we call methods with this |
| 55 | +property \emph{explicit} otherwise \emph{implicit}. |
| 56 | + |
| 57 | +We should note that solving for $z_i$ is much preferred over $g_i$, as it has a
| 58 | +smaller magnitude. A method is stiffly accurate, i.e.\ the stability function
| 59 | +satisfies $R(\infty) = 0$, when the matrix $\bm{A}$ has full rank and $a_{si} = b_i$.
| 60 | +Hence, $\bm{b}^T\bm{A}^{-1}$ gives the last row of the identity matrix $I_s$. We |
| 61 | +can use this condition to further simplify \cref{eq:rk_sum} (by slight abuse of |
| 62 | +notation): |
| 63 | +\begin{equation} \label{eq:rk_sim} |
| 64 | + u_{n+1} = u_n + \bm{b}^T\bm{k} = u_n + |
| 65 | + \bm{b}^T\underbrace{\bm{A}^{-1}\bm{z}}_\text{\cref{eq:rk_lin}} = u_n + |
| 66 | + \mqty(0 & \cdots & 0 & 1)\bm{z} = u_n + z_s. |
| 67 | +\end{equation} |
| 68 | + |
| 69 | +\section{Solving Nonlinear Systems from Implicit Runge-Kutta Methods} |
| 70 | +We have to solve \cref{eq:rk_lin} for $\bm{z}$ when a Runge-Kutta method is |
| 71 | +implicit. More explicitly, we need to solve $G(\bm{z}) = \bm{0}$, where |
| 72 | +\begin{equation} \label{eq:nonlinear_rk} |
| 73 | +  G(\bm{z}) = (I_s \otimes M) \bm{z} - h(\bm{A}\otimes I_m) \tilde{\bm{f}}(\bm{z}) \qq{and}
| 74 | +  \tilde{\bm{f}}(\bm{z})_i = f(u_n+z_i, t+c_i h).
| 75 | +\end{equation}
| 76 | +The purpose of introducing a computationally expensive nonlinear
| 77 | +system solving step is to combat extreme stiffness. Hence, the Jacobian matrix |
| 78 | +arising from \cref{eq:rk_lin} is ill-conditioned. We must use Newton |
| 79 | +iteration to ensure stability, since fixed-point iteration only converges for |
| 80 | +contracting maps, which greatly limits the step size. |
| 81 | + |
| 82 | +Astute readers may notice that the Jacobian |
| 83 | +\begin{equation} |
| 84 | + \tilde{\bm{J}}_{ij} = \pdv{\tilde{\bm{f}}_i}{\bm{z}_j} = \pdv{f(u_n + z_i, t+c_ih)}{u} |
| 85 | +\end{equation} |
| 86 | +requires us to compute the Jacobian of $f$ at $s$ many different points, which |
| 87 | +is very expensive. We can approximate it by |
| 88 | +\begin{equation} |
| 89 | + \tilde{\bm{J}}_{ij} \approx J = \pdv{f(u_n, t)}{u}. |
| 90 | +\end{equation} |
| 91 | +Our simplified Newton iteration from \cref{eq:nonlinear_rk} is then |
| 92 | +\begin{align} \label{eq:newton_1} |
| 93 | + (I_s \otimes M - h\bm{A}\otimes J) \Delta \bm{z}^k &= -G(\bm{z}^k) = -(I_s |
| 94 | + \otimes M) \bm{z}^k + h(\bm{A}\otimes I_m) \tilde{\bm{f}}(\bm{z}^k) \\ |
| 95 | + \bm{z}^{k+1} &= \bm{z}^{k} + \Delta \bm{z}^k \label{eq:newton_2}, |
| 96 | +\end{align} |
| 97 | +where $\bm{z}^k$ is the approximation to $\bm{z}$ at the $k$-th iteration. |
| 98 | + |
| 99 | +In Hairer's Radau IIA implementation, he left-multiplies \cref{eq:newton_1} by
| 100 | +$(hA)^{-1} \otimes I_m$ to exploit the structure of the iteration matrix |
| 101 | +\cite{hairer1999stiff}, so we have |
| 102 | +\begin{align} |
| 103 | + ((hA)^{-1} \otimes I_m)(I_s \otimes M) &= (hA)^{-1} \otimes M \\ |
| 104 | + ((hA)^{-1} \otimes I_m)(h\bm{A}\otimes J) &= I_s\otimes J \\ |
| 105 | + ((hA)^{-1} \otimes I_m)G(\bm{z}^k) &= ((hA)^{-1} \otimes M) \bm{z}^k - |
| 106 | + \tilde{\bm{f}}(\bm{z}^k), |
| 107 | +\end{align} |
| 108 | +and finally, |
| 109 | +\begin{equation} \label{eq:newton1} |
| 110 | + ((hA)^{-1} \otimes M - I_s\otimes J) \Delta \bm{z}^k = -((hA)^{-1} \otimes M) |
| 111 | + \bm{z}^k + \tilde{\bm{f}}(\bm{z}^k). |
| 112 | +\end{equation} |
| 113 | +Hairer also diagonalizes $A^{-1}$, i.e. |
| 114 | +\begin{equation} |
| 115 | + V^{-1}A^{-1}V = \Lambda, |
| 116 | +\end{equation} |
| 117 | +to decouple the $sm \times sm$ system. To transform \cref{eq:newton1} into
| 118 | +the eigenbasis of $A^{-1}$, notice |
| 119 | +\begin{equation} |
| 120 | + A^{-1}x = b \implies V^{-1}A^{-1}x = V^{-1}b \implies \Lambda V^{-1}x = |
| 121 | + V^{-1}b. |
| 122 | +\end{equation} |
| 123 | +Similarly, we have |
| 124 | +\begin{align} |
| 125 | + &(h^{-1} \Lambda \otimes M - I_s\otimes J) (V^{-1}\otimes I_m)\Delta\bm{z}^k\\ |
| 126 | + =& (V^{-1}\otimes I_m)(-((hA)^{-1} \otimes M) |
| 127 | + \bm{z}^k + \tilde{\bm{f}}(\bm{z}^k)). |
| 128 | +\end{align} |
| 129 | +We can introduce the transformed variable $\bm{w} = (V^{-1}\otimes I_m) \bm{z}$
| 130 | +to further reduce computation, so \cref{eq:newton1} and \cref{eq:newton_2} are now
| 131 | +\begin{align} \label{eq:newton2} |
| 132 | + (h^{-1} \Lambda \otimes M - I_s\otimes J) \Delta\bm{w}^k |
| 133 | + &= -(h^{-1} \Lambda \otimes M) \bm{w}^k + |
| 134 | + (V^{-1}\otimes I_m)\tilde{\bm{f}}((V\otimes I_m)\bm{w}^k) \\ |
| 135 | + \bm{w}^{k+1} &= \bm{w}^{k} + \Delta \bm{w}^k. |
| 136 | +\end{align} |
| 137 | + |
| 138 | +\bibliographystyle{siam}
| 139 | +\bibliography{reference}
| 140 | + |
| 141 | +\end{document} |
0 commit comments