$$
\newcommand{\LetThereBe}[2]{\newcommand{#1}{#2}}
\newcommand{\letThereBe}[3]{\newcommand{#1}[#2]{#3}}
% Declare mathematics (so they can be overwritten for PDF)
\newcommand{\declareMathematics}[2]{\DeclareMathOperator{#1}{#2}}
\newcommand{\declareMathematicsStar}[2]{\DeclareMathOperator*{#1}{#2}}
% striked integral
\newcommand{\avint}{\mathop{\mathchoice{\,\rlap{-}\!\!\int}
{\rlap{\raise.15em{\scriptstyle -}}\kern-.2em\int}
{\rlap{\raise.09em{\scriptscriptstyle -}}\!\int}
{\rlap{-}\!\int}}\nolimits}
% \d does not work well for PDFs
\LetThereBe{\d}{\differential}
$$
$$
% Simply for testing
\LetThereBe{\foo}{\textrm{FIXME: this is a test!}}
% Font styles
\letThereBe{\mcal}{1}{\mathcal{#1}}
\letThereBe{\chem}{1}{\mathrm{#1}}
% Sets
\LetThereBe{\C}{\mathbb{C}}
\LetThereBe{\R}{\mathbb{R}}
\LetThereBe{\Z}{\mathbb{Z}}
\LetThereBe{\N}{\mathbb{N}}
\LetThereBe{\im}{\mathrm{i}}
\LetThereBe{\Im}{\mathrm{Im}}
\LetThereBe{\Re}{\mathrm{Re}}
% Sets from PDEs
\LetThereBe{\boundary}{\partial}
\letThereBe{\closure}{1}{\overline{#1}}
\letThereBe{\contf}{2}{C^{#2}(#1)}
\letThereBe{\compactContf}{2}{C_c^{#2}(#1)}
\letThereBe{\ball}{2}{B\brackets{#1, #2}}
\letThereBe{\closedBall}{2}{B\parentheses{#1, #2}}
\LetThereBe{\compactEmbed}{\subset\subset}
\letThereBe{\inside}{1}{#1^o}
\LetThereBe{\neighborhood}{\mcal O}
\letThereBe{\neigh}{1}{\neighborhood \brackets{#1}}
% Basic notation - vectors and random variables
\letThereBe{\vi}{1}{\boldsymbol{#1}} %vector or matrix
\letThereBe{\dvi}{1}{\vi{\dot{#1}}} %differentiated vector or matrix
\letThereBe{\vii}{1}{\mathbf{#1}} %if \vi doesn't work
\letThereBe{\dvii}{1}{\vii{\dot{#1}}} %if \dvi doesn't work
\letThereBe{\rnd}{1}{\mathup{#1}} %random variable
\letThereBe{\vr}{1}{\mathbf{#1}} %random vector or matrix
\letThereBe{\vrr}{1}{\boldsymbol{#1}} %random vector if \vr doesn't work
\letThereBe{\dvr}{1}{\vr{\dot{#1}}} %differentiated vector or matrix
\letThereBe{\vb}{1}{\pmb{#1}} %#TODO
\letThereBe{\dvb}{1}{\vb{\dot{#1}}} %#TODO
\letThereBe{\oper}{1}{\mathsf{#1}}
% Basic notation - general
\letThereBe{\set}{1}{\left\{#1\right\}}
\letThereBe{\seqnc}{4}{\set{#1_{#2}}_{#2 = #3}^{#4}}
\letThereBe{\Seqnc}{3}{\set{#1}_{#2}^{#3}}
\letThereBe{\brackets}{1}{\left( #1 \right)}
\letThereBe{\parentheses}{1}{\left[ #1 \right]}
\letThereBe{\dom}{1}{\mcal{D}\, \brackets{#1}}
\letThereBe{\complexConj}{1}{\overline{#1}}
\LetThereBe{\divider}{\; \vert \;}
% Special symbols
\LetThereBe{\const}{\mathrm{const}}
\LetThereBe{\konst}{\mathrm{konst.}}
\LetThereBe{\vf}{\varphi}
\LetThereBe{\ve}{\varepsilon}
\LetThereBe{\tht}{\theta}
\LetThereBe{\Tht}{\Theta}
\LetThereBe{\after}{\circ}
\LetThereBe{\lmbd}{\lambda}
% Shorthands
\LetThereBe{\xx}{\vi x}
\LetThereBe{\yy}{\vi y}
\LetThereBe{\XX}{\vi X}
\LetThereBe{\AA}{\vi A}
\LetThereBe{\bb}{\vi b}
\LetThereBe{\vvf}{\vi \vf}
\LetThereBe{\ff}{\vi f}
\LetThereBe{\gg}{\vi g}
% Basic functions
\letThereBe{\absval}{1}{\left| #1 \right|}
\LetThereBe{\id}{\mathrm{id}}
\letThereBe{\floor}{1}{\left\lfloor #1 \right\rfloor}
\letThereBe{\ceil}{1}{\left\lceil #1 \right\rceil}
\declareMathematics{\im}{im} %image
\declareMathematics{\tg}{tg}
\declareMathematics{\sign}{sign}
\declareMathematics{\card}{card} %cardinality
\letThereBe{\setSize}{1}{\left| #1 \right|}
\declareMathematics{\exp}{exp}
\letThereBe{\Exp}{1}{\exp\brackets{#1}}
\letThereBe{\indicator}{1}{\mathbb{1}_{#1}}
\declareMathematics{\arccot}{arccot}
\declareMathematics{\complexArg}{arg}
\declareMathematics{\gcd}{gcd} % Greatest Common Divisor
\declareMathematics{\lcm}{lcm} % Least Common Multiple
\letThereBe{\limInfty}{1}{\lim_{#1 \to \infty}}
\letThereBe{\limInftyM}{1}{\lim_{#1 \to -\infty}}
% Useful commands
\letThereBe{\onTop}{2}{\mathrel{\overset{#2}{#1}}}
\letThereBe{\onBottom}{2}{\mathrel{\underset{#2}{#1}}}
\letThereBe{\tOnTop}{2}{\mathrel{\overset{\text{#2}}{#1}}}
\letThereBe{\tOnBottom}{2}{\mathrel{\underset{\text{#2}}{#1}}}
\LetThereBe{\EQ}{\onTop{=}{!}}
\LetThereBe{\letDef}{:=} %#TODO: change the symbol
\LetThereBe{\isPDef}{\onTop{\succ}{?}}
\LetThereBe{\inductionStep}{\tOnTop{=}{induct. step}}
% Optimization
\declareMathematicsStar{\argmin}{argmin}
\declareMathematicsStar{\argmax}{argmax}
\letThereBe{\maxOf}{1}{\max\set{#1}}
\letThereBe{\minOf}{1}{\min\set{#1}}
\declareMathematics{\prox}{prox}
\declareMathematics{\loss}{loss}
\declareMathematics{\supp}{supp}
\letThereBe{\Supp}{1}{\supp\brackets{#1}}
\LetThereBe{\constraint}{\text{s.t.}\;}
$$
$$
% Operators - Analysis
\LetThereBe{\hess}{\nabla^2}
\LetThereBe{\lagr}{\mcal L}
\LetThereBe{\lapl}{\Delta}
\declareMathematics{\grad}{grad}
\declareMathematics{\Dgrad}{D}
\LetThereBe{\gradient}{\nabla}
\LetThereBe{\jacobi}{\nabla}
\LetThereBe{\Jacobi}{\mathrm J}
\letThereBe{\jacobian}{2}{D_{#1}\brackets{#2}}
\LetThereBe{\d}{\mathrm{d}}
\LetThereBe{\dd}{\,\mathrm{d}}
\letThereBe{\partialDeriv}{2}{\frac {\partial #1} {\partial #2}}
\letThereBe{\npartialDeriv}{3}{\partialDeriv{^{#1} #2} {#3^{#1}}}
\letThereBe{\partialOp}{1}{\frac {\partial} {\partial #1}}
\letThereBe{\npartialOp}{2}{\frac {\partial^{#1}} {\partial #2^{#1}}}
\letThereBe{\pDeriv}{2}{\partialDeriv{#1}{#2}}
\letThereBe{\npDeriv}{3}{\npartialDeriv{#1}{#2}{#3}}
\letThereBe{\deriv}{2}{\frac {\d #1} {\d #2}}
\letThereBe{\nderiv}{3}{\frac {\d^{#1} #2} {\d #3^{#1}}}
\letThereBe{\derivOp}{1}{\frac {\d} {\d #1}\,}
\letThereBe{\nderivOp}{2}{\frac {\d^{#1}} {\d #2^{#1}}\,}
$$
$$
% Linear algebra
\letThereBe{\norm}{1}{\left\lVert #1 \right\rVert}
\letThereBe{\scal}{2}{\left\langle #1, #2 \right\rangle}
\letThereBe{\avg}{1}{\overline{#1}}
\letThereBe{\Avg}{1}{\bar{#1}}
\letThereBe{\linspace}{1}{\mathrm{lin}\set{#1}}
\letThereBe{\algMult}{1}{\mu_{\mathrm A} \brackets{#1}}
\letThereBe{\geomMult}{1}{\mu_{\mathrm G} \brackets{#1}}
\LetThereBe{\Nullity}{\mathrm{nullity}}
\letThereBe{\nullity}{1}{\Nullity \brackets{#1}}
\LetThereBe{\nulty}{\nu}
% Linear algebra - Matrices
\LetThereBe{\tr}{\top}
\LetThereBe{\Tr}{^\tr}
\LetThereBe{\pinv}{\dagger}
\LetThereBe{\Pinv}{^\dagger}
\LetThereBe{\Inv}{^{-1}}
\LetThereBe{\ident}{\vi{I}}
\letThereBe{\mtr}{1}{\begin{pmatrix}#1\end{pmatrix}}
\letThereBe{\bmtr}{1}{\begin{bmatrix}#1\end{bmatrix}}
\declareMathematics{\trace}{tr}
\declareMathematics{\diagonal}{diag}
$$
$$
% Statistics
\LetThereBe{\iid}{\overset{\text{i.i.d.}}{\sim}}
\LetThereBe{\ind}{\overset{\text{ind}}{\sim}}
\LetThereBe{\condp}{\,\vert\,}
\letThereBe{\complement}{1}{\overline{#1}}
\LetThereBe{\acov}{\gamma}
\LetThereBe{\acf}{\rho}
\LetThereBe{\stdev}{\sigma}
\LetThereBe{\procMean}{\mu}
\LetThereBe{\procVar}{\stdev^2}
\declareMathematics{\variance}{var}
\letThereBe{\Variance}{1}{\variance \brackets{#1}}
\declareMathematics{\cov}{cov}
\declareMathematics{\corr}{cor}
\letThereBe{\sampleVar}{1}{\rnd S^2_{#1}}
\letThereBe{\populationVar}{1}{V_{#1}}
\declareMathematics{\expectedValue}{\mathbb{E}}
\declareMathematics{\rndMode}{Mode}
\letThereBe{\RndMode}{1}{\rndMode\brackets{#1}}
\letThereBe{\expect}{1}{\expectedValue #1}
\letThereBe{\Expect}{1}{\expectedValue \brackets{#1}}
\letThereBe{\expectIn}{2}{\expectedValue_{#1} #2}
\letThereBe{\ExpectIn}{2}{\expectedValue_{#1} \brackets{#2}}
\LetThereBe{\betaF}{\mathrm B}
\LetThereBe{\fisherMat}{J}
\LetThereBe{\mutInfo}{I}
\LetThereBe{\expectedGain}{I_e}
\letThereBe{\KLDiv}{2}{D\brackets{#1 \parallel #2}}
\LetThereBe{\entropy}{H}
\LetThereBe{\diffEntropy}{h}
\LetThereBe{\probF}{\pi}
\LetThereBe{\densF}{\vf}
\LetThereBe{\att}{_t} %at time
\letThereBe{\estim}{1}{\hat{#1}}
\letThereBe{\estimML}{1}{\hat{#1}_{\mathrm{ML}}}
\letThereBe{\estimOLS}{1}{\hat{#1}_{\mathrm{OLS}}}
\letThereBe{\estimMAP}{1}{\hat{#1}_{\mathrm{MAP}}}
\letThereBe{\predict}{3}{\estim {\rnd #1}_{#2 | #3}}
\letThereBe{\periodPart}{3}{#1+#2-\ceil{#2/#3}#3}
\letThereBe{\infEstim}{1}{\tilde{#1}}
\letThereBe{\predictDist}{1}{{#1}^*}
\LetThereBe{\backs}{\oper B}
\LetThereBe{\diff}{\oper \Delta}
\LetThereBe{\BLP}{\oper P}
\LetThereBe{\arPoly}{\Phi}
\letThereBe{\ArPoly}{1}{\arPoly\brackets{#1}}
\LetThereBe{\maPoly}{\Theta}
\letThereBe{\MaPoly}{1}{\maPoly\brackets{#1}}
\letThereBe{\ARmod}{1}{\mathrm{AR}\brackets{#1}}
\letThereBe{\MAmod}{1}{\mathrm{MA}\brackets{#1}}
\letThereBe{\ARMA}{2}{\mathrm{ARMA}\brackets{#1, #2}}
\letThereBe{\sARMA}{3}{\mathrm{ARMA}\brackets{#1}\brackets{#2}_{#3}}
\letThereBe{\SARIMA}{3}{\mathrm{ARIMA}\brackets{#1}\brackets{#2}_{#3}}
\letThereBe{\ARIMA}{3}{\mathrm{ARIMA}\brackets{#1, #2, #3}}
\LetThereBe{\pacf}{\alpha}
\letThereBe{\parcorr}{3}{\rho_{#1 #2 | #3}}
\LetThereBe{\noise}{\mathscr{N}}
\LetThereBe{\jeffreys}{\mathcal J}
\LetThereBe{\likely}{\mcal L}
\letThereBe{\Likely}{1}{\likely\brackets{#1}}
\LetThereBe{\loglikely}{\mcal l}
\letThereBe{\Loglikely}{1}{\loglikely \brackets{#1}}
\LetThereBe{\CovMat}{\Gamma}
\LetThereBe{\covMat}{\vi \CovMat}
\LetThereBe{\rcovMat}{\vrr \CovMat}
\LetThereBe{\AIC}{\mathrm{AIC}}
\LetThereBe{\BIC}{\mathrm{BIC}}
\LetThereBe{\AICc}{\mathrm{AIC}_c}
\LetThereBe{\nullHypo}{H_0}
\LetThereBe{\altHypo}{H_1}
\LetThereBe{\rve}{\rnd \ve}
\LetThereBe{\rtht}{\rnd \theta}
\LetThereBe{\rX}{\rnd X}
\LetThereBe{\rY}{\rnd Y}
\LetThereBe{\rZ}{\rnd Z}
\LetThereBe{\rA}{\rnd A}
\LetThereBe{\rB}{\rnd B}
\LetThereBe{\vrZ}{\vr Z}
\LetThereBe{\vrY}{\vr Y}
\LetThereBe{\vrX}{\vr X}
% Bayesian inference
\LetThereBe{\paramSet}{\mcal T}
\LetThereBe{\sampleSet}{\mcal Y}
\LetThereBe{\bayesSigmaAlg}{\mcal B}
% Different types of convergence
\LetThereBe{\inDist}{\onTop{\to}{d}}
\letThereBe{\inDistWhen}{1}{\onBottom{\onTop{\longrightarrow}{d}}{#1}}
\LetThereBe{\inProb}{\onTop{\to}{P}}
\letThereBe{\inProbWhen}{1}{\onBottom{\onTop{\longrightarrow}{P}}{#1}}
\LetThereBe{\inMeanSq}{\onTop{\to}{L^2}}
\letThereBe{\inMeanSqWhen}{1}{\onBottom{\onTop{\longrightarrow}{L^2}}{#1}}
\LetThereBe{\convergeAS}{\tOnTop{\to}{a.s.}}
\letThereBe{\convergeASWhen}{1}{\onBottom{\tOnTop{\longrightarrow}{a.s.}}{#1}}
$$
$$
% Distributions
\letThereBe{\WN}{2}{\mathrm{WN}\brackets{#1,#2}}
\declareMathematics{\uniform}{Unif}
\declareMathematics{\binomDist}{Bi}
\declareMathematics{\negbinomDist}{NBi}
\declareMathematics{\betaDist}{Beta}
\declareMathematics{\betabinomDist}{BetaBin}
\declareMathematics{\gammaDist}{Gamma}
\declareMathematics{\igammaDist}{IGamma}
\declareMathematics{\invgammaDist}{IGamma}
\declareMathematics{\expDist}{Ex}
\declareMathematics{\poisDist}{Po}
\declareMathematics{\erlangDist}{Er}
\declareMathematics{\altDist}{A}
\declareMathematics{\geomDist}{Ge}
\LetThereBe{\normalDist}{\mathcal N}
%\declareMathematics{\normalDist}{N}
\letThereBe{\normalD}{1}{\normalDist \brackets{#1}}
\letThereBe{\mvnormalD}{2}{\normalDist_{#1} \brackets{#2}}
\letThereBe{\NormalD}{2}{\normalDist \brackets{#1, #2}}
\LetThereBe{\lognormalDist}{\log\normalDist}
$$
$$
% Game Theory
\LetThereBe{\doms}{\succ}
\LetThereBe{\isdom}{\prec}
\letThereBe{\OfOthers}{1}{_{-#1}}
\LetThereBe{\ofOthers}{\OfOthers{i}}
\LetThereBe{\pdist}{\sigma}
\letThereBe{\domGame}{1}{G_{DS}^{#1}}
\letThereBe{\ratGame}{1}{G_{Rat}^{#1}}
\letThereBe{\bestRep}{2}{\mathrm{BR}_{#1}\brackets{#2}}
\letThereBe{\perf}{1}{{#1}_{\mathrm{perf}}}
\LetThereBe{\perfG}{\perf{G}}
\letThereBe{\imperf}{1}{{#1}_{\mathrm{imp}}}
\LetThereBe{\imperfG}{\imperf{G}}
\letThereBe{\proper}{1}{{#1}_{\mathrm{proper}}}
\letThereBe{\finrep}{2}{{#2}_{#1{\text -}\mathrm{rep}}} %T-stage game
\letThereBe{\infrep}{1}{#1_{\mathrm{irep}}}
\LetThereBe{\repstr}{\tau} %strategy in a repeated game
\LetThereBe{\emptyhist}{\epsilon}
\letThereBe{\extrep}{1}{{#1^{\mathrm{rep}}}}
\letThereBe{\avgpay}{1}{#1^{\mathrm{avg}}}
\LetThereBe{\succf}{\pi} %successor function
\LetThereBe{\playf}{\rho} %player function
\LetThereBe{\actf}{\chi} %action function
% ODEs
\LetThereBe{\timeInt}{\mcal I}
\LetThereBe{\stimeInt}{\mcal J}
\LetThereBe{\Wronsk}{\mcal W}
\letThereBe{\wronsk}{1}{\Wronsk \parentheses{#1}}
\LetThereBe{\prufRadius}{\rho}
\LetThereBe{\prufAngle}{\vf}
\LetThereBe{\weyr}{\sigma}
\LetThereBe{\linDifOp}{\mathsf{L}}
\LetThereBe{\Hurwitz}{\vi H}
\letThereBe{\hurwitz}{1}{\Hurwitz \brackets{#1}}
% Cont. Models
\LetThereBe{\dirac}{\delta}
% PDEs
% \avint -- defined in format-respective tex files
\LetThereBe{\fundamental}{\Phi}
\LetThereBe{\fund}{\fundamental}
\letThereBe{\normaDeriv}{1}{\partialDeriv{#1}{\vec{n}}}
\letThereBe{\volAvg}{2}{\avint_{\ball{#1}{#2}}}
\LetThereBe{\VolAvg}{\volAvg{x}{\ve}}
\letThereBe{\surfAvg}{2}{\avint_{\boundary \ball{#1}{#2}}}
\LetThereBe{\SurfAvg}{\surfAvg{x}{\ve}}
\LetThereBe{\corrF}{\varphi^{\times}}
\LetThereBe{\greenF}{G}
\letThereBe{\reflect}{1}{\tilde{#1}}
\letThereBe{\unitBall}{1}{\alpha(#1)}
\LetThereBe{\conv}{*}
\letThereBe{\dotP}{2}{#1 \cdot #2}
\letThereBe{\translation}{1}{\tau_{#1}}
\declareMathematics{\dist}{dist}
\letThereBe{\regularizef}{1}{\eta_{#1}}
\letThereBe{\fourier}{1}{\widehat{#1}}
\letThereBe{\ifourier}{1}{\check{#1}}
\LetThereBe{\fourierOp}{\mcal F}
\LetThereBe{\ifourierOp}{\mcal F^{-1}}
\letThereBe{\FourierOp}{1}{\fourierOp\set{#1}}
\letThereBe{\iFourierOp}{1}{\ifourierOp\set{#1}}
\LetThereBe{\laplaceOp}{\mcal L}
\letThereBe{\LaplaceOp}{1}{\laplaceOp\set{#1}}
\letThereBe{\Norm}{1}{\absval{#1}}
% SINDy
\LetThereBe{\Koop}{\mcal K}
\letThereBe{\oneToN}{1}{\left[#1\right]}
\LetThereBe{\meas}{\mathrm{m}}
\LetThereBe{\stateLoss}{\mcal J}
\LetThereBe{\lagrm}{p}
% Stochastic analysis
\LetThereBe{\RiemannInt}{(\mcal R)}
\LetThereBe{\RiemannStieltjesInt}{(\mcal {R_S})}
\LetThereBe{\LebesgueInt}{(\mcal L)}
\LetThereBe{\ItoInt}{(\mcal I)}
\LetThereBe{\Stratonovich}{\circ}
\LetThereBe{\infMean}{\alpha}
\LetThereBe{\infVar}{\beta}
% Dynamical systems
\LetThereBe{\nUnit}{\mathrm N}
\LetThereBe{\timeUnit}{\mathrm T}
% Masters thesis
\LetThereBe{\evolOp}{\oper{\vf}}
\letThereBe{\obj}{1}{\mathbb{#1}}
\LetThereBe{\timeSet}{\obj T}
\LetThereBe{\stateSpace}{\obj X}
\LetThereBe{\orbit}{Or}
\letThereBe{\Orbit}{1}{\orbit\brackets{#1}}
\LetThereBe{\limitSet}{\obj \Lambda}
$$
In the Introduction (Chapter 1), we motivated the entire thesis by the usefulness of knowing and understanding synchronization in neuroscience. However, we will not describe any experiments performed on real pairs of neurons in a lab. Instead, we shall deal with a mathematical abstraction of the studied object, i.e., the coupled neurons.
This abstraction is typically called a (mathematical) model (of reality). The model should, in theory, capture all the important characteristics of the underlying reality. If the state of the model evolves in time, e.g., a model of a neuron that starts spiking, we usually call such a model a dynamical system.
Definition 2.1 (Dynamical system) A dynamical system is a triple \(\set{\timeSet, \stateSpace, \evolOp^t}\), where \(\timeSet \subseteq \R\) (time), endowed with addition \(+\), is a subgroup of \((\R, +)\), \(\stateSpace\) is a metric space (called the state space), and \(\set{\evolOp^t}_{t \in \timeSet}\) is a family of evolution operators parametrized by \(t \in \timeSet\), such that \(\evolOp^t : \stateSpace \to \stateSpace\) maps a point \(x_0 \in \stateSpace\) to a state \(x_t = \evolOp^t x_0 \in \stateSpace\).
In Definition 2.1, the time set \(\timeSet\) can take on various forms. In ecology, we often see a discrete \(\timeSet = \N_0\) or \(\timeSet = \Z\), representing a yearly interval between measurements of our system. On the other hand, in physics (and neuroscience) we typically employ \(\timeSet = \R\), as we are concerned with even the shortest time intervals and the associated changes. Similarly, the exact choice of the state space \(\stateSpace\) depends on the system in question, but typically we use \(\R^n\).
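To make the discrete case concrete, one standard example (not specific to this thesis) is the logistic map on \(\stateSpace = [0, 1]\) with \(\timeSet = \N_0\): a single map generates the whole family of evolution operators by iteration, \[
\evolOp^1 x = r\, x \brackets{1 - x}, \qquad \evolOp^n = \underbrace{\evolOp^1 \after \dots \after \evolOp^1}_{n \text{ times}}, \qquad r \in [0, 4].
\]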
Right now, nothing in Definition 2.1 guarantees that the system does not abruptly change its state, because in general \(x \neq \evolOp^0 x\). If the equality \(x = \evolOp^0 x\) fails for at least one \(x \in \stateSpace\), then such a system is called stochastic.
Definition 2.2 (Deterministic dynamical system) A dynamical system, see Definition 2.1, is called deterministic if and only if the following condition is fulfilled \[
\evolOp^0 = \id,
\tag{2.1}\] in other words, \(\forall x \in \stateSpace: x = \evolOp^0 x\).
From now on, we will predominantly use deterministic dynamical systems. Another assumption we shall make throughout this thesis is that the “laws of nature” do not change in time, i.e., we presume the dynamical system in question is autonomous (although it may depend on the past).
Definition 2.3 (Autonomous dynamical system) A deterministic dynamical system, see Definition 2.2, is called autonomous if and only if the following condition is fulfilled \[
\forall t,s \in \timeSet: \evolOp^{t+s} = \evolOp^{t} \circ \evolOp^{s},
\tag{2.2}\] in other words, \(\forall x \in \stateSpace \, \forall t,s \in \timeSet: \evolOp^{t+s} x = \evolOp^t (\evolOp^s x)\).
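To illustrate conditions (2.1) and (2.2) on a concrete system, consider for instance the scalar linear ODE \(\dot x = a x\) with \(a \in \R\), whose evolution operator is \(\evolOp^t x_0 = \mathrm{e}^{a t} x_0\). Then \[
\evolOp^0 x_0 = \mathrm{e}^{0} x_0 = x_0, \qquad \evolOp^{t+s} x_0 = \mathrm{e}^{a (t + s)} x_0 = \mathrm{e}^{a t} \brackets{\mathrm{e}^{a s} x_0} = \evolOp^t \brackets{\evolOp^s x_0},
\] so this system is both deterministic and autonomous.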
Most often, a dynamical system is given implicitly by some differential equation, be it an ordinary differential equation (ODE), e.g. \[
\dot{x}(t) = \deriv {x(t)} t = x(t) \cdot r_0 \cdot \brackets{1 - \frac {x(t)} K},
\tag{2.3}\] or a delay differential equation (DDE), for example a modified version of (2.3) \[
\dot{x}(t) = x(t - \tau) \cdot r_0 \cdot \brackets{1 - \frac {x(t)} K},
\] where \(\tau > 0\).
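As a minimal numerical sketch (not part of the thesis itself, assuming SciPy and hypothetical parameter values \(r_0 = 1\), \(K = 10\)), the evolution operator \(\evolOp^t\) induced by the logistic ODE (2.3) can be approximated by integrating the equation forward in time; the semigroup property (2.2) then holds up to integration error.

```python
# A minimal sketch (not part of the thesis): the evolution operator phi^t
# induced by the logistic ODE (2.3), approximated with SciPy's solve_ivp.
from scipy.integrate import solve_ivp

r0, K = 1.0, 10.0  # hypothetical growth rate and carrying capacity


def logistic_rhs(t, x):
    """Right-hand side of (2.3): dx/dt = x * r0 * (1 - x / K)."""
    return x * r0 * (1.0 - x / K)


def evolution_operator(t, x0):
    """Approximate phi^t x0 by integrating (2.3) from time 0 to t."""
    sol = solve_ivp(logistic_rhs, (0.0, t), [x0], rtol=1e-9, atol=1e-12)
    return sol.y[0, -1]


# The semigroup property (2.2) should hold up to integration error:
x0 = 0.5
print(evolution_operator(3.0, x0))                           # phi^3 x0
print(evolution_operator(2.0, evolution_operator(1.0, x0)))  # phi^2(phi^1 x0)
```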
Basic concepts
In this section, we shall introduce basic concepts regarding dynamical systems, including, but not limited to, notions of certain special solutions and their stability. Little commentary beyond the definitions themselves will be provided, as an interested reader can find a much more thorough treatment in the literature.
Definition 2.4 (Orbit) An orbit (trajectory) with an initial point \(x_0 \in \stateSpace\) is an ordered subset of the state space \(\stateSpace\), \[
\Orbit{x_0} = \set{x \in \stateSpace \divider x = \evolOp^t x_0 \text{ for some } t \in \timeSet \text{ such that } \evolOp^t x_0 \text{ is defined}}
\]
In the case of a continuous dynamical system, the orbits are oriented curves in the state space. For a discrete dynamical system, they become sequences of points in \(\stateSpace\).
Definition 2.5 (Phase portrait) A phase portrait of a dynamical system is a partitioning of the state space into trajectories.
Definition 2.6 (Equilibrium) A point \(x_0 \in \stateSpace\) is called an equilibrium (fixed point) if \(\evolOp^t x_0 = x_0\) for all \(t \in \timeSet\).
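For a system generated by an ODE, the equilibria are precisely the zeros of its right-hand side. The logistic ODE (2.3), for example, has exactly two equilibria: \[
x \cdot r_0 \cdot \brackets{1 - \frac x K} = 0 \iff x = 0 \;\text{ or }\; x = K,
\] corresponding to extinction and to the carrying capacity \(K\), respectively.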
Definition 2.7 (Cycle) A cycle is a periodic orbit, namely a non-equilibrium orbit \(L\), such that each point \(x_0 \in L\) satisfies \(\evolOp^{t + T} x_0 = \evolOp^{t} x_0\) for some \(T > 0\) and all \(t \in \timeSet\). The smallest admissible \(T\) is called the period of the cycle \(L\).
Definition 2.8 (Invariant set) An invariant set of a dynamical system \(\set{\timeSet, \stateSpace, \evolOp^t}\) is a subset \(\obj S \subset \stateSpace\) which satisfies \[
x \in \obj{S} \implies \evolOp^t x \in \obj{S} \; \forall t \in \timeSet.
\]
Definition 2.9 (\(\omega\)-limit and \(\alpha\)-limit point) A point \(x_* \in \stateSpace\) is called an \(\omega\)-limit point (resp. \(\alpha\)-limit point) of the orbit \(\Orbit{x_0}\) starting at \(x_0 \in \stateSpace\) if there exists a sequence of times \(\seqnc{t}{k}{1}{\infty} \subseteq \timeSet\) with \(t_k \to \infty\) (resp. \(t_k \to - \infty\)), such that \[
\evolOp^{t_k} x_0 \onBottom{\longrightarrow}{k \to \infty} x_*.
\]
Definition 2.10 (\(\omega\)-limit and \(\alpha\)-limit set) A set \(\obj \Omega(\Orbit{x_0})\) of all \(\omega\)-limit points of the orbit \(\Orbit{x_0}\), see Definition 2.9, is called an \(\omega\)-limit set. Similarly, a set \(\obj A(\Orbit{x_0})\) of all \(\alpha\)-limit points of the orbit \(\Orbit{x_0}\) is called an \(\alpha\)-limit set.
Lastly, a set \(\limitSet(\Orbit{x_0}) = \obj \Omega(\Orbit{x_0}) \cup \obj A(\Orbit{x_0})\) of all limit points of the orbit \(\Orbit{x_0}\) is called its limit set.
Definition 2.11 (Limit cycle) A limit cycle is a cycle of a dynamical system, see Definition 2.7, which is also a limit set, see Definition 2.10, of neighboring orbits.
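A standard textbook example (not taken from this thesis) is the planar system written in polar coordinates as \[
\dot r = r \brackets{1 - r^2}, \qquad \dot \tht = 1,
\] where the unit circle \(r = 1\) is a cycle with period \(2 \pi\), and every orbit with \(r \neq 0\) spirals towards it; the cycle is therefore the \(\omega\)-limit set of the neighboring orbits, i.e., a limit cycle.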