This commit is contained in:
Palindrom73 2019-03-03 20:34:58 +01:00
commit ca45778cd1
42 changed files with 4859 additions and 88 deletions

View file

@ -1 +1,122 @@
\section{Eindeutigkeit von Maßen}
Algebra = $\sigma$-Algebra und $G$ ist Grundmenge.\\
\underline{Ziel:} $\lambda^d$ (oder allgemeines Maß) auf Erzeuger $\mathscr{G}$ definieren und dann auf $\sigma(\mathscr{G})$ fortsetzen.
\underline{Brauche:} Wohldefiniertheit $\longleftrightarrow \exists !$ Fortsetzung\\
\underline{Problem:} $\sigma(\mathscr{G})$ im Allgemeinen nicht ``konstruierbar''\\
\begin{definition}[\person{Dynkin}-System]
$\mathscr{D} \subset \mathscr{P}(E)$ heißt \begriff{\person{Dynkin}-System}, wenn
\begin{itemize}
\item $(D_1)$ $E \in \mathscr{D}$
\item $(D_2)$ $D \in \mathscr{D} \Rightarrow D^C \in \mathscr{D}$
\item $(D_3)$ $(D_n)_{n\in \natur} \subset \mathscr{D}$ und paarweise Disjunktheit $\Rightarrow \biguplus_{n \in \natur} D_n \in \mathscr{D}$
\end{itemize}
\end{definition}
\begin{remark}
\begin{enumerate}[label=(\alph*)]
\item Jede $\sigma$-Algebra ist insbesondere ein Dynkin-System, da $(D_3)$ schwächer als $S_3$.
\item Wie in \propref{2_2}a)b) sieht man $\emptyset \in \mathscr{D}, A,B \in \mathscr{D}$ und $A \cap B = \emptyset \Rightarrow A \sqcup B \in \mathscr{D}$
\end{enumerate}
\end{remark}
\begin{proposition}
\proplbl{4_3}
\begin{enumerate}[label=(\alph*)]
\item Für $\mathscr{G} \subset \mathscr{P}(E)$ beliebig existiert ein kleinstes (minimales) Dynkin-System $\mathscr{D}$ mit $\mathscr{G} \subset \mathscr{D}$ mit Notation $\delta(\mathscr{G})$. $\delta(\mathscr{G})$ ist das von $\mathscr{G}$ erzeugte Dynkin-System.
\item $\mathscr{G} \subset \delta(\mathscr{G}) \subset \sigma(\mathscr{G})$
\end{enumerate}
\end{proposition}
\begin{proof}
\begin{enumerate}[label=(\alph*)]
\item wie in \propref{2_4} wörtlich %TODO set ref
\item todo
\end{enumerate}
\end{proof}
\underline{Ziel:} Zusammenhang Dynkin-System $\leftrightarrow \sigma$-Algebra
\begin{lemma}
\proplbl{4_4}
Ein Dynkin-System $\mathscr{D}$ ist eine Algebra $\Leftrightarrow \mathscr{D}$ ist $\cap$-stabil $(\forall D,F \in \mathscr{D}\colon D \cap F \in \mathscr{D})$
\end{lemma}
\begin{proof}
\begin{itemize}
\item $(\Rightarrow)$ Wenn $\mathscr{D}$ Algebra dann insbesondere
\begin{enumerate}[label=(\alph*)]
\item $\mathscr{D}$ Dynkin-System, da $S_1=D_1, S_2=D_2, S_3 \to D_3$
\item $\mathscr{D}$ $\cap$-stabil nach \propref{2_2}c)
\end{enumerate}
\item $(\Leftarrow)$ Sei $\mathscr{D}$ ein $\cap$-stabiles Dynkin-System. Zeige $S_3$, d.h. $(D_n)_{n\in \natur} \subset \mathscr{D} \Rightarrow \bigcup_{n\in \natur} D_n \in \mathscr{D}$\\
Idee: disjunkt machen, also $F_{n+1} := ((D_{n+1}\setminus D_n)\setminus D_{n-1})\cdots \setminus D_1$, wobei $F_1 := D_1$\\
Bemerke: $F_{n+1} = D_{n+1} \cap \left(\bigcap_{i=1}^n D_i^C\right) \in \mathscr{D}$, da $\cap$-stabil und $F_n$ disjunkt\\
$\Rightarrow D:= \bigcup_{n\in \natur} D_n = \biguplus_{n \in \natur} F_n \in \mathscr{D}$ (wegen $D_3$)
\end{itemize}
\end{proof}
\begin{proposition}
\proplbl{4_5}
$\mathscr{G} \subset \mathscr{P}(E)$ $\cap$-stabil $\Rightarrow \delta(\mathscr{G}) = \sigma(\mathscr{G})$.
\end{proposition}
\begin{proof}
\begin{enumerate}[label=(\arabic*)]
\item $\delta(\mathscr{G}) \subset \sigma(\mathscr{G})$ Klar!
\item Wäre $\sigma(\mathscr{G})$ eine Algebra, dann $\sigma(\mathscr{G}) \subset \delta(\mathscr{G})$\\
Grund: $\mathscr{G} \subset \delta(\mathscr{G})$ wegen Definition und $\delta(\mathscr{G})$ wäre Algebra, damit folgt $\sigma(\mathscr{G}) \subset \delta(\mathscr{G})$ ($\sigma(\mathscr{G})$ minimale Algebra mit $\mathscr{G} \subset \sigma(\mathscr{G})$), dann folgt mit 1) und \propref{4_3}a) $\delta(\mathscr{G}) = \sigma(\mathscr{G})$
\item Zeige $\delta(\mathscr{G})$ $\cap$-stabil. Dann \propref{4_4} $\Rightarrow \delta(\mathscr{G})$ Algebra, fertig
\item $D \in \delta(\mathscr{G})$ fest und behaupte $\mathscr{D}_{D} = \{Q \subset E \mid Q \cap D \in \delta(\mathscr{G})\}$ ist ein Dynkin-System:
\begin{itemize}
\item $(D_1)$ $E \in \mathscr{D}_D$, da $E \cap D = D \in \delta(\mathscr{G})$
\item $(D_2)$ Sei $Q \in \mathscr{D}_D$. zu zeigen: $Q^C \in \mathscr{D}_D$
\begin{align}
Q^C \cap D = (Q^C \cup D^C)\cap D &\overset{\ast}{=} (Q \cap D)^C \cap D \quad& \ast: \text{ de Morgan} \notag\\
&= ((Q \cap D) \uplus D^C)^C \in \delta(\mathscr{G}) \label{4_4_eq} \tag{\#}
\end{align}
Damit folgt aus der Definition von $\mathscr{D}_D$, dass $Q^C \in \mathscr{D}_D$. In \eqref{4_4_eq} wurde benutzt, dass $Q \cap D$ und $D^C$ disjunkt sind (wegen $Q \cap D \subset D$)
\item $(D_3)$ $(Q_n)_{n\in \natur} \subset \mathscr{D}_D$ disjunkt $\Rightarrow (Q_n \cap D)_{n \in \natur} \subset \delta(\mathscr{G})$ disjunkt (gilt wegen Def von $\mathscr{D}_D$)\\
$\delta(\mathscr{G}) \overset{D_3}{\ni} \biguplus_{n \in \natur} (Q_n \cap D) = \left(\biguplus_{n \in \natur} Q_n\right)\cap D \overset{\text{Def. }\mathscr{D}_D}{\Rightarrow} \biguplus_{n \in \natur} Q_n \in \mathscr{D}_D$
\end{itemize}
\item Zeige $\delta(\mathscr{G}) \subset \mathscr{D}_D$ für $D \in \delta(\mathscr{G})$ fest aber beliebig\\
Gilt $\delta(\mathscr{G}) \subset \mathscr{D}_D$ für alle $D \in \delta(\mathscr{G})$, d.h. $\forall Q, D \in \delta(\mathscr{G})\colon Q \cap D \in \delta(\mathscr{G})$, so ist $\delta(\mathscr{G})$ $\cap$-stabil.\\
Klar $\mathscr{G} \subset \delta(\mathscr{G})$ und $\mathscr{G}$ sei $\cap$-stabil (Voraussetzung) $\Rightarrow \mathscr{G} \subset \mathscr{D}_G\quad \forall G \in \mathscr{G}$
\begin{align}
&\Rightarrow \delta(\mathscr{G}) \subset \mathscr{D}_G\quad \forall G \in \mathscr{G}, \text{ da } \mathscr{D}_G\text{ Dynkin-System}\notag \\
&\overset{\text{Def. } \mathscr{D}_G}{\Rightarrow} G \cap D \in \delta(\mathscr{G}) \quad \forall G \in \mathscr{G}, \forall D \in \delta(\mathscr{G})\notag \\
&\overset{\text{Def. }\mathscr{D}_D}{\Rightarrow} G \in \mathscr{D}_D \quad \forall G \in \mathscr{G}, \forall D \in \delta(\mathscr{G}) \quad \text{ Tausche } D \leftrightarrow G \notag \\
&\Rightarrow \mathscr{G} \subset \mathscr{D}_D \quad \forall D \in \delta(\mathscr{G}) \notag\\
&\xRightarrow[\text{Dyn. Sys}]{\mathscr{D}_D} \delta(\mathscr{G}) \subset \mathscr{D}_D \forall D \in \delta(\mathscr{G}) \notag\\
&\Rightarrow \forall Q \in \delta(\mathscr{G}) \colon Q \cap D \in \delta(\mathscr{G})\notag \\
&\Rightarrow \delta(\mathscr{G}) \cap\text{-stabil}\notag
\end{align} %TODO fix arrows.
\end{enumerate}
\end{proof}
Wir brauchen \propref{4_5} an 2 Stellen: hier und bei Produktmaßen
\begin{proposition}[Eindeutigkeitssatz]
\proplbl{4_6}
$(E, \mathscr{A})$ beliebiger Messraum, $\mu, \nu$ zwei Maße und $\mathscr{A} = \sigma(\mathscr{G})$ und
\begin{enumerate}[label=(\alph*)]
\item $\mathscr{G}$ ist $\cap$-stabil
\item $\exists (G_n)_{n \in \natur} \subset \mathscr{G}, G_n \uparrow E, \mu(G_n), \nu(G_n) < \infty$
\end{enumerate}
$\Rightarrow \forall G \in \mathscr{G}\colon \mu(G) = \nu(G) \Rightarrow \forall A \in \sigma(\mathscr{G})\colon \mu(A) = \nu(A)$\\
Kurznotation: $\mu\vert_{\mathscr{G}} = \nu\vert_{\mathscr{G}} \Rightarrow \mu=\nu$
\end{proposition}
\begin{proof}
$\forall n \colon \mathscr{D}_n := \{A \in \mathscr{A} \mid \mu(G_n \cap A) = \nu(G_n \cap A)\}$
\begin{enumerate}[label=(\alph*)]
\item $\mathscr{D}_n$ ist Dynkin-System $\forall n, n$ fest
\end{enumerate}
\end{proof}
\begin{remark}[Sonderfall]
$\mu, \nu$ $W$-Maße (oder $\mu(E) = \nu(E) < \infty$), dann kann man b) weglassen\\
\underline{Grund:} $\mathscr{G} \rightsquigarrow \mathscr{G} \cup \{E\} = \{B\colon B \in \mathscr{G} \text{ oder }B = E\}$ und $G_n := E \uparrow E \Rightarrow$ b)
\end{remark}
%TODO finish proofs.

View file

@ -5,23 +5,24 @@ Wenn man ein Integral hat: $\int_{t_0}^{t}F(t)\diff t$, also wird das $\diff t$
%TODO graph
\newline Wir messen Mengen:
\begin{align}
\mu: \mathcal{F} \to [0,\infty] \text{ mit }\mathcal{F} \subset \mathcal{P}(X) \notag
\mu: \mathcal{F} \to [0,\infty] \text{ mit }\mathcal{F} \subset \mathcal{P}(E) \notag
\end{align}
Dabei ist:
\begin{itemize}
\item $X$ eine beliebige Grundmenge
\item $\mathcal{P}(X)=\{A\mid A\subset X\}$ die Potenzmenge von $X$
\item $E$ eine beliebige Grundmenge
\item $\mathcal{P}(E)=\{A\mid A\subset E\}$ die Potenzmenge von $E$
\item $F \to \mu(F) \in [0,\infty]$
\end{itemize}
\textbf{Konvention:}
\begin{itemize}
\item Familien von Mengen: $\mathcal{A}, \mathcal{B}, \mathcal{C}, \mathcal{F}, \dots, \mathcal{R}$
\item Mengen: $A, B, X$
\item Mengen: $A, B, E$
\item Maße: $\mu, \lambda, \nu, \rho, \delta$
\item Abbildungen: $\phi, \psi, \gamma, \eta$
\end{itemize}
\begin{example}[Flächenmessung]
\begin{*example}[Flächenmessung]
%TODO needs graph! and no counting for this example!
\begin{align}
\mu(F) = g \cdot h &= \mu(F_1) + \mu(F_2) + \mu(F_3)\notag\\
@ -32,7 +33,7 @@ Dabei ist:
$\mu(F) = \mu(\Delta_1)+\mu(\Delta_2)$ mit $\mu(\Delta) = 0.5 gh$\\ %TODO graph
Allgemein für Dreiecke: \\%TODO graph
$\mu(\Delta) = 0.5 gh \overset{!}{=} 0.5 g^{\prime}h^{\prime}$ und das ganze ist wohldefiniert!
\end{example}
\end{*example}
Dreiecke lassen allgemeine Flächenberechnung zu - Triangulierung!
%TODO graph

View file

@ -1 +1,63 @@
\section{Integration positiver Funktionen}
\section{Existenz von Maßen}
\underline{Ziel:} Fortsetzung von Prämaßen auf Erzeuger $\rightarrow \sigma(\text{Erzeuger})$
\begin{*example}
$\lambda^d$ auf $\mathscr{I}=$ halboffene Rechtecke und $\sigma(\mathscr{I}) = \mathscr{B}(\real^d)$. Wenn Fortsetzung existiert $\xRightarrow{\propref{4_6}}$ Fortsetzung eindeutig.
\end{*example}
\begin{definition}[Halbring]
Eine Famile $\mathscr{S} \subset \mathscr{P}(E)$ heißt \begriff{Halbring} über $E$, wenn gilt:
\begin{itemize}
\item $(S_1)$ $\emptyset \in \mathscr{S}$
\item $(S_2)$ $S,T \in \mathscr{S} \Rightarrow S\cap T \in \mathscr{S}$
\item $(S_3)$ $\forall S,T \in \mathscr{S}, \exists S_1,\dots,S_m \in \mathscr{S}, m \in \natur$, disjunkt: $S\setminus T = \biguplus_{i=1}^{m} S_i$
\end{itemize}
\end{definition}
\begin{remark}
$\mathscr{I}$ ist Halbring in $\real^d$
\begin{enumerate}[label=(\alph*)]
\item $d=1:$ per Hand (trivial)
\item $d>1:$ Induktion (siehe Fubini) %TODO set reference!
\item Intuition: %TODO add figure for intution
\end{enumerate}
\end{remark}
Zentraler Satz der Maßtheorie:
\begin{proposition}[\person{Carathéodory}, Fortsetzungssatz]
\proplbl{5_3}
Sei $\mathscr{S}$ ein Halbring über $E$ und $\mu: \mathscr{S} \to [0,\infty]$ Prämaß, d.h.
\begin{enumerate}[label=(\alph*)]
\item $\mu(\emptyset) = 0$
\item $\forall (S_i)_{i \in \natur} \subset \mathscr{S}$, disjunkt und $\biguplus_{i\in \natur} S_i \in \mathscr{S}$ gilt: $\mu\left(\biguplus_{i\in \natur} S_i\right) = \sum_{i \in \natur}\mu(S_i)$
\end{enumerate}
$\Rightarrow \exists$ Fortsetzung von $\mu$ zu einem Maß auf $\sigma(\mathscr{S})$.\\
\underline{Zusatz:} Wenn $(G_i)_{i \in \natur} \subset \mathscr{S}, G_i \uparrow E, \mu(G_i) < \infty \Rightarrow \exists !$ Fortsetzung.
\end{proposition}
\begin{remark}
\propref{5_3}b) $\equiv$ $\mu$ ist relativ zu $\mathscr{S}$ $\sigma$-additiv; Satz sagt: $\sigma$-additiv vererbt sich auf $\sigma(\mathscr{S})$\\
Hauptproblem bleibt aber die Existenz einer Fortsetzung.
\end{remark}
\begin{proof}[\propref{5_3}]
Beweisskizze: %TODO
Beweis:
\end{proof}
\begin{proposition}
$\lambda^1$ ist Prämaß auf $\mathscr{I}$.
\end{proposition}
\begin{proof}
...
\end{proof}
\begin{conclusion}
$\lambda^1$ ist Maß auf $\sigma(\mathscr{I}) = \mathscr{B}(\R)$. Es ist das einzige Maß mit $\lambda^1[a,b) = b - a$.
\end{conclusion}
\begin{proof}
...
\end{proof}

View file

@ -5,9 +5,9 @@ Sei $E \neq \emptyset$ beliebige Grundmenge.
\begin{definition}[Maß]
Ein \begriff{Maß} $\mu$ ist eine Abbildung $\mu: \mathscr{A} \to [0,\infty]$ mit folgenden Eigenschaften:
\begin{itemize}
\item ($M_0$) $\mathscr{A}$ ist eine $\sigma$-Algebra auf $E$
\item ($M_1$) $\mu(\emptyset) = 0$
\item ($M_2$) $(A_n)_{n \in \natur} \subset \mathscr{A}$ paarweise disjunkt $\Longleftarrow \mu(\coprod_{n\in \natur} A_n\big) = \sum_{n\in \natur} \mu(A_n)$
\item $(M_0)$ $\mathscr{A}$ ist eine $\sigma$-Algebra auf $E$
\item $(M_1)$ $\mu(\emptyset) = 0$
\item $(M_2)$ $(A_n)_{n \in \natur} \subset \mathscr{A}$ paarweise disjunkt $\Longleftarrow \mu(\coprod_{n\in \natur} A_n\big) = \sum_{n\in \natur} \mu(A_n)$
\end{itemize}
Gilt für $\mu: \mathscr{A} \to [0,\infty]$ nur $(M_1),(M_2)$, dann heißt $\mu$
\end{definition}
@ -129,7 +129,7 @@ $\rightsquigarrow$ Setze Antwort ``ja'' voraus, zeige Eigenschaften.
$\lambda^d$ existiert als Maß auf $(\real^d, \mathscr{B}(\real^d))$ und es ist durch die Werte auf $\mathscr{J}$ eindeutig bestimmt; für alle $B \in \mathscr{B}(\real^d)$ gilt:
\begin{enumerate}[label=(\alph*)]
\item $\lambda^d$ ist translationsinvariant: $\lambda^d(x+B) = \lambda^d(B)$, wobei $B \in \mathscr{B}(\real^d), x+B := \{x+b \colon b \in B\}$
\item $\lambda^d$ ist bewegungsinvariant: $\lambda^d(R^{-1}(B)) = \lambda^d(B)$, mit $\forall R:\real^d \to \real^d$ Bewegung, d.h. kombination aus Translation, Drehung, Spiegelung
\item $\lambda^d$ ist bewegungsinvariant: $\lambda^d(R^{-1}(B)) = \lambda^d(B)$, mit $\forall R:\real^d \to \real^d$ Bewegung, d.h. Kombination aus Translation, Drehung, Spiegelung
\item $\lambda^d(M^{-1}(B)) = \vert \det(M)\vert^{-1} \lambda^d(B)\quad \forall M \in \GL(\real^d)$
\end{enumerate}
\end{proposition}

View file

@ -1 +1,132 @@
\section{Messbare Abbildungen}
Seien $(E, \sigA), (E^{'}, \sigA^{'})$ zwei Messräume\\
$T: E \to E^{'}$ Abbildung ``$T$ respektiert'' $\sigA$ und $\sigA^{'}$ auf $E$ bzw. $E^{'}$\\
Kenne die Frage (\propref{4_8}): $B \in \sigB (\Rd), x \in \Rd \to x + B \in \sigB (\Rd)$ (Beweis via $\mathscr{I}$= Erzeuger von $\sigB (\Rd)$)
\begin{definition}[messbare Abbildung]
Eine Abbildung $T: E \to E^{'}$ heißt $(\sigA / \sigA^{'})$-messbar, wenn gilt
\begin{align}
\forall A^{'} \in \sigA^{'}: T^{-1}(A^{'}) \in \sigA
\end{align}
Notation: $T^{-1}(\sigA^{'}) := \{T^{-1}(A^{'}) \mid A^{'} \in \sigA^{'}\} \subset \sigA$
\end{definition}
%TODO add remarks here
\begin{lemma}
\proplbl{6_2}
Sei $\sigA^{'} = \sigma(\sigG^{'})$ für ein $\sigG^{'}$.
\begin{align}
T: E \to E^{'} \text{ ist } \sigA / \sigA^{'} \text{ messbar } \Leftrightarrow \forall G^{'} \in \sigG^{'}: T^{-1}(G^{'}) \in \sigA
\end{align}
d.h. Messbarkeit reicht am Erzeuger zu testen.
\end{lemma}
\begin{proof}
...
\end{proof}
\begin{example}
Jede stetige Abbildung $T: \Rd \to \Rn$ ist Borel-$(\sigB (\Rd) / \sigB (\Rn))$ - messbar\\
Grund: $\sigB(\Rn) = \sigma(\sigO^n)$, $\sigO^n :=\{\text{offene Mengen }\subseteq \Rn\}$
\begin{align}
f \text{ stetig } \Rightarrow f^{-1}(\sigO^n) \subset \sigO^d \subset \sigB (\Rd) \text{ und } \propref{6_2}
\end{align}
\end{example}
Achtung: stetig $\Rightarrow$ Borel-messbar $\not \Rightarrow$ stetig\\
Beispiel\\
\begin{proposition}
\proplbl{6_4}
Seien $(E_i, \sigA_i), i = 1,2,3$ Messräume und
\begin{itemize}
\item $T: E_1 \to E_2 \quad \sigA_1 / \sigA_2$- messbar
\item $T: E_2 \to E_3 \quad \sigA_2 / \sigA_3$- messbar
\end{itemize}
$\Rightarrow S \circ T: E_1 \to E_3$ ist $\sigA_1 / \sigA_3$-messbar.
\end{proposition}
\begin{proof}
...
\end{proof}
\begin{lemma}[auch Definition]
$(T_i)_{i \in I}$ beliebig viele Abbildungen $T_i: E \to E_i$ und $(E_i, \sigA_i)$ sei Messraum für alle $i \in I$. Dann ist
\begin{align}
\sigma(T_i, i \in I) &:= \sigma(\bigcup_{i \in I}T^{-1}_i(\sigA_i))\notag \\
&= \sigma( \{A \subset E \mid \exists i \in I\colon A \in T^{-1}_i(\sigA_i)\})
\end{align}
die kleinste $\sigma$-Algebra in $E$, sodass alle $T_i: E \to E_i$ gleichzeitig messbar sind.\\
Sprechweise: ``von den $(T_i)_{i\in I}$ erzeugte $\sigma$-Algebra''
\end{lemma}
\begin{proof}
...
\end{proof}
\begin{proposition}[Bildmaß]
\proplbl{6_6}
$T: (E, \sigA) \to (E^{'}, \sigA^{'})$ messbar und $\nu$ sei Maß auf $(E, \sigA)$. Dann definiert
\begin{align}
\forall A^{'} \in \sigA^{'}: \nu^{'}(A^{'}) := \nu(T^{-1}(A^{'}))
\end{align}
ein Maß auf $(E^{'}, \sigA^{'})$.
\end{proposition}
\begin{proof}
...
\end{proof}
\begin{definition}[Bildmaß]
\proplbl{6_7}
Das Maß $\nu^{'}$ aus \propref{6_6} heißt \begriff{Bildmaß} von $\nu$ unter $T$ (engl. image measure, push forward).\\
Notation: $T(\nu)$ oder $T\ast \nu$ oder $\nu \circ T^{-1}$
\end{definition}
\begin{example}
\begin{enumerate}[label=(\alph*)]
\item $\lambda^d(x+B) = \lambda^d(\tau_x^{-1}(B)) = \tau_x(\lambda^d)(B)$
\item W-Theorie: $(\Omega, \sigA, \probP)$ Wahrscheinlichkeitsraum, $\probP(\Omega) = 1$
\begin{align}
&\xi: (\Omega, \sigA) \to (\Rd, \sigB (\Rd)) &\text{``Zufallsvariable''} \notag \\
&\xi(\probP)(B) = \probP \circ \xi^{-1}(B) = \probP(\{ \xi \in B\}) &\text{``Verteilung von $\xi$''}\notag \\
&\{\xi \in B\} = \{ \omega \in \Omega \mid \xi(\omega) \in B \} = \xi^{-1}(B) &
\end{align}
konkret: $2$ mal Würfeln %TODO finish this later up!
\end{enumerate}
\end{example}
Achtung: $T: (E, \pows(E)) \to (E^{'}, \sigA^{'})$, die Potenzmenge $\pows(E)$ macht alle $T$ für alle $\sigA^{'}$ messbar.
\begin{proposition}
\proplbl{6_9}
Sei $T \in \Orth(\Rd) = \{T \in \R^{d\times d}\colon T^t \cdot T = \id_{\Rd}\}$ Orthogonale Matrizen\\
$\Rightarrow T(\lambda^d) = \lambda^d$, da $\vert \det (T)\vert = 1$
\end{proposition}
\begin{proof}
...
\end{proof}
\begin{proposition}
\proplbl{6_10}
Sei $S \in \GL(\Rd)$ ($\det(S) \neq 0$). Dann
\begin{align}
S(\lambda^d) \overset{Def}{=} \lambda^d \circ S^{-1} = \vert \det(S^{-1})\vert \lambda^d = \frac{1}{\vert \det(S)\vert}\lambda^d
\end{align}
\end{proposition}
\begin{proof}
...
\end{proof}
\begin{conclusion}
$\lambda^d$ invariant unter Bewegung.
\end{conclusion}
\begin{proof}
Bewegung $=$ Kombination aus Shifts $\tau_x$ und Matrizen $T$ mit $\vert \det(T)\vert = 1$ und \propref{6_10}.
\end{proof}

View file

@ -2,6 +2,9 @@
\textbf{Ziel:} Charakterisierung der Definitionsgebiete von Maßen.
% $\C \H$ % stray fragment — appears to be an accidental paste; commented out, TODO confirm and remove
\begin{definition}[$\sigma$-Algebra, messbar]
Eine \begriff{$\sigma$-Algebra} über einer beliebigen Grundmenge $E \neq \emptyset$ ist eine Familie von Mengen in $\mathscr{P}(E), \mathscr{A} \subset \mathscr{P}(E)$:
\begin{itemize}
@ -13,6 +16,7 @@
\end{definition}
\begin{proposition}[Eigenschaften einer $\sigma$-Algebra]
\proplbl{2_2}
Sei $\mathscr{A}$ eine $\sigma$-Algebra über $E$.
\begin{enumerate}[label=(\alph*)]
\item $\emptyset\in\mathscr{A}$
@ -41,7 +45,7 @@
\item $\{\emptyset,X\}$ ist eine $\sigma$-Algebra (kleinstmögliche)
\item $\{\emptyset,A,A^C,X\}$ ist eine $\sigma$-Algebra
\item $\{\emptyset,B,X\}$ ist eine $\sigma$-Algebra, wenn $B=\emptyset$ oder $B=X$
\item $\mathscr{A}=\{A\subset X\mid \#A\le \#\natur\text{ oder } \#A^C\le \#\natur\}$ ist eine \sigmalg %TODO needs the proof still!
\item $\mathscr{A}=\{A\subset X\mid \#A\le \#\natur\text{ oder } \#A^C\le \#\natur\}$ ist eine $\sigma$-Algebra %TODO needs the proof still!
\item Spur-$\sigma$-Algebra: $E \subset X,\mathscr{A}$ ist $\sigma$-Algebra in $X \Rightarrow \mathscr{A}_E := \{E \cap A \mid A \in \mathscr{A}\}$ ist eine $\sigma$-Algebra.
\item Urbild-$\sigma$-Algebra: $f: X \to Y$ eine Abbildung, $X,Y$ Mengen, $\mathscr{A}_Y$ sei $\sigma$-Algebra in $Y$ $\Rightarrow \mathscr{A} := \{f^{-1}(A_Y)\mid A_Y \in \mathscr{A}\}$ eine $\sigma$-Algebra.
\end{enumerate}
@ -63,6 +67,8 @@
\end{enumerate}
\end{proposition}
\begin{proof}
\begin{enumerate}[label=(\alph*)]
\item
@ -82,9 +88,9 @@
\end{align}
\end{itemize}
\item a) sagt:
\begin{align}
\mathscr{A} := \bigcap_{\substack{\mathscr{F} \sigma\text{-Algebra}\\ \mathscr{G} \subset \mathscr{F}}} \mathscr{F} \text{ ist } \sigma-\text{Algebra} \label{2_4_eq} \tag{\ast}
\end{align}
% \begin{align}
% \mathscr{A} := \bigcap_{\substack{\mathscr{F} \sigma\text{-Algebra}\\ \mathscr{G} \subset \mathscr{F}}} \mathscr{F} \text{ ist } \sigma-\text{Algebra} \label{2_4_eq} \tag{\ast}
% \end{align}
Dabei ist $\mathscr{G} \subset \mathscr{F}$, weil $\mathscr{F}=\mathscr{P}(E)$ Kandidat und dann \eqref{2_4_eq} wohldefiniert.
\begin{itemize}
\item Existenz: $\mathscr{A}$ reicht, weil $\mathscr{A}$ wohldefiniert und $\mathscr{G} \subset \mathscr{A}$ und $\mathscr{A}$ ist $\sigma$-Algebra.
@ -172,7 +178,7 @@
\item Jedes $I \in \mathscr{J}^o$ ist eine offene Menge (DIY) $\Rightarrow \mathscr{J}_{rat}^o \subset \mathscr{J}^o \subset \mathscr{O}$
\item Sei $U \in \mathscr{O}$. Dann gilt:
\begin{align}
U = \bigcup_{\substack{I^{'} \in \mathscr{J}_{rat}^o\\ I^{'} \subset U}} I^{'} \label{2_8_eq}\tag{\ast\ast}
U = \bigcup_{\substack{I^{'} \in \mathscr{J}_{rat}^{o}\\ I^{'} \subset U}} I^{'} \label{2_8_eq}\tag{\ast\ast}
\end{align}
Klar in \eqref{2_8_eq} ist $\bigcup_{\dots} I^{'} \subset U$. Für $U \subset \bigcup_{\dots} I^{'}$ bemerken wir, weil $U$ offen ist gilt:
\begin{align}

View file

@ -1 +1,8 @@
\pagebreak
Für die Vorlesung \textit{Maß und Integral} von Prof. \person{Schilling} im WS 2018/19 gibt es zwar schon ein Buch von Prof. \person{Schilling}, das sich jeder Kapitel für Kapitel über die SLUB herunterladen kann. Trotzdem haben wir es uns nicht nehmen lassen, auch für diese Vorlesung ein Skript zu schreiben.\footnote{Also zumindest haben wir das vor; zu dem
Zeitpunkt, an dem ich dieses Vorwort schreibe, ist das Skript noch lange nicht fertig.}
Dem Fakt geschuldet, dass Prof. \person{Schilling} seine Vorlesung sehr lebhaft\footnote{Seine Vorlesung lässt sich mit folgenden Wort eigentlich ganz gut beschreiben: \textit{fabulös}} hält und mit mindestens 3 Farben und jeder Menge Pfeilen arbeitet, war es relativ schwierig daraus ein vernünftiges Skript zu schreiben. Deswegen sind die nachfolgenden Seiten eher eine zusammengefasste und verbesserte Abschrift seines Buches.
Auch wenn wir uns Mühe geben dieses Skript frei von Fehlern zu halten - perfekt sind auch wir nicht. Falls du deswegen einen Fehler beim Lesen findest sind wir froh über jeden Issue, den du auf \url{https://github.com/henrydatei/TUD_MATH_BA} erstellst. So hilfst du deinen jetzigen und zukünftigen Kommilitonen!
Genieße auf jeden Fall die Show von Prof. \person{Schilling} \smiley{}! Ich habe bis jetzt keine Vorlesung erlebt, die mit so viel Begeisterung gehalten wurde.

View file

@ -55,7 +55,7 @@ Vorausgesetzt $F$ ist differenzierbar, dann ist dieses Verfahren durchführbar,
Da $F'$ stetig in der offenen Menge $D$ ist, gibt es $\delta_1>0$ und $M\ge 1$, so dass $B(x^\ast,\delta_1)\subset D$ sowie
\begin{align}
\label{5.4}
F'(x)\text{ regulär und } \Vert F''(x)^{-1}\Vert\le M \quad\forall x\in B(x^\ast,\delta_1)
F'(x)\text{ regulär und } \Vert F'(x)^{-1}\Vert\le M \quad\forall x\in B(x^\ast,\delta_1)
\end{align}
Weiter folgt wegen \propref{5_0_1} und \cref{5.2}
\begin{align}

View file

@ -0,0 +1,7 @@
Auch für die Vorlesung \textit{Einführung in die Numerik} im WS 2018/19 haben wir ein Skript geschrieben. Es ist eine in vielen Teilen verbesserte Version des Skriptes von Prof. \person{Fischer}, die uns leider nur in Papierform vorlag. Wir haben über das ganze Semester die 61 Seiten abgetippt und mit hilfreichen Notizen und Abbildungen aus der Vorlesung angereichert. Bei diesem Prozess hat sich die Nummerierung der Sätze, Bemerkungen, Gleichungen, ... deutlich verändert, aber immerhin gibt es jetzt nicht mehr eine Definition 2.3, einen Satz 2.3, einen Algorithmus 2.3, ....
Während der ersten Vorlesung ist die Diskussion über die Lauffähigkeit der Algorithmen entstanden. Wir sind damals zu dem Schluss gekommen, dass wohl einige Fehler in diesen enthalten sind, aber der grobe Ablauf stimmt überein. Es geht meiner Meinung nach (und wahrscheinlich auch der von Prof. \person{Fischer}) darum, dass man sieht, wie lange ein Algorithmus braucht um ein Problem lösen zu können, also um die Komplexität des Algorithmus.
Wie eigentlich immer, will auch hier gesagt sein, dass es sich lohnt die Vorlesung zu besuchen, auch wenn Prof. \person{Fischer} in der Regel sein Skript an die Tafel schreibt. Aber zwischendurch kommen immer mal wieder nützliche Bemerkungen, die das Verständnis des Stoffes deutlich erleichtern und jede Menge Arbeit in der Nachbereitung ersparen. Ich spreche da aus Erfahrung: Während ich das Skript geschrieben habe, habe ich vielleicht nur 50\% von dem verstanden, was ich da eigentlich geschrieben habe. Aber als ich dann die Vorlesung besucht habe, habe ich mich gefragt, wieso ich diesen Stoff nicht vorher komplett verstanden hatte.
Trotz sorgfältiger Kontrolle kann es vorkommen, dass hier und da noch ein Fehler versteckt ist. In diesem Fall bitten wir darum, dass du ein Issue auf \url{https://github.com/henrydatei/TUD_MATH_BA} erstellst und uns hilfst den Fehler zu beheben. Damit hilfst du nicht nur uns, sondern auch allen zukünftigen Studenten. Danke!

Binary file not shown.

View file

View file

@ -0,0 +1,35 @@
\documentclass[ngerman,a4paper,order=firstname]{../../texmf/tex/latex/mathscript/mathscript}
\usepackage{../../texmf/tex/latex/mathoperators/mathoperators}
\title{\textbf{Algebra und Zahlentheorie SS 2019}}
\author{Dozent: Prof. Dr. \person{Arno Fehm}}
\begin{document}
\pagenumbering{roman}
\pagestyle{plain}
\maketitle
\hypertarget{tocpage}{}
\tableofcontents
\bookmark[dest=tocpage,level=1]{Inhaltsverzeichnis}
\pagebreak
\pagenumbering{arabic}
\pagestyle{fancy}
\chapter*{Vorwort}
\input{./TeX_files/Vorwort}
\chapter{Test}
\part*{Anhang}
\addcontentsline{toc}{part}{Anhang}
\appendix
%\printglossary[type=\acronymtype]
\printindex
\end{document}

View file

View file

@ -0,0 +1,35 @@
\documentclass[ngerman,a4paper,order=firstname]{../../texmf/tex/latex/mathscript/mathscript}
\usepackage{../../texmf/tex/latex/mathoperators/mathoperators}
\title{\textbf{Einführung in die Numerik 2 SS 2019}}
\author{Dozent: Prof. Dr. \person{Andreas Fischer}}
\begin{document}
\pagenumbering{roman}
\pagestyle{plain}
\maketitle
\hypertarget{tocpage}{}
\tableofcontents
\bookmark[dest=tocpage,level=1]{Inhaltsverzeichnis}
\pagebreak
\pagenumbering{arabic}
\pagestyle{fancy}
\chapter*{Vorwort}
\input{./TeX_files/Vorwort}
\chapter{Test}
\part*{Anhang}
\addcontentsline{toc}{part}{Anhang}
\appendix
%\printglossary[type=\acronymtype]
\printindex
\end{document}

View file

View file

@ -0,0 +1,35 @@
\documentclass[ngerman,a4paper,order=firstname]{../../texmf/tex/latex/mathscript/mathscript}
\usepackage{../../texmf/tex/latex/mathoperators/mathoperators}
\title{\textbf{Stochastik SS 2019}}
\author{Dozent: Prof. Dr. \person{?}}
\begin{document}
\pagenumbering{roman}
\pagestyle{plain}
\maketitle
\hypertarget{tocpage}{}
\tableofcontents
\bookmark[dest=tocpage,level=1]{Inhaltsverzeichnis}
\pagebreak
\pagenumbering{arabic}
\pagestyle{fancy}
\chapter*{Vorwort}
\input{./TeX_files/Vorwort}
\chapter{Test}
\part*{Anhang}
\addcontentsline{toc}{part}{Anhang}
\appendix
%\printglossary[type=\acronymtype]
\printindex
\end{document}

Binary file not shown.

View file

@ -0,0 +1,59 @@
\documentclass[british,a4paper,order=firstname]{mathscript}
\usepackage{mathoperators}
\title{\textbf{Applied statistics (spring term 2019)}}
\author{readers: Dr \person{Nikolai Bode} and Dr \person{ Ksenia Shalonova}}
\date{written by \person{Henry Haustein}}
\begin{document}
\pagenumbering{roman}
\pagestyle{plain}
\maketitle
\hypertarget{tocpage}{}
\tableofcontents
\bookmark[dest=tocpage,level=1]{Table of contents}
\pagebreak
\pagenumbering{arabic}
\pagestyle{fancy}
\section{Estimating parameters}
\input{./TeX_files/Estimating_parameters_1}
\input{./TeX_files/Estimating_parameters_2}
\pagebreak
\section{Hypothesis testing}
\input{./TeX_files/Hypothesis_testing_1}
\input{./TeX_files/Hypothesis_testing_2}
\pagebreak
\section{Bootstrapping}
\input{./TeX_files/Bootstrapping}
\pagebreak
\section{Linear models (Simple linear regression)}
\input{./TeX_files/Linear_models_SLR}
\pagebreak
\section{Linear models (Multiple linear regression)}
\input{./TeX_files/Linear_models_MLR}
\pagebreak
\section{Model building}
\input{./TeX_files/Model_building}
\pagebreak
\section{Experimental design and ANOVA}
\input{./TeX_files/Experimental_design_and_ANOVA}
\pagebreak
\section{Generalised linear models}
\input{./TeX_files/Generalised_linear_models}
\pagebreak
\section{Appendix}
\input{./TeX_files/Weibulls_Distribution}
%\addcontentsline{toc}{part}{Appendix}
%\appendix
%\printglossary[type=\acronymtype]
\printindex
\end{document}

Binary file not shown.

View file

@ -0,0 +1,407 @@
\documentclass[british,a4paper,order=firstname]{mathscript}
\usepackage{mathoperators}
\title{\textbf{Applied statistics: Coursework 1}}
\author{\person{Henry Haustein}}
\begin{document}
\pagenumbering{roman}
\pagestyle{plain}
\maketitle
\hypertarget{tocpage}{}
\tableofcontents
\bookmark[dest=tocpage,level=1]{Table of contents}
\pagebreak
\pagenumbering{arabic}
\pagestyle{fancy}
\section{Task 1}
\subsection{Part (1)}
In the given data, there were two out of 26 data points with an Al/Be ratio of more than 4.5. That means
\begin{align}
\hat{p} = \frac{2}{26} = \frac{1}{13}\notag
\end{align}
\subsection{Part (2)}
Using the following formula from the lecture we get the 95\% confidence interval:
\begin{align}
\hat{p}&\pm 2\cdot\sqrt{\frac{\hat{p}(1-\hat{p})}{n}} \notag \\
\frac{1}{13} &\pm \underbrace{2\cdot\sqrt{\frac{\frac{1}{13}\cdot\frac{12}{13}}{26}}}_{0.1045} \notag
\end{align}
Our 95\% confidence interval is [-0.0276,0.1814] which means that we are 95\% sure that the true proportion lies between -0.0276 and 0.1814.
\subsection{Part (3)}
To get the 95\% confidence interval via bootstrap I want to use the \texttt{bootci} function in MATLAB.
\begin{lstlisting}
data = [3.75, 4.05, 3.81, 3.23, 3.13, 3.3, 3.21, 3.32, ...
4.09, 3.9, 5.06, 3.85, 3.88, 4.06, 4.56, 3.6, 3.27, ...
4.09, 3.38, 3.37, 2.73, 2.95, 2.25, 2.73, 2.55, 3.06];
parameter = @(y) length(find(y > 4.5))/length(y);
bootci(10000,{parameter, data},'alpha',0.05,'type',...
'percentile')
\end{lstlisting}
That gives the 95\% confidence interval: [0,0.1923]
\subsection{Part (4)}
Yes, the confidence interval from the bootstrap procedure is more appropriate because it does not contain impossible Al/Be ratios such as $-0.0276$. A negative ratio would suggest that there is a negative number of data points in the sample which exceed 4.5. That is not possible.
\pagebreak
\section{Task 2}
\subsection{Part (1)}
\begin{lstlisting}
x = [-4.5, -1, -0.5, -0.15, 0, 0.01, 0.02, 0.05, ...
0.15, 0.2, 0.5, 0.5, 1, 2, 3];
m = mean(x);
s = std(x);
\end{lstlisting}
\begin{center}
\begin{tabular}{p{4cm}|p{7cm}}
null hypothesis & $H_0$: $\mu = 0$ \\
\hline
alternative hypothesis & $H_A$: $\mu\neq 0$ \\
\hline
t-test for $\mu$ & $t=\frac{m-0}{\frac{s}{\sqrt{15}}} =\frac{0.0853}{\frac{1.6031}{\sqrt{15}}} = 0.2062$ \\
\hline
rejection region & \texttt{tinv(0.05,15)} = -1.7531 \\
\hline
conclusion & $t$ is not in the rejection region so $H_0$ is accepted at the 10\% significance level.
\end{tabular}
\end{center}
\begin{center}
\begin{tikzpicture}
\begin{axis}[
xmin=-3, xmax=3, xlabel=$x$,
ymin=0, ymax=1, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
domain=-3:3,
restrict y to domain=0:1,
width = 16cm,
height = 8cm,
]
\addplot[name path=f,blue] {116640000000*sqrt(15)/(143*pi*(x^2+15)^(8))};
\path[name path=axis] (axis cs:1.7531,0) -- (axis cs:3,0);
\path[name path=axis2] (axis cs:-3,0) -- (axis cs:-1.7531,0);
\draw (axis cs:1.7531,0) -- (axis cs:1.7531,1);
\draw (axis cs:-1.7531,0) -- (axis cs:-1.7531,1);
\draw [dotted] (axis cs:0.2062,0) -- (axis cs:0.2062,0.6);
\node at (axis cs:1,0.5) (a) {0.05};
\node at (axis cs:-1,0.5) (a2) {0.05};
\draw (axis cs:1, 0.46) -- (axis cs: 2.2,0.02);
\draw (axis cs:-1, 0.46) -- (axis cs: -2.2,0.02);
\node at (axis cs: 2.0,0.95) (b) {1.7531};
\node at (axis cs: -2.0,0.95) (b2) {-1.7531};
\node at (axis cs: 0.45,0.55) (c) {0.2062};
\node[red] at (axis cs: 2.4,0.78) (d) {rejection region};
\node[red] at (axis cs: -2.4,0.78) (d2) {rejection region};
\begin{scope}[transparency group]
\begin{scope}[blend mode=multiply]
\addplot [thick,color=blue,fill=blue,fill opacity=0.3] fill between[of=f and axis,soft clip={domain=1.7531:3},];
\addplot [thick,color=blue,fill=blue,fill opacity=0.3] fill between[of=f and axis2,soft clip={domain=-3:-1.7531},];
\draw[red,fill=red,opacity=0.2] (axis cs: 1.7531,0) -- (axis cs: 1.7531,1) -- (axis cs: 3,1) -- (axis cs: 3,0) -- (axis cs: 1.7531,0);
\draw[red,fill=red,opacity=0.2] (axis cs: -1.7531,0) -- (axis cs: -1.7531,1) -- (axis cs: -3,1) -- (axis cs: -3,0) -- (axis cs: -1.7531,0);
\end{scope}
\end{scope}
\end{axis}
\end{tikzpicture}
\end{center}
\subsection{Part (2)}
If we reduce the significance level our rejection region gets smaller. With $\alpha = 0.05$ the rejection region will start at \texttt{tinv(0.025,15)} = -2.1314. The $t$ calculated in part (1) won't change $\Rightarrow$ our decision won't change either.
To get the type 2 error we use the MATLAB function \texttt{sampsizepwr} and the relation $\text{type 2 error} = 1-\text{power}$.
\begin{lstlisting}
testtype = 't';
p0 = [0 1.6031];
p1 = 0.0853;
n = 15;
power = sampsizepwr(testtype,p0,p1,[],n)
\end{lstlisting}
This gives $\text{power} = 0.0542\Rightarrow \text{type 2 error} = 0.9458$. This is the probability of wrongly accepting $H_0$ when it is false.
\subsection{Part (3)}
$H_0$: $\mu=0$, normal distribution, small model $M_S$ \\
$H_A$: $\mu\neq 0$, normal distribution, big model $M_B$ \\
The log-likelihood function for normal distribution is
\begin{align}
\label{log-likelihood}
-\frac{n}{2}\log(2\pi)-\frac{n}{2}\log(\sigma^2)-\frac{1}{2\sigma^2}\sum_{j=1}^{n} (x_j-\mu)^2
\end{align}
Let's start with the MLEs for $\mu$ and $\sigma$ in $M_B$:
\begin{align}
\hat{\mu} &= \frac{1}{n}\sum_{j=1}^n x_j \notag \\
&= 0.0853 \notag \\
\widehat{\sigma^2} &= \frac{1}{n}\sum_{j=1}^n (x_j-\hat{\mu})^2 \notag \\
&= 2.3986 \notag
\end{align}
Maximum possible value for the log-likelihood $\xRightarrow{\cref{log-likelihood}}$ -27.8457. \\
Now we'll calculate the MLE for $\sigma$ in $M_S$:
\begin{align}
\widehat{\sigma^2} &= \frac{1}{n}\sum_{j=1}^n (x_j-\hat{\mu})^2 \notag \\
&= 2.3986 \notag
\end{align}
Maximum possible value for the log-likelihood $\xRightarrow{\cref{log-likelihood}}$ -27.8684. \\
Likelihood ratio test:
\begin{align}
\chi^2 &= 2\Big(l(M_B) - l(M_S)\Big) \notag \\
&= 0.0454 \notag
\end{align}
It should be compared to $\chi^2$(1 degree of freedom) since the difference in unknown parameters is equal to 1. The following piece of MATLAB code will calculate the p-value.
\begin{lstlisting}
p = chi2cdf(0.0454,1,'upper')
\end{lstlisting}
The p-value is 0.8313, which means that we accept $H_0$: the small model $M_S$ fits the data well enough. This is the same result as in parts (1) and (2).
\pagebreak
\section{Task 3}
\subsection{Part (1)}
First of all we need to prepare the data:
\begin{lstlisting}
raw = load('input_data.txt');
data = reshape(raw,[1 500]); %produce a single vector
\end{lstlisting}
After that we carry out the same procedure for every distribution (normal, exponential, uniform, lognormal, \person{Rayleigh}, gamma):
\begin{enumerate}[label=\textbf{\arabic*.}]
\item Estimate the parameter. This is often done with the function \texttt{<distribution>fit} but for estimating the parameters in the gamma distribution I used \texttt{fitdist(data', 'Gamma')} because \texttt{gamfit} doesn't work.
\item Create the CDF with \texttt{makedist}.
\item Run the \person{Kolmogorov-Smirnov} test with \texttt{kstest}.
\end{enumerate}
\begin{lstlisting}
%normal distribution
[mu,sigma] = normfit(data)
norm_cdf = makedist('Normal','mu',mu,'sigma',sigma);
[h,p] = kstest(data,'CDF',norm_cdf)
%exponential distribution
mu = expfit(data)
exp_cdf = makedist('Exponential','mu',mu);
[h,p] = kstest(data,'CDF',exp_cdf)
%uniform distribution
[low,up] = unifit(data)
uni_cdf = makedist('Uniform','lower',low,'upper',up);
[h,p] = kstest(data,'CDF',uni_cdf)
%lognormal distribution
logmu = mean(log(data))
logsigma = std(log(data))
logn_cdf = makedist('Lognormal','mu',logmu,'sigma',logsigma);
[h,p] = kstest(data,'CDF',logn_cdf)
%rayleigh distribution
b = raylfit(data)
rayl_cdf = makedist('Rayleigh','b',b);
[h,p] = kstest(data,'CDF',rayl_cdf)
%gamma distribution
distribution = fitdist(data','Gamma');
a = distribution.a
b = distribution.b
gamma_cdf = makedist('Gamma','a',a,'b',b);
[h,p] = kstest(data,'CDF',gamma_cdf)
\end{lstlisting}
Running this gives the following output. The best fitting distribution is marked green, the worst red.
\begin{center}
\begin{tabular}{c|l|p{3cm}|p{3cm}}
\textbf{distribution}& \textbf{estimated parameters} & \multicolumn{2}{c}{\person{Kolmogorov-Smirnov} \textbf{test}} \\
& & $h$ & $p$ \\
\hline
normal & $\mu=2.3804$, $\sigma=1.2486$ & $h=1$ & $p=0.0158$ \\
\hline
exponential & $\mu=2.3804$ & $h=1$ & $p=2.2618\cdot 10^{-23}$ \\
\hline
\rowcolor{red}uniform & $lower=0.1478$, $upper=7.8807$ & $h=1$ & $p=1.5096\cdot 10^{-72}$ \\
\hline
lognormal & $\log(\mu)=0.7050$, $\log(\sigma)=0.6243$ & $h=1$ & $p=0.0017$ \\
\hline
\rowcolor{green}\person{Rayleigh} & $b=1.9003$ & $h=0$ & $p=0.8939$ \\
\hline
gamma & $a=3.2378$, $b=0.7352$ & $h=0$ & $p=0.2771$ \\
\end{tabular}
\end{center}
\subsection{Part (2)}
\input{./TeX_files/materials/CW1_kstest}
\pagebreak
\section{Task 4}
\subsection{Part (1)}
The probability density function $f(t)$ is
\begin{align}
f(t) = \frac{2t\cdot\frac{\exp(-t^2)}{100}}{100} = \frac{t\cdot\exp(-t^2)}{5000}\notag
\end{align}
\begin{center}
\begin{tikzpicture}
\begin{axis}[
xmin=0, xmax=5, xlabel=$x$,
ymin=0, ymax=0.0001, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
restrict y to domain=0:1,
]
\addplot+[mark=none] {(x*exp(-x^2))/5000};
\end{axis}
\end{tikzpicture}
\end{center}
The cumulative distribution function $F(t)$ is then
\begin{align}
F(t) &= \int_0^t f(\xi)\,\diff\xi \notag \\
&= \int_0^t \frac{\xi\cdot\exp(-\xi^2)}{5000}\,\diff\xi\notag \\
&= \frac{\exp(-t^2)\Big(\exp(t^2)-1\Big)}{10000} \notag
\end{align}
\begin{center}
\begin{tikzpicture}
\begin{axis}[
xmin=0, xmax=5, xlabel=$x$,
ymin=0, ymax=0.0001, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
restrict y to domain=0:1,
]
\addplot+[mark=none] {(exp(-x^2)*(exp(x^2)-1))/10000};
\end{axis}
\end{tikzpicture}
\end{center}
For the survival function we get
\begin{align}
R(t) &= 1 - F(t) \notag \\
&= \frac{\exp(-t^2)+9999}{10000} \notag
\end{align}
\begin{center}
\begin{tikzpicture}[scale=0.9]
\begin{axis}[
xmin=0, xmax=5, xlabel=$x$,
ymin=0, ymax=1, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
restrict y to domain=0:1,
]
\addplot+[mark=none] {(exp(-x^2)+9999)/10000};
\end{axis}
\end{tikzpicture}
\begin{tikzpicture}[scale=0.9]
\begin{axis}[
xmin=0, xmax=5, xlabel=$x$,
ymin=0.9999, ymax=1, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
restrict y to domain=0:1,
y tick label style={
/pgf/number format/.cd,
precision=5,
/tikz/.cd
},
]
\addplot+[mark=none] {(exp(-x^2)+9999)/10000};
\end{axis}
\end{tikzpicture}
\end{center}
To get the reliability of the component at $t=7$ we simply evaluate $R(7)$ which is 0.9999.
The hazard function is defined as
\begin{align}
h(t) &= \frac{f(t)}{1-F(t)} \notag \\
&= \frac{2t}{9999\cdot \exp(t^2)+1} \notag
\end{align}
\begin{center}
\begin{tikzpicture}
\begin{axis}[
xmin=0, xmax=5, xlabel=$x$,
ymin=0, ymax=0.0001, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
restrict y to domain=0:1,
]
\addplot+[mark=none] {(2*x)/(9999*exp(x^2)+1)};
\end{axis}
\end{tikzpicture}
\end{center}
The hazard function describes how an item ages where $t$ affects the risk of failure. It is the frequency with which the item fails, expressed in failures per unit of time.
\subsection{Part (2)}
Given $h(x)\sim(\sqrt{x})^{-1}$ we will try to find out the $shape$-parameter of the \person{Weibull} distribution first.
\begin{center}
\begin{tikzpicture}
\begin{axis}[
xmin=0, xmax=5, xlabel=$x$,
ymin=0, ymax=1, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
restrict y to domain=0:1,
yticklabels={,,},
xticklabels={,,}
]
\addplot+[mark=none] {1/sqrt(x)};
\end{axis}
\end{tikzpicture}
\end{center}
Comparing this graph to graphs of the hazard function with different $shape$-parameters we see that $shape=0.5$ fits best.
\begin{center}
\begin{tabular}{p{5cm}|p{5cm}|p{5cm}}
$shape = 0.5$ & $shape = 1$ & $shape = 2$ \\
\hline
\multicolumn{3}{c}{\cellcolor{gray!50}\textbf{Hazard function} $\left(h = \frac{\text{PDF}}{1-\text{CDF}}\right)$} \\
\hline
\begin{tikzpicture}[scale=0.6]
\begin{axis}[
xmin=0, xmax=2, xlabel=$x$,
ymin=0, ymax=1, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
]
\addplot+[mark=none] {(0.5*x^(-0.5)*exp(-x^0.5))/(1-(1-exp(-x^0.5)))};
\end{axis}
\end{tikzpicture} &
\begin{tikzpicture}[scale=0.6]
\begin{axis}[
xmin=0, xmax=2, xlabel=$x$,
ymin=0, ymax=1, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
restrict y to domain=0:1,
]
\addplot+[mark=none] {(1/exp(x))/(1-(1-1/exp(x)))};
\draw[blue] (axis cs: 0,1) -- (axis cs: 2,1);
\end{axis}
\end{tikzpicture} &
\begin{tikzpicture}[scale=0.6]
\begin{axis}[
xmin=0, xmax=2, xlabel=$x$,
ymin=0, ymax=1, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
]
\addplot+[mark=none] {(2*x^(1)*exp(-x^2))/(1-(1-exp(-x^2)))};
\end{axis}
\end{tikzpicture} \\
\end{tabular}
\end{center}
To get the $scale$-parameter of the distribution we use the other provided information:
\begin{align}
5 &= \mu \notag \\
&= scale\cdot\Gamma\left(1+\frac{1}{shape}\right) \notag \\
&= scale\cdot\Gamma(3) \notag \\
\Rightarrow scale &= \frac{5}{2} \notag
\end{align}
Let's build the survival function:
\begin{align}
R(t) &= 1-\Bigg(1-\exp\left(-\sqrt{\frac{t}{\nicefrac{5}{2}}}\right)\Bigg) \notag \\
&= \exp\left(-\sqrt{\frac{t}{2.5}}\right)\notag
\end{align}
That means that the probability of surviving 6 years (30 years) is $R(6) = \exp(-\sqrt{2.4}) = 0.2124$ ($R(30) = \exp(-\sqrt{12}) = 0.0313$).
\end{document}

View file

@ -0,0 +1,92 @@
\subsection{A word of warning}
The limitation of the bootstrap is the assumption that the distribution of the data represented by one sample is an accurate estimate of the population distribution. If the sample does not reflect the population distribution, then the random sampling performed in the bootstrap procedure may add another level of sampling error, resulting in inaccurate statistical estimations.
It is important to get quality data that accurately reflects the population being sampled. The smaller the original sample, the less likely it is to accurately represent the entire population.
We use bootstrap if
\begin{itemize}
\item we have a small but representative random sample or
\item we have a not-normal distribution or aren't sure about it.
\end{itemize}
\subsection{Why to use it?}
In normal population the mean $\mu$ is the parameter that is most often estimated. But other parameters are possible too:
\begin{itemize}
\item standard deviation
\item interquartile range (upper quartile - lower quartile)
\item median
\item other percentiles (e.g. upper quartile)
\end{itemize}
These parameters can be estimated using the corresponding summary statistic from a random sample, but the error distribution may be difficult to obtain theoretically.
Resampling techniques are normally used to estimate parameters and confidence intervals from sample data when parametric test assumptions are not met or for small samples from non-normal distributions.
\begin{itemize}
\item non-parametric bootstrap
\item \textcolor{gray}{parametric bootstrap}
\item \textcolor{gray}{Jackknife}
\item \textcolor{gray}{permutation tests}
\end{itemize}
Non-parametric bootstrap means that only a random sample is known and no prior knowledge on the population density function.
\begin{example}
Monthly rainfall in Dodoma, Tanzania has a skew distribution in some months. The distribution of a sample is provided below.
\input{./TeX_files/materials/rainfall_dodoma_sample}
If a normal distribution does not seem a reasonable model, an alternative is to treat the actual sample as the ``population'' for the simulation and take random samples with replacement from this sample. Such samples are called \begriff{bootstrap samples}. A simulation with these bootstrap samples can again show the error distribution and provide approximate values for the bias and standard error.
\input{./TeX_files/materials/rainfall_dodoma_bs1}
\input{./TeX_files/materials/rainfall_dodoma_bs2}
\end{example}
\subsection{Bootstrap distribution}
The standard error of a statistic is the standard deviation of the sample statistic. The standard error can be calculated as the standard deviation of the sampling distribution.
Bootstrap sample is a random sample taken with replacement from the original sample, of the same size as the original sample. A bootstrap statistic is the statistic computed on a bootstrap sample. A bootstrap distribution is the distribution of many bootstrap statistics. The standard error of a statistic can be estimated using the standard deviation of the bootstrap distribution.
Let $\hat{\theta}$ a statistic calculated from a sample ($\hat{\theta} = \bar{x}$). We draw $r$ observations with replacement to create a bootstrap sample and calculate the statistic $\hat{\theta}^\ast$ for this sample.
\begin{itemize}
\item \textbf{bootstrap standard error:} the sample standard deviation of the bootstrap distribution:
\begin{align}
\SE_b = \sqrt{\frac{\sum (\hat{\theta}^\ast_b - \bar{\theta}^\ast)^2}{B-1}} \notag
\end{align}
where $B$ is the number of bootstrap replications (usually $B>10000$)
\item \textbf{bootstrap bias:} $\bar{\theta}^\ast-\hat{\theta}$
\item \textbf{bootstrap confidence intervals:} bootstrap percentile interval, t confidence interval with bootstrap standard error, bootstrap t-interval, etc.
\end{itemize}
\subsection{Bootstrap methods}
\begin{center}
\begin{tabular}{p{3cm}|p{2cm}|p{2cm}|p{3cm}|p{3.5cm}}
\textbf{Name} & \textbf{calculate} & \textbf{repeat} & \textbf{get distribution} & \textbf{confidence interval} \\
\hline
Bootstrap percentile CI or \person{Efron} method & $\hat{\theta}^\ast_b$ & $B$ times & $\left\lbrace \hat{\theta}^\ast_b\right\rbrace^B_{b=1}$ & $[q_{\nicefrac{\alpha}{2}},q_{1-\nicefrac{\alpha}{2}}]$ \\
\hline
Bootstrap CI - bootstrap t & $\frac{\hat{\theta}^\ast_b-\hat{\theta}}{\SE(\hat{\theta}^\ast_b)}$ & $B$ times & $\left\lbrace\frac{\hat{\theta}^\ast_b-\hat{\theta}}{\SE(\hat{\theta}^\ast_b)} \right\rbrace^B_{b=1}$ & $[\hat{\theta}-\SE(\hat{\theta})\cdot q_{1-\nicefrac{\alpha}{2}},\hat{\theta}-\SE(\hat{\theta})\cdot q_{\nicefrac{\alpha}{2}}]$ \\
\hline
Bootstrap CI symmetric t-percentile & $\frac{\hat{\theta}^\ast_b-\hat{\theta}}{\SE(\hat{\theta}^\ast_b)}$ & $B$ times & $\left\lbrace\frac{\hat{\theta}^\ast_b-\hat{\theta}}{\SE(\hat{\theta}^\ast_b)} \right\rbrace^B_{b=1}$ & $[\hat{\theta}-\SE(\hat{\theta})\cdot q_{1-\alpha},\hat{\theta}+\SE(\hat{\theta})\cdot q_{1-\alpha}]$ \\
\hline
Bootstrap CI \person{Hall} method & $\hat{\theta}^\ast_b-\hat{\theta}$ & $B$ times & $\left\lbrace \hat{\theta}^\ast_b - \hat{\theta} \right\rbrace^B_{b=1}$ & $[\hat{\theta}-q_{1-\nicefrac{\alpha}{2}},\hat{\theta} - q_{\nicefrac{\alpha}{2}}]$ \\
\end{tabular}
\end{center}
\textbf{Bootstrap using t CI - \textcolor{red}{not recommended}}
\begin{align}
\hat{\theta}\pm t_{\nicefrac{\alpha}{2}}\cdot\SE_b\notag
\end{align}
Bootstrap standard error is the sample standard deviation of the bootstrap distribution
\begin{align}
\SE_b = \sqrt{\frac{\sum (\hat{\theta}^\ast_b - \bar{\theta}^\ast)^2}{B-1}} \notag
\end{align}
where $B$ is the number of bootstrap replications (usually $B>10000$). The bootstrap bias is $\bar{\theta}^\ast-\hat{\theta}$. It can be useful when the standard error is difficult to derive. \textcolor{red}{It has a poor performance when distributions are highly skewed.}
\textbf{Bootstrap percentile CI or \person{Efron} method} \\
For a 90\% confidence interval keep the middle 90\%, leaving 5\% in each tail. The 90\% confidence interval boundaries would be the 5th percentile and the 95th percentile. In case we have 10000 bootstrap replications: $\theta^\ast_1\le\theta^\ast_2\le\dots\le\theta^\ast_{10000}$ the 90\% confidence interval is $[\theta^\ast_{500},\theta^\ast_{9500}]$.
\begin{itemize}
\item \textbf{Advantages:} A very intuitive and easy to implement method. Can also outperform some other bootstrap CI methods for skewed distributions.
\item \textbf{Disadvantages:} Can be too narrow for small samples.
\end{itemize}

View file

@ -0,0 +1,96 @@
\subsection{Confidence and tolerance intervals}
In statistical analysis we want to estimate a population parameter from a \begriff{random sample}. This is called \begriff{inference} about the parameter. Random samples are used to provide information about parameters in an underlying \begriff{population distribution}. Rather than estimating the full shape of the underlying distribution, we usually focus on one or two parameters.
We want the error distribution to be centered on zero. Such an estimator is called \begriff{unbiased}. A biased estimator tends to have negative/positive errors, i.e.\ it usually underestimates/overestimates the parameter that is being estimated.
We also want \begriff{error distribution} to be tightly concentrated on zero, i.e. to have a small spread.
A good estimator should have a small bias and small standard error. These two criteria can be combined into a single value called the estimator's \begriff{mean squared error}. Since most estimators that we will consider are unbiased, the spread of the error distribution is most important.
\begin{definition}[Standard error]
The \begriff{standard error} (SE) of an estimator $\hat{\theta}$ of a parameter $\theta$ is defined to be its standard deviation.
\end{definition}
\begin{example}
Standard error of the mean:
\begin{itemize}
\item Bias ($\mu$ error) = 0, i.e. $E(\hat{\theta})=\theta$
\item When population standard deviation is known: $\text{SE} = \frac{\sigma}{\sqrt{n}}$
\item When population standard deviation is unknown: $\text{SE} = \frac{s}{\sqrt{n}}$
\end{itemize}
\end{example}
Do not confuse SD (sample standard deviation) ($\to$ one sample) and SE (standard deviation of the sample mean $\bar{x}$) ($\to$ error from hypothetical samples)!
\begin{definition}[Confidence interval for $\mu$ with known $\sigma$]
We can be $(1-\alpha)\cdot 100\%$ confident that the estimate for $\mu$ will be in the interval
\begin{align}
\bar{x} - z_{\nicefrac{\alpha}{2}}\frac{\sigma}{\sqrt{n}} < \mu < \bar{x} + z_{\nicefrac{\alpha}{2}}\frac{\sigma}{\sqrt{n}} \notag
\end{align}
Common exact values of $z_{\nicefrac{\alpha}{2}}$ with critical values from normal distribution:
\begin{center}
\begin{tabular}{c|c}
\textbf{confidence} & \textbf{value of $z_{\nicefrac{\alpha}{2}}$} \\
\hline
90\% & 1.645 \\
95\% & 1.96 \\
99\% & 2.575 \\
\end{tabular}
\end{center}
\end{definition}
\begin{definition}[Confidence interval for $\mu$ when $\sigma$ is unknown]
If we simply replace $\sigma$ by its sample variance the confidence level will be lower than 95\%. When the sample size is large, the confidence level is close to 95\% but the confidence level can be much lower if the sample size is small.
The critical value comes from \person{Student}'s t-distribution. The value of $t_{\nicefrac{\alpha}{2}}$ depends on the sample size through the use of degrees of freedom. The confidence interval is
\begin{align}
\bar{x} - t_{\nicefrac{\alpha}{2}}\frac{s}{\sqrt{n}} < \mu < \bar{x} + t_{\nicefrac{\alpha}{2}}\frac{s}{\sqrt{n}} \notag
\end{align}
\end{definition}
Consider estimation of a population mean, $\mu$, from a random sample of size $n$. A confidence interval will be of the form $\bar{x}\pm t_{\nicefrac{\alpha}{2}}\frac{s}{\sqrt{n}}$. If we want our estimate to be within $k$ of $\mu$, then we need $n$ to be large enough so that $t_{\nicefrac{\alpha}{2}}\frac{s}{\sqrt{n}} < k$. For 95\% confidence interval if $n$ is reasonably large the t-value in the inequality will be approximately 1.96: $1.96\frac{s}{\sqrt{n}}<k$ that can be re-written as $n>\left(\frac{1.96s}{k}\right)^2$. In practice, it is best to increase $n$ a little over this value in case the sample deviation was wrongly guessed.
\begin{example}
If we expect that a particular type of measurement will have a standard deviation of about 8, and we want to estimate its mean, $\mu$, to within 2 of its correct value with probability 0.95, the sample size should be:
\begin{align}
n>\left(\frac{1.96\cdot 8}{2}\right)^2 = 61.5\notag
\end{align}
This suggests a sample size of at least 62. The more accurate trial-and-error method using a t-value would give a sample size of 64.
\end{example}
The sample proportion of successes is denoted by $\hat{p}$ and is an estimate of $p$. The estimation error is $\hat{p}-p$.
\begin{align}
\hat{p} = \frac{\text{number of successes in sample}}{\text{sample size}} \notag
\end{align}
A 95\% confidence interval is $\hat{p} \pm 2\cdot\sqrt{\frac{\hat{p}(1-\hat{p})}{n}}$
\begin{example}
In a random sample of $n=36$ values, there were $x=17$ successes. We estimate the population proportion $p$ with $\hat{p}=\frac{17}{36}=0.472$. A 95\% confidence interval for $p$ is $0.472\pm 0.166$. We are therefore 95\% confident that the population proportion of successes is between 30.6\% and 63.8\%. A sample size of $n=36$ is clearly too small to give a very accurate estimate.
If the sample size $n$ is small or $\hat{p}$ is close to either 0 or 1, this normal approximation is inaccurate and the confidence level for the interval can be considerably less than 95\%. Classical theory recommends using the confidence interval for $p$ only when $n>30$, $n\hat{p}>5$ and $n(1-\hat{p})>5$.
\end{example}
\begin{*anmerkung}[z-value or t-value?]
\begin{itemize}
\item If you know the variance of the population, then you should use the z-value from normal distribution.
\item If you don't know the variance of the population or the population is non-normal, then you should formally always use the t-value.
\item For most non-normal population distributions, the distribution of the sample mean becomes close to normal when the sample size increases (\begriff{Central Limit Theorem})
\item Even for relatively small samples, the distributions are virtually the same. Therefore, it is common to approximate the t-distribution using normal distribution for sufficiently large samples (e.g. $n>30$).
\end{itemize}
\end{*anmerkung}
\begin{definition}[Tolerance interval]
A $(1-\alpha)\cdot 100\%$ \begriff{tolerance interval} for $\gamma\cdot 100\%$ of the measurements in a normal population is given by $\bar{x}\pm Ks$ where $K$ is a tolerance factor. \begriff{Tolerance limits} are the endpoints of the tolerance interval.
\end{definition}
Do not mix up with confidence intervals! We focus on $\gamma$ (a certain percentage of measurements) rather than on a population parameter.
If we knew $\mu$ and $\sigma$ then the tolerance factor $K$ is 1. Otherwise the tolerance factor depends on the level of confidence, $\gamma$ and the sample size $n$.
\begin{example}
A corporation manufactures field rifles. To monitor the process, an inspector randomly selected 50 firing pins from the production line. The sample mean $\bar{x}$ for all observations is 0.9958 inch and standard deviation $s$ is 0.0333. Assume that the distribution of pin lengths is normal. Find a 95\% tolerance interval for 90\% of the firing pin lengths.
Given $n=50$, $\gamma=0.9$ and $\alpha=0.05$, work out $K$ (you can either use a special table or MATLAB function). $K=1.996$. The 95\% tolerance interval is (0.9293, 1.0623). Approximately 95 of 100 similarly constructed tolerance intervals will contain 90\% of the firing pin lengths in the population.
\end{example}
% View file
% @ -0,0 +1,186 @@
\subsection{Maximum Likelihood Estimate}
\begin{definition}[likelihood function]
If random variables have joint probability $p(x_1,...,x_n\vert \theta)$ then the function $L(\theta\vert x_1,...,x_n)=p(x_1,...,x_n\vert\theta)$ is called the \begriff{likelihood function} of $\theta$.
\end{definition}
The likelihood function tells the probability of getting the data that were observed if the parameter value was really $\theta$.
\begin{definition}[maximum likelihood estimate]
The \begriff{maximum likelihood estimate} of a parameter $\theta$ is the value that maximizes the likelihood function $L(\theta\vert x_1,...,x_n) = p(x_1,...,x_n\vert\theta)$.
\end{definition}
In practice one maximizes the logarithm of the likelihood function and solves the following equation:
\begin{align}
\frac{\mathrm{d}\log L(\theta\vert x_1,...,x_n)}{\mathrm{d}\theta} = 0\notag
\end{align}
The following formula can find an approximate numerical value for the standard error of almost any maximum likelihood estimator:
\begin{align}
\SE(\hat{\theta}) \approx\sqrt{-\frac{1}{l''(\hat{\theta})}}\notag
\end{align}
For the 95\% confidence interval we can write:
\begin{align}
\hat{\theta} - 1.96\cdot\SE(\hat{\theta}) < \theta < \hat{\theta} + 1.96\cdot\SE(\hat{\theta}) \notag
\end{align}
For the 90\% confidence interval we can write:
\begin{align}
\hat{\theta} - 1.645\cdot\SE(\hat{\theta}) < \theta < \hat{\theta} + 1.645\cdot\SE(\hat{\theta}) \notag
\end{align}
\begin{example}
The probability density function (PDF) of exponential distribution is
\begin{align}
\PDF = \begin{cases}
\lambda e^{-\lambda x} & x\ge 0 \\ 0 & \text{otherwise}
\end{cases}\notag
\end{align}
We want to estimate the parameter $\lambda$.
\begin{center}
\begin{tabular}{p{4cm}|p{7cm}}
Likelihood function & $L(\lambda\vert x_1,...,x_n) = \lambda^n e^{-\lambda\sum x_i}$ \\
\hline
log-likelihood function & $l(\lambda\vert x_1,...,x_n) = n\log(\lambda)-\lambda\sum x_i$ \\
\hline
MLE & $l'(\lambda\vert x_1,...,x_n) = \frac{n}{\lambda} - \sum x_i \overset{!}{=} 0\Rightarrow \hat{\lambda} = \frac{1}{\bar{x}}$ \\
\hline
Standard error & $\SE(\hat{\lambda}) = \sqrt{-\frac{1}{l''(\hat{\lambda})}} = \frac{\hat{\lambda}}{\sqrt{n}} = \frac{1}{\sqrt{n}\bar{x}}$ where $l''(\lambda) = -\frac{n}{\lambda^2}$ \\
\hline
95\% confidence interval & $\frac{1}{\bar{x}}\pm 1.96\cdot\frac{1}{\sqrt{n}\bar{x}}$
\end{tabular}
\end{center}
Let's assume that the mean time between failures of 199 air-conditioners is $\bar{x} = 90.92$ hours. The MLE for the estimated failure rate $\lambda$ is $\frac{1}{\bar{x}} = 0.0110$ failures per hour. \\
$\Rightarrow$ 95\% confidence interval for the failure rate:
\begin{align}
\frac{1}{\bar{x}} \pm 1.96\cdot\frac{1}{\sqrt{n}\bar{x}} \Rightarrow \lambda\in [0.00947,0.01253]\notag
\end{align}
\end{example}
Given a sample, we can estimate two unknown parameters in a probability distribution, for example, estimate parameters $\mu$ and $\sigma$ in a normal distribution.
\begin{definition}[likelihood function for two parameters]
If random variables have joint probability $p(x_1,...,x_n\vert \theta,\phi)$ then the function $L(\theta,\phi\vert x_1,...,x_n)=p(x_1,...,x_n\vert\theta,\phi)$ is called the \begriff{likelihood function} of $\theta$ and $\phi$.
\end{definition}
The likelihood function is maximised at a turning point of the likelihood function and could therefore be found by setting the partial derivatives of $L(\theta,\phi)$ with respect to $\theta$ and $\phi$ to zero.
There are three important properties of the maximum likelihood estimator $\hat{\theta}$ of a parameter $\theta$ based on a random sample of size $n$ from a distribution with a probability function $p(x_1,...,x_n\vert\theta)$:
\begin{itemize}
\item Asymptotically unbiased: $E(\hat{\theta})\to\theta$ when $n\to\infty$
\item Asymptotically has a normal distribution: $\hat{\theta}\to$ normal distribution when $n\to\infty$ that can be used to generate confidence intervals.
\item Maximum likelihood estimators have low mean squared error if the sample size is large enough. MLE can be heavily biased for small samples!
\end{itemize}
\subsection{Continuous distributions}
The \begriff{lognormal distribution} is used in situations where values are positively skewed, for example, for financial analysis of stock prices. Note that the uncertain variable can increase without limits but cannot take negative values.
\begin{center}
\begin{tikzpicture}
\begin{axis}[
xmin=0, xmax=2, xlabel=$x$,
ymin=0, ymax=1, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
restrict y to domain=0:1,
]
\addplot+[mark=none] {1/(x*sqrt(2*pi))*exp(-0.5*(ln(x))^2)};
\addlegendentry{$\mu=0$, $\sigma=1$}
\end{axis}
\end{tikzpicture}
\end{center}
In the \begriff{beta distribution} the uncertain variable is a random value between 0 and a positive value. The distribution is frequently used for estimating the proportions and probabilities (i.e. values between 0 and 1). The shape of the distribution is specified by two positive parameters.
The \begriff{\person{Student}'s t distribution} is the most widely used distribution in confidence intervals and hypothesis testing. The distribution can be used to estimate the mean of a normally distributed population when the sample size is small. The t distribution approaches the normal distribution as the degrees of freedom (or sample size) increase.
\begin{center}
\begin{tikzpicture}
\begin{axis}[
xmin=-3, xmax=3, xlabel=$x$,
ymin=0, ymax=1, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
]
\addplot+[mark=none] {(1)/(pi*(x^2+1))};
\addlegendentry{1 degree of freedom}
\addplot+[mark=none] {0.389108/((1+x^2/10)^5.5)};
\addlegendentry{10 degrees of freedom}
\addplot+[dashed,mark=none] {1/(sqrt(2*pi))*exp(-0.5*x^2)};
\addlegendentry{normal distribution}
\end{axis}
\end{tikzpicture}
\end{center}
The \begriff{chi-square distribution} is usually used for estimating the variance in a normal distribution.
\begin{center}
\begin{tikzpicture}
\begin{axis}[
xmin=0, xmax=5, xlabel=$x$,
ymin=0, ymax=1, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
]
\addplot+[mark=none] {exp(-x/2)/(sqrt(2*pi) * sqrt(x))};
\addlegendentry{1 degree of freedom}
\addplot+[mark=none] {exp(-x/2) * sqrt(x)/sqrt(2*pi)};
\addlegendentry{3 degrees of freedom}
\addplot+[mark=none] {exp(-x/2) * x^(7/2)/(105 * sqrt(2*pi))};
\addlegendentry{9 degrees of freedom}
\end{axis}
\end{tikzpicture}
\end{center}
In a homogeneous \person{Poisson} process with a rate $\lambda$ events per unit time, the time until the first event happens has a distribution called an \begriff{exponential distribution}. All exponential distributions have their highest probability density at $x=0$ and steadily decrease as $x$ increases.
\begin{center}
\begin{tikzpicture}
\begin{axis}[
xmin=0, xmax=2, xlabel=$x$,
ymin=0, ymax=1, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
restrict y to domain=0:1,
]
\addplot+[mark=none] {exp(-x)};
\addlegendentry{$\mu=1$}
\end{axis}
\end{tikzpicture}
\end{center}
The \begriff{\person{Weibull} distribution} can be used as a model for items that either deteriorate or improve over time. Its basic version has two parameters: shape and scale.
\begin{center}
\begin{tikzpicture}
\begin{axis}[
xmin=0, xmax=2, xlabel=$x$,
ymin=0, ymax=1, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
restrict y to domain=0:1,
]
\addplot+[mark=none] {0.5*x^(-0.5)*exp(-x^0.5)};
\addlegendentry{scale $=1$, shape $=0.5$}
\addplot+[mark=none] {1/exp(x)};
\addlegendentry{scale $=1$, shape $=1$}
\addplot+[mark=none] {2*x^(1)*exp(-x^2)};
\addlegendentry{scale $=1$, shape $=2$}
\end{axis}
\end{tikzpicture}
\end{center}
\begin{itemize}
\item $shape>1$: the hazard function is increasing so the item becomes less reliable as it gets older.
\item $shape<1$: the hazard function is decreasing so the item becomes more reliable as it gets older.
\item $shape=1$: the hazard function is constant so the lifetime distribution becomes exponential.
\end{itemize}
The \begriff{survival function} (probability of surviving until a particular time) is $R(t) = 1-F(t)$. The \begriff{hazard rate function} (failure rate) is worked out by the formula:
\begin{align}
h(t) &= \frac{f(t)}{1-F(t)} \notag \\
&= \frac{f(t)}{R(t)} \notag
\end{align}
where $f(t)$ and $F(t)$ are PDF and CDF of the distribution.
The hazard function describes how an item ages where $t$ affects its risk of failure. This constant hazard function in the exponential distribution corresponds to the \person{Poisson} process without memory, i.e. the chance of failing does not depend on what happened before and how long the item has already survived.
% View file
% @ -0,0 +1,296 @@
There are two types of questions in statistical inference:
\begin{itemize}
\item \textbf{Parameter estimation:} What parameter values would be consistent with the sample data?
\item \textbf{Hypothesis testing:} Are the sample data consistent with some statement about the parameters?
\end{itemize}
The \begriff{Null Hypothesis} $H_0$ often specifies a single value for the unknown parameter such as ``$\alpha = \dots$''. It is a default value that can be accepted as holding if there is no evidence against it. A researcher often collects data with the express hope of disproving the null hypothesis.
If the null hypothesis is not true, we say that the \begriff{alternative hypothesis} $H_A$ holds. If the data are not consistent with the null hypothesis, then we can conclude that the alternative hypothesis must be true. Either the null hypothesis or the alternative hypothesis must be true.
\begin{example}
The data show the number of operating hours between successive failures of air-conditioning equipment in ten aircraft. The sample of 199 values provides a \begriff{test statistic}. We can test the manufacturer's claim that the rate of failures is no more than one per 100 hours of use.
\begin{align}
H_0: \lambda &\le \frac{1}{100}\text{ (claim of a manufacturer)} \notag \\
H_A: \lambda &> \frac{1}{100} \notag
\end{align}
This can be simplified:
\begin{align}
H_0: \lambda &= \frac{1}{100}\text{ (claim of a manufacturer)} \notag \\
H_A: \lambda &> \frac{1}{100} \notag
\end{align}
\end{example}
\subsection{The p-value (probability value)}
In an industrial process some measurement is normally distributed with standard deviation $\sigma = 10$. Its mean should be $\mu = 520$, but can differ a little bit. Samples of $n=10$ measurements are regularly collected as part of quality control. If a sample had $\bar{x}=529$, does the process need to be adjusted?
\input{./TeX_files/materials/samples_of_mean}
From the 200 simulated samples above (\person{Monte Carlo} simulation), it seems very unlikely that a sample mean of 529 would have been recorded if $\mu = 520$. There is strong evidence that the industrial process no longer has a mean of $\mu = 520$ and needs to be adjusted.
\begin{definition}[p-value]
A \begriff{p-value} describes the \textbf{evidence against} $H_0$. A p-value is evaluated from a random sample so it has a distribution in the same way that a sample mean has a distribution.
\end{definition}
\begin{center}
\begin{tabular}{p{4cm}|p{7cm}}
\textbf{p-value} & \textbf{Interpretation} \\
\hline
over 0.1 & no evidence that $H_0$ does not hold \\
between 0.05 and 0.1 & very weak evidence that $H_0$ does not hold \\
between 0.01 and 0.05 & moderately strong evidence that $H_0$ does not hold \\
under 0.01 & strong evidence that $H_0$ does not hold
\end{tabular}
\end{center}
\begin{example}[normal distribution with known $\sigma$, one-tailed test]
We are given a random sample of $n=30$ with $\bar{x}=16.8$. Does the population have mean $\mu=18.3$ and standard deviation $\sigma=7.1$, or is the mean now lower than 18.3?
\begin{align}
H_0: \mu &= 18.3 \notag \\
H_A: \mu &< 18.3 \notag
\end{align}
The p-value can be evaluated using the statistical distance of 16.8 from 18.3 (a z statistic).
\begin{align}
z = \frac{\bar{x} - 18.3}{\underbrace{\frac{7.1}{\sqrt{30}}}_{\text{standard error}}} = -1.157 \notag
\end{align}
\begin{center}
\begin{tikzpicture}
\begin{axis}[
xmin=-3, xmax=3, xlabel=$z$,
ymin=0, ymax=0.6,
samples=400,
axis y line=middle,
axis x line=middle,
]
\addplot[name path=f,blue] {1/(sqrt(2*pi))*exp(-0.5*x^2)};
\path[name path=axis] (axis cs:-3,0) -- (axis cs:-1.157,0);
\addplot [thick,color=blue,fill=blue,fill opacity=0.3] fill between[of=f and axis,soft clip={domain=-3:-1.157},];
\draw [dotted] (axis cs:-1.157,0) -- (axis cs:-1.157,0.6);
\node at (axis cs:-2.5,0.25) (node) {p-value};
\draw (axis cs:-2.5,0.23) -- (axis cs:-1.5,0.08);
\end{axis}
\end{tikzpicture}
\end{center}
\begin{align}
\text{p-value} = P(z \le -1.157) = 0.124 \notag
\end{align}
The p-value is reasonably large, meaning that a sample mean as low as 16.8 would not be unusual if $\mu=18.3$, so there is no evidence against $H_0$.
\end{example}
\begin{*anmerkung}
To compute the p-value you can use
\begin{align}
\text{p-value} = \texttt{CDF(NormalDistribution(0,1),-1.157)}\notag
\end{align}
\end{*anmerkung}
\begin{example}[normal distribution with known $\sigma$, two-tailed test]
Companies test their products to ensure that the amount of active ingredient is within some limits. However the chemical analysis is not precise and repeated measurements of the same specimen usually differ slightly. One type of analysis gives results that are normally distributed with a mean that depends on the actual product being tested and standard deviation 0.0068 grams per litre. A product is tested three times with the following concentrations of the active ingredient: 0.8403, 0.8363, 0.8447 grams per litre. Are the data consistent with the target concentration of 0.85 grams per litre?
\begin{center}
\begin{tabular}{p{4cm}|p{7cm}}
null hypothesis & $H_0$: $\mu=0.85$ \\
\hline
alternative hypothesis & $H_A$: $\mu\neq 0.85$ \\
\hline
test statistic & $\bar{x} = 0.8404$, $z=\frac{0.8404-0.85}{\frac{0.0068}{\sqrt{3}}} = -2.437$, $P(z\le -2.437) = 0.00741$ \\
\hline
p-value & $2\cdot 0.00741 = 0.0148$ \\
\hline
p-value interpretation & There is moderately strong evidence that the true concentration is not 0.85.
\end{tabular}
\end{center}
\begin{center}
\begin{tikzpicture}
\begin{axis}[
xmin=-3, xmax=3, xlabel=$z$,
ymin=0, ymax=0.6,
samples=400,
axis y line=middle,
axis x line=middle,
]
\addplot[name path=f,blue] {1/(sqrt(2*pi))*exp(-0.5*x^2)};
\path[name path=axis] (axis cs:-3,0) -- (axis cs:-2.437,0);
\path[name path=axis2] (axis cs:2.437,0) -- (axis cs:3,0);
\addplot [thick,color=blue,fill=blue,fill opacity=0.3] fill between[of=f and axis,soft clip={domain=-3:-2.437},];
\addplot [thick,color=blue,fill=blue,fill opacity=0.3] fill between[of=f and axis2,soft clip={domain=2.437:3},];
\draw [dotted] (axis cs:-2.437,0) -- (axis cs:-2.437,0.6);
\draw [dotted] (axis cs:2.437,0) -- (axis cs:2.437,0.6);
\node at (axis cs:-1.5,0.25) (node) {p-value};
\draw (axis cs:-1.7,0.23) -- (axis cs:-2.7,0.002);
\draw (axis cs:-1.3,0.23) -- (axis cs:2.7,0.002);
\end{axis}
\end{tikzpicture}
\end{center}
\end{example}
\begin{example}[normal distribution with unknown $\sigma$, one-tailed test]
Both cholesterol and saturated fats are often avoided by people who are trying to lose weight or reduce their blood cholesterol level. Cooking oil made from soybeans has little cholesterol and has been claimed to have only 15\% saturated fat. A clinician believes that the saturated fat content is greater than 15\% and randomly samples 13 bottles of soybean cooking oil for testing with the following percentage saturated fat: 15.2, 12.4, 15.4, 13.5, 15.9, 17.1, 16.9, 14.3, 19.1, 18.2, 15.5, 16.3, 20.0.
\begin{center}
\begin{tabular}{p{4cm}|p{7cm}}
null hypothesis & $H_0$: $\mu=15$ \\
\hline
alternative hypothesis & $H_A$: $\mu > 15$ \\
\hline
T-test for $\mu$ & $\bar{x} = 16.138$, $t=\frac{16.138-15}{\frac{2.154}{\sqrt{13}}} = 1.906$, $P(t\ge 1.906) = 0.040$ (t-distribution with 12 degrees of freedom) \\
\hline
p-value interpretation & Since this is below 0.05, we conclude that there is moderately strong evidence that the mean saturated fat content of the oils is higher than the claimed 15\%.
\end{tabular}
\end{center}
\begin{center}
\begin{tikzpicture}
\begin{axis}[
xmin=-3, xmax=3, xlabel=$z$,
ymin=0, ymax=0.6,
samples=400,
axis y line=middle,
axis x line=middle,
]
\addplot[name path=f,blue] {4041576*(1/(x^2 + 12))^(13/2)};
\path[name path=axis] (axis cs:-3,0) -- (axis cs:-1.906,0);
\addplot [thick,color=blue,fill=blue,fill opacity=0.3] fill between[of=f and axis,soft clip={domain=-3:-1.906},];
\draw [dotted] (axis cs:-1.906,0) -- (axis cs:-1.906,0.6);
\node at (axis cs:-2.5,0.25) (node) {p-value};
\draw (axis cs:-2.5,0.23) -- (axis cs:-2.2,0.03);
\end{axis}
\end{tikzpicture}
\end{center}
\end{example}
A hypothesis test is based on two competing hypotheses about the value of a parameter $\theta$. \\
Null hypothesis $H_0$: $\theta = \theta_0$ \\
Alternative hypothesis (one-tailed test) $H_A$: $\theta > \theta_0$
The hypothesis test is based on a test statistic that is some function of the data values:
\begin{align}
Q = g(x_1,...,x_n\vert\theta_0) \notag
\end{align}
whose distribution is fully known when $H_0$ is true (i.e. when $\theta_0$ is the true parameter value). We evaluate the test statistic to assess whether it is unusual enough to throw doubt on the null hypothesis.
\begin{center}
\begin{tikzpicture}
\begin{axis}[
xmin=0, xmax=4,
ymin=0, ymax=1,
samples=400,
axis y line=middle,
axis x line=middle,
restrict y to domain=0:1,
]
\addplot[name path=f,blue] {1/(x*sqrt(2*pi))*exp(-0.5*(ln(x))^2)};
\path[name path=axis] (axis cs:2,0) -- (axis cs:4,0);
\addplot [thick,color=blue,fill=blue,fill opacity=0.3] fill between[of=f and axis,soft clip={domain=2:4}];
\draw [dotted] (axis cs:2,0) -- (axis cs:2,0.55);
\node at (axis cs:3,0.5) (1) {observed value of $Q$};
\node at (axis cs:1.7,0.75) (2) {distribution of $Q$ when $H_0$ is true};
\node at (axis cs:3,0.3) (3) {p-value};
\draw (axis cs:3,0.25) -- (axis cs:2.5,0.05);
\end{axis}
\end{tikzpicture}
\end{center}
\begin{theorem}
P-values close to zero throw doubt on the null hypothesis.
\end{theorem}
\subsection{The significance level}
\begin{definition}[significance level]
The \begriff{significance level} is the probability of wrongly concluding that $H_0$ does not hold when it actually does.
\end{definition}
\begin{itemize}
\item \textbf{One-tailed test:} For example, it may be acceptable to have a 5\% chance of concluding that $\theta<\theta_0$ when actually $\theta=\theta_0$. This means a significance level (tail area of the test statistic's distribution) of this test is $\alpha=0.05$.
\item \textbf{Two-tailed test:} Values at both tails of the distribution of the test statistic result in rejection of $H_0$, so the corresponding tail areas should each have area $\frac{\alpha}{2}$ for a test with significance level $\alpha$.
\end{itemize}
\begin{example}
Cooking oil made from soybeans has little cholesterol and has been claimed to have only 15\% saturated fat. A clinician believes that the saturated fat content is greater than 15\% and randomly samples 13 bottles of soybean cooking oil for testing: 15.2, 12.4, 15.4, 13.5, 15.9, 17.1, 16.9, 14.3, 19.1, 18.2, 15.5, 16.3, 20.0.
\begin{center}
\begin{tabular}{p{4cm}|p{7cm}}
Null hypothesis & $H_0$: $\mu=15$ \\
\hline
Alternative hypothesis & $H_A$: $\mu > 15$ \\
\hline
\multicolumn{2}{p{11cm}}{A significance level of $\alpha=0.05$ means that the clinician is willing to wrongly conclude that the saturated fat content is over 15\% when it really is 15\% with probability 0.05.} \\
\hline
t-statistic & $t=\frac{\bar{x}-15}{\frac{s}{\sqrt{13}}} = 1.906$ \\
\hline
rejection region & $P(T>1.782) = 0.05$ (t distribution with 12 degrees of freedom) \\
\hline
Conclusion & $t$ lies in the rejection region so $H_0$ is rejected at the 5\% significance level.
\end{tabular}
\end{center}
\begin{center}
\begin{tikzpicture}
\begin{axis}[
xmin=-3, xmax=3, xlabel=$x$,
ymin=0, ymax=1, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
domain=-3:3,
restrict y to domain=0:1,
width = 16cm,
height = 8cm,
]
\addplot[name path=f,blue] {4041576/((x^2+12)^(6.5))};
\path[name path=axis] (axis cs:1.782,0) -- (axis cs:3,0);
\draw (axis cs:1.782,0) -- (axis cs:1.782,1);
\draw [dotted] (axis cs:1.906,0) -- (axis cs:1.906,0.6);
\node at (axis cs:1,0.5) (a) {0.05};
\draw (axis cs:1, 0.46) -- (axis cs: 2.2,0.02);
\node at (axis cs: 2.0,0.95) (b) {1.782};
\node at (axis cs: 2.1,0.55) (c) {1.906};
\node[red] at (axis cs: 2.4,0.78) (d) {rejection region};
\begin{scope}[transparency group]
\begin{scope}[blend mode=multiply]
\addplot [thick,color=blue,fill=blue,fill opacity=0.3] fill between[of=f and axis,soft clip={domain=1.782:3},];
\draw[red,fill=red,opacity=0.2] (axis cs: 1.782,0) -- (axis cs: 1.782,1) -- (axis cs: 3,1) -- (axis cs: 3,0) -- (axis cs: 1.782,0);
\end{scope}
\end{scope}
\end{axis}
\end{tikzpicture}
\end{center}
\end{example}
\begin{definition}[Type 1 + 2 error]
The \begriff{Type 1 error} is the significance level of the test. The decision rule is usually defined to make the significance level 5\% or 1\%.
The \begriff{Type 2 error} is wrongly accepting $H_0$ when it is false.
\end{definition}
Instead of the probability of a Type 2 error, it is common to use the \begriff{power} of a test, defined as one minus the probability of a Type 2 error. The power of a test is the probability of correctly rejecting $H_0$ when it is false.
\begin{center}
\begin{tabular}{p{2cm}p{2cm}|p{3cm}|p{3cm}}
& & \multicolumn{2}{|c}{Decision} \\
\cline{3-4}
& & accept $H_0$ & reject $H_0$ \\
\hline
\multirow{3}{*}{Truth} & \multicolumn{1}{|l|}{$H_0$ is true} & \cellcolor{green} & \cellcolor{red}significance level = P(Type 1 error) \\
\cline{2-4}
& \multicolumn{1}{|l|}{$H_0$ is false} & \cellcolor{red}$P(\text{Type 2 error})$ & \cellcolor{green}Power = 1 - P(Type 2 error) \\
\end{tabular}
\end{center}
Computer software can provide the p-value for a hypothesis test at 5\% or 1\% significance level (Type 1 error).
It is clearly desirable to use a test whose power is as close to 1 as possible. There are three different ways to increase the power:
\begin{itemize}
\item \textbf{Increase the significance level:} If the critical value for the test is adjusted, increasing the probability of a Type 1 error decreases the probability of a Type 2 error and therefore increases the power.
\item \textbf{Use a different decision rule:} For example, in a test about the mean of a normal population, a decision rule based on the sample median has lower power than a decision rule based on the sample mean.
\item \textbf{Increase the sample size:} By increasing the amount of data on which we base our decision about whether to accept or reject $H_0$, the probabilities of making errors can be reduced.
\end{itemize}
When the significance level is fixed, increasing the sample size is therefore usually the only way to improve the power.
Ideally there should be a trade-off between low significance level (Type 1 error) and high power. The desired power of the test is usually 0.8. The power of a test is not a single value since the alternative hypothesis allows for a range of different parameter values. It is represented by a power function that can be graphed against the possible parameter values. MATLAB \texttt{sampsizepwr} can compute the sample size to obtain a particular power for a hypothesis test, given the parameter value of the alternative hypothesis.
\input{./TeX_files/materials/power_vs_sample_size}
There are many statistical tests for assessing normality: \person{Shapiro-Wilk} test, \person{Kolmogorov-Smirnov} test, \person{Jarque-Bera} test, etc. The \person{Shapiro-Wilk} test ($n<50$) can be used to verify whether data come from a normal distribution: \\
$H_0$: sample data are not significantly different from a normal population. \\
$H_A$: sample data are significantly different from a normal population. \\
P-value $>0.05$ means the data are normal \\
P-value $<0.05$ means the data are not normal \\
\person{Monte Carlo} simulations proved the efficiency of the \person{Shapiro-Wilk} test. It is preferable that normality is assessed visually as well! The \person{Kolmogorov-Smirnov} non-parametric test ($n>50$) examines if scores are likely to follow some distribution in some population (not necessarily normal).
% View file
% @ -0,0 +1,376 @@
\subsection{Likelihood ratio test}
In some cases we need to perform a hypothesis test to compare two models: a big ``general'' model ($M_B$) and a small ``simple'' model ($M_S$) nested into the bigger model. \\
$H_0$: $M_S$ fits the data \\
$H_A$: $M_S$ does not fit the data and $M_B$ should be used instead. \\
We need to verify if $M_B$ fits the data significantly better.
\begin{itemize}
\item \textbf{Measure how well a model fits the data:} The fit of any model can be described by the maximum possible likelihood for that model:
\begin{align}
L(M) = \max\{P(\text{data}\mid\text{model})\}\notag
\end{align}
Calculate the maximum likelihood estimates for all unknown parameters and insert them into the likelihood function.
\item \textbf{Work out the \begriff{likelihood ratio}:}
\begin{align}
R = \frac{L(M_B)}{L(M_S)} \ge 1\notag
\end{align}
Big values of $R$ suggest that $M_S$ does not fit as well as $M_B$.
\item \textbf{Work out log of likelihood ratio:}
\begin{align}
\log(R) = l(M_B) - l(M_S) \ge 0\notag
\end{align}
Big values of $\log(R)$ suggest that $M_S$ does not fit as well as $M_B$.
\end{itemize}
\begin{example}
There are a number of defective items on a production line in 20 days that follow \person{Poisson}($\lambda$) distribution: 1, 2, 3, 4, 2, 3, 2, 5, 5, 2, 4, 3, 5, 1, 2, 4, 0, 2, 2, 6. \\
$M_S$: the sample comes from \person{Poisson}(2) \\
$M_B$: the sample comes from \person{Poisson}($\lambda$) \\
\end{example}
\begin{example}
Clinical records give the survival time for 30 people: 9.73,5.56, 4.28, 4.87, 1.55, 6.20, 1.08, 7.17, 28.65, 6.10, 16.16, 9.92, 2.40, 6.19. In a clinical trial of a new drug treatment 20 people had survival times of: 22.07, 12.47, 6.42, 8.15, 0.64, 20.04, 17.49, 2.22, 3.00. Is there any difference in survival times for those using the new drug? \\
$M_S$: Both examples come from the same exponential($\lambda$) distribution. \\
$M_B$: The first sample comes from exponential($\lambda_1$) and the second sample from exponential($\lambda_2$).
\end{example}
\begin{definition}
If the data come from $L(M_S)$, and $L(M_B)$ has $k$ more parameters than $L(M_S)$ then
\begin{align}
X^2 &= 2\log(R) \notag \\
&= 2\big(l(M_B) - l(M_S)\big) \notag \\
&\approx \chi^2(k \text{ degrees of freedom}) \notag
\end{align}
\end{definition}
The main steps for the likelihood ratio test are:
\begin{enumerate}[label=\textbf{\arabic*.}]
\item Work out maximum likelihood estimates of all unknown parameters in $M_S$.
\item Work out maximum likelihood estimates of all unknown parameters in $M_B$.
\item Evaluate the test statistic: $\chi^2 = 2\big(l(M_B) - l(M_S)\big)$
\item The degrees of freedom for the test are the difference between the numbers of unknown parameters in two models. The p-value for the test is the upper tail probability of the $\chi^2(k \text{ degrees of freedom})$ distribution given the test statistic.
\item Interpret the p-value: small values give evidence that the null hypothesis ($M_S$ model) does not hold.
\end{enumerate}
\begin{example}
There are a number of defective items on a production line in 20 days that follow \person{Poisson}($\lambda$) distribution: 1, 2, 3, 4, 2, 3, 2, 5, 5, 2, 4, 3, 5, 1, 2, 4, 0, 2, 2, 6.
\begin{center}
$\begin{array}{ccp{4cm}|p{7cm}}
&&null hypothesis & $H_0$: $\lambda = 2$ small model $M_S$ \\
\cline{3-4}
&&alternative hypothesis & $H_A$: $\lambda \neq 2$ big model $M_B$\\
\cline{3-4}
&&log-likelihood for the Poisson distribution & $l(\lambda) = \left(\sum_{i=1}^{20} x_i\right)\log(\lambda) - n\lambda$ \\
\cline{3-4}
\multirow{3.7}{3mm}{$M_B$}& \ldelim\{{3.5}{2mm} & MLE for the unknown parameter& $\hat{\lambda} = \frac{\sum x_i}{n} = 2.9$ \\ \cline{3-4}
& & Maximum possible value for the log-likelihood & $l(M_B) = 58\log(2.9) - 20\cdot 2.9 = 3.7532$ \\ \cline{3-4}
\multirow{3.7}{3mm}{$M_S$}& \ldelim\{{3.5}{2mm} & MLE for the unknown parameter& no unknown parameter \\ \cline{3-4}
& & Maximum possible value for the log-likelihood & $l(M_S) = 58\log(2) - 20\cdot 2 = 0.2025$ \\ \cline{3-4}
&&Likelihood ratio test & $\chi^2 = 2\big(l(M_B) - l(M_S)\big) = 7.101$ \\
\cline{3-4}
&&\multicolumn{2}{p{11cm}}{It should be compared to $\chi^2(1\text{ degree of freedom})$ since the difference in unknown parameters is equal to 1.} \\
\cline{3-4}
&&p-value & The p-value is 0.008 (the upper tail probability above 7.101) \\
\cline{3-4}
&&Interpreting p-value & The p-value is very small and we can conclude that there is strong evidence that $M_B$ fits the data better than $M_S$: $\lambda\neq 2$.
\end{array}$
\end{center}
\begin{center}
\begin{tikzpicture}[scale=0.9]
\begin{axis}[
xmin=0, xmax=10, xlabel=$x$,
ymin=0, ymax=1, ylabel=$y$,
samples=50,
axis y line=middle,
axis x line=middle,
domain=0:10,
restrict y to domain=0:1,
]
\addplot[name path=f,blue] {exp(-x/2)/(sqrt(2*pi) * sqrt(x))};
\path[name path=axis] (axis cs:7.101,0) -- (axis cs:10,0);
\addplot [thick,color=blue,fill=blue,fill opacity=0.3] fill between[of=f and axis,soft clip={domain=7.101:10},];
\draw [dotted] (axis cs:7.101,0) -- (axis cs:7.101,0.6);
\node at (axis cs:8.5,0.4) (a) {p-value};
\draw (axis cs:8.5, 0.36) -- (axis cs: 7.5,0.0002);
\end{axis}
\end{tikzpicture}
\begin{tikzpicture}[scale=0.9]
\begin{axis}[
xmin=6, xmax=10, xlabel=$x$,
ymin=0, ymax=0.01, ylabel=$y$,
samples=50,
axis y line=middle,
axis x line=middle,
domain=0:10,
restrict y to domain=0:0.01,
]
\addplot[name path=f,blue] {exp(-x/2)/(sqrt(2*pi) * sqrt(x))};
\path[name path=axis] (axis cs:7.101,0) -- (axis cs:10,0);
\addplot [thick,color=blue,fill=blue,fill opacity=0.3] fill between[of=f and axis,soft clip={domain=7.101:10},];
\draw [dotted] (axis cs:7.101,0) -- (axis cs:7.101,0.006);
\node at (axis cs:8.5,0.006) (a) {p-value};
\draw (axis cs:8.5, 0.0056) -- (axis cs: 7.5,0.002);
\end{axis}
\end{tikzpicture}
\end{center}
\end{example}
A \begriff{two-sample t-test} should be used to compare group means when you have independent samples. A \begriff{paired t-test} is needed when each sampled item in one group is associated with an item sampled from the other group.
\subsection{Two-sample t-test}
We can carry out a hypothesis test to verify if the two means are equal: \\
$H_0$: $\mu_1 = \mu_2$ \\
$H_A$: $\mu_1\neq\mu_2$ (The corresponding one-tailed alternative also holds.)
\begin{definition}
If $\bar{x_1}$ and $\bar{x_2}$ come from Normal($\mu_1,\sigma$) and Normal($\mu_2,\sigma$) with sample sizes $n_1$ and $n_2$ then
\begin{align}
T = \frac{\bar{x_1}-\bar{x_2}}{\SE(\bar{x_1}-\bar{x_2})} \approx t(n_1+n_2-2\text{ degrees of freedom})\notag
\end{align}
provided $\mu_1=\mu_2$. For relatively large sample sizes (Central Limit Theorem) we can use Z-test instead of t-test.
\end{definition}
\begin{example}
A botanist is interested in comparing the growth response of dwarf pea stems to two different levels of the hormone indoleacetic acid (IAA). The botanist measured the growth of pea stem segments in millimetres for $0.5\cdot 10^{-4}$ IAA level: 0.8, 1.8, 1.0, 0.1, 0.9, 1.7, 1.0, 1.4, 0.9, 1.2, 0.5 and for $10^{-4}$ IAA level: 1.0, 1.8, 0.8, 2.5, 1.6, 1.4, 2.6, 1.9, 1.3, 2.0, 1.1, 1.2. Test whether the larger hormone concentration results in greater growth of the pea plants.
\begin{center}
\begin{tabular}{p{4cm}|p{7cm}}
independent samples & $n_x = 11$, $n_y=12$ \\
\hline
Null hypothesis & $H_0$: $\mu_x=\mu_y$ \\
\hline
Alternative hypothesis & $H_A$: $\mu_x < \mu_y$ \\
\hline
The \begriff{pooled estimate} assumes that the variance is the same in both groups & $s^2 = \frac{10s_x^2 + 11s_y^2}{21} = 0.2896$ \\
\hline
test statistic & $t=\frac{1.027-1.6}{\sqrt{0.2896(\frac{1}{11} + \frac{1}{12})}} = -2.5496$ \\
\hline
p-value for 21 degrees of freedom in t-distribution & $P(t\le -2.5496) = 0.0093$ \\
\hline
Interpretation & There is very strong evidence that the mean growth of the peas is higher at the higher hormone concentration.
\end{tabular}
\end{center}
\begin{center}
\begin{tikzpicture}
\begin{axis}[
xmin=-3, xmax=3, xlabel=$x$,
ymin=0, ymax=1, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
domain=-3:3,
restrict y to domain=0:1,
]
\addplot[name path=f,blue] {(1.38087*10^(14))/((x^2+21)^(11))};
\path[name path=axis] (axis cs:-3,0) -- (axis cs:-2.5496,0);
\addplot [thick,color=blue,fill=blue,fill opacity=0.3] fill between[of=f and axis,soft clip={domain=-3:-2.5496},];
\draw [dotted] (axis cs:-2.5496,0) -- (axis cs:-2.5496,0.6);
\node at (axis cs:-1.7,0.3) (a) {p-value};
\draw (axis cs:-1.7, 0.26) -- (axis cs: -2.7,0.01);
\end{axis}
\end{tikzpicture}
\end{center}
\end{example}
\begin{*anmerkung}[pooled variance]
In statistics, pooled variance (also known as combined, composite, or overall variance) is a method for estimating variance of several different populations when the mean of each population may be different, but one may assume that the variance of each population is the same. The numerical estimate resulting from the use of this method is also called the pooled variance.
Under the assumption of equal population variances, the pooled sample variance provides a higher precision estimate of variance than the individual sample variances. This higher precision can lead to increased statistical power when used in statistical tests that compare the populations, such as the t-test.
\begin{align}
s^2 = \frac{\sum_{i=1}^k (n_i-1)s_i^2}{\sum_{i=1}^k (n_i-1)} \notag
\end{align}
Adapted from \url{https://en.wikipedia.org/wiki/Pooled_variance}.
\end{*anmerkung}
\begin{example}[from MATLAB session]
When you sign in to your Facebook account, you are granted access to more than 1 million relying party (RP) websites. RP websites were categorized as server-flow or client-flow websites. Of the 40 server-flow sites studied, 20 were found to be vulnerable to impersonation attacks. Of the 54 client-flow sites, 41 were found to be vulnerable to impersonation attacks. Do these results indicate that a client-flow website is more likely to be vulnerable to an attack than a server-flow website? Test using $\alpha = 0.01$.
\begin{center}
\begin{tabular}{p{4cm}|p{7cm}}
Null hypothesis & $H_0$: $p_{server}=p_{client}\Rightarrow \frac{20}{40}=\frac{41}{54}$ \\
\hline
Alternative hypothesis & $H_A$: $p_{server} < p_{client}$ \\
\hline
pooled sample proportion & $p = \frac{40\cdot\frac{20}{40} + 54\cdot\frac{41}{54}}{40+54} = 0.6489$ \\
\hline
test statistic & $z=\frac{p_{client}-p_{server}}{\sqrt{0.6489(\frac{1}{40} + \frac{1}{54})}} = 2.6038$ \\
\hline
rejection region for $\alpha=0.01$ & \texttt{norminv(0.99)} = 2.3263 \\
\hline
Interpretation & $z$ lies in the rejection region so $H_0$ is rejected.
\end{tabular}
\end{center}
\begin{center}
\begin{tikzpicture}
\begin{axis}[
xmin=-4, xmax=4, xlabel=$x$,
ymin=0, ymax=1, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
domain=-4:4,
restrict y to domain=0:1,
width = 16cm,
height = 8cm,
]
\addplot[name path=f,blue] {1/(sqrt(2*pi)) * exp(-x^2/2)};
\path[name path=axis] (axis cs:2.3263,0) -- (axis cs:4,0);
\draw (axis cs:2.3263,0) -- (axis cs:2.3263,1);
\draw [dotted] (axis cs:2.6038,0) -- (axis cs:2.6038,0.6);
\node at (axis cs:1.7,0.5) (a) {0.01};
\draw (axis cs:1.7, 0.46) -- (axis cs: 2.45,0.01);
\node at (axis cs: 2.7,0.95) (b) {2.3263};
\node at (axis cs: 2.95,0.55) (c) {2.6038};
\node[red] at (axis cs: 3.2,0.78) (d) {rejection region};
\begin{scope}[transparency group]
\begin{scope}[blend mode=multiply]
\addplot [thick,color=blue,fill=blue,fill opacity=0.3] fill between[of=f and axis,soft clip={domain=2.3263:4},];
\draw[red,fill=red,opacity=0.2] (axis cs: 2.3263,0) -- (axis cs: 2.3263,1) -- (axis cs: 4,1) -- (axis cs: 4,0) -- (axis cs: 2.3263,0);
\end{scope}
\end{scope}
\end{axis}
\end{tikzpicture}
\end{center}
\end{example}
\subsection{Paired t-test}
Testing whether two paired measurements $X$ and $Y$ have equal means is done in terms of the difference $D=Y-X$. The hypothesis \\
$H_0$: $\mu_x = \mu_y$ \\
$H_A$: $\mu_x\neq \mu_y$ \\
can be re-written as \\
$H_0$: $\mu_d=0$ \\
$H_A$: $\mu_d\neq 0$. \\
This can reduce the paired data set to a univariate data set of differences. The hypothesis can be assigned using t-test:
\begin{align}
t = \frac{\bar{d}-0}{\frac{s_d}{\sqrt{n}}} \notag
\end{align}
Z-test can be used for relatively large sample sizes.
\begin{example}
A researcher studying congenital heart disease wants to compare the development of cyanotic children with normal children. Among the measurement of interest is the age at which the children speak their first word.
\begin{center}
\begin{tabular}{c|cc|c}
\textbf{pair of siblings} & \textbf{cyanotic sibling} & \textbf{normal sibling} & \textbf{difference} \\
\hline
1 & 11.8 & 9.8 & 2.0 \\
2 & 20.8 & 16.5 & 4.3 \\
3 & 14.5 & 14.5 & 0.0 \\
4 & 9.5 & 15.2 & -5.7 \\
5 & 13.5 & 11.8 & 1.7 \\
6 & 22.6 & 12.2 & 10.4 \\
7 & 11.1 & 15.2 & -4.1 \\
8 & 14.9 & 15.6 & -0.7 \\
9 & 16.5 & 17.2 & -0.7 \\
10 & 16.5 & 10.5 & 6.0 \\
\end{tabular}
\end{center}
The researcher wants to test whether cyanotic children speak their first word later on average than children without the disease.
\begin{center}
\begin{tabular}{p{4cm}|p{7cm}}
Null hypothesis & $H_0$: $\mu_d = 0$ \\
\hline
Alternative hypothesis & $H_A$: $\mu_d > 0$ \\
\hline
test statistic & $t = \frac{\bar{d}-0}{\frac{s_d}{\sqrt{n}}} = 0.8802$ \\
\hline
Interpretation & The p-value (0.1997) is well above 0.05, so there is no evidence that the cyanotic children learn to speak later.
\end{tabular}
\end{center}
\begin{center}
\begin{tikzpicture}
\begin{axis}[
xmin=-3, xmax=3, xlabel=$x$,
ymin=0, ymax=1, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
domain=-3:3,
restrict y to domain=0:1,
]
\addplot[name path=f,blue] {(984375)/(8*(x^2+10)^(5.5))};
\path[name path=axis] (axis cs:0.8802,0) -- (axis cs:3,0);
\addplot [thick,color=blue,fill=blue,fill opacity=0.3] fill between[of=f and axis,soft clip={domain=0.8802:3},];
\draw [dotted] (axis cs:0.8802,0) -- (axis cs:0.8802,0.6);
\node at (axis cs:1.7,0.4) (a) {p-value};
\draw (axis cs:1.7, 0.36) -- (axis cs: 1.2,0.08);
\end{axis}
\end{tikzpicture}
\end{center}
\end{example}
\begin{example}
The blood pressure of 15 college-aged women was measured before starting to take the pill and after 6 months of use.
\begin{center}
\begin{tabular}{c|cc}
& \multicolumn{2}{c}{\textbf{blood pressure}} \\
\textbf{subject} & \textbf{before pill} & \textbf{after pill} \\
\hline
1 & 70 & 68 \\
2 & 80 & 72 \\
3 & 72 & 62 \\
4 & 76 & 70 \\
5 & 76 & 58 \\
6 & 76 & 66 \\
7 & 72 & 68 \\
8 & 78 & 52 \\
9 & 82 & 64 \\
10 & 64 & 72 \\
11 & 74 & 74 \\
12 & 92 & 60 \\
13 & 74 & 74 \\
14 & 68 & 72 \\
15 & 84 & 74 \\
\end{tabular}
\end{center}
A two-tailed test is used as the pill might either increase or decrease blood pressure.
\begin{center}
\begin{tabular}{p{4cm}|p{7cm}}
Null hypothesis & $H_0$: $\mu_d = 0$ \\
\hline
Alternative hypothesis & $H_A$: $\mu_d \neq 0$ \\
\hline
test statistic & $t = \frac{\bar{d}-0}{\frac{s_d}{\sqrt{n}}} = -3.1054$ \\
\hline
Interpretation & The p-value (0.0072) is very small, which gives strong evidence that the blood pressure has changed. The negative t-value suggests that the blood pressure has decreased.
\end{tabular}
\end{center}
\begin{center}
\begin{tikzpicture}[scale=0.9]
\begin{axis}[
xmin=-4, xmax=4, xlabel=$x$,
ymin=0, ymax=1, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
domain=-4:4,
restrict y to domain=0:1,
]
\addplot[name path=f,blue] {(116640000000*sqrt(15))/(143*pi*(x^2+15)^(8))};
\path[name path=axis] (axis cs:-4,0) -- (axis cs:-3.1054,0);
\addplot [thick,color=blue,fill=blue,fill opacity=0.3] fill between[of=f and axis,soft clip={domain=-4:-3.1054},];
\draw [dotted] (axis cs:-3.1054,0) -- (axis cs:-3.1054,0.6);
\node at (axis cs:-2.2,0.4) (a) {p-value};
\draw (axis cs:-2.2, 0.36) -- (axis cs: -3.2,0.001);
\end{axis}
\end{tikzpicture}
\begin{tikzpicture}[scale=0.9]
\begin{axis}[
xmin=-4, xmax=-2, xlabel=$x$,
ymin=0, ymax=0.1, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
domain=-4:-2,
restrict y to domain=0:0.1,
]
\addplot[name path=f,blue] {(116640000000*sqrt(15))/(143*pi*(x^2+15)^(8))};
\path[name path=axis] (axis cs:-4,0) -- (axis cs:-3.1054,0);
\addplot [thick,color=blue,fill=blue,fill opacity=0.3] fill between[of=f and axis,soft clip={domain=-4:-3.1054},];
\draw [dotted] (axis cs:-3.1054,0) -- (axis cs:-3.1054,0.06);
\node at (axis cs:-3.6,0.05) (a) {p-value};
\draw (axis cs:-3.6, 0.046) -- (axis cs: -3.2,0.003);
\end{axis}
\end{tikzpicture}
\end{center}
\end{example}

% diff artifact (file boundary): View file
%
% diff artifact: @ -0,0 +1,156 @@
\subsection{\person{Weibulls} Distribution - Graphs}
\begin{center}
\begin{tabular}{p{5cm}|p{5cm}|p{5cm}}
$shape = 0.5$ & $shape = 1$ & $shape = 2$ \\
\hline
\multicolumn{3}{c}{\cellcolor{gray!50}\textbf{Weibull PDF} ($scale = 1$)} \\
\hline
\begin{tikzpicture}[scale=0.6]
\begin{axis}[
xmin=0, xmax=2, xlabel=$x$,
ymin=0, ymax=1, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
]
\addplot+[mark=none] {0.5*x^(-0.5)*exp(-x^0.5)};
\end{axis}
\end{tikzpicture} &
\begin{tikzpicture}[scale=0.6]
\begin{axis}[
xmin=0, xmax=2, xlabel=$x$,
ymin=0, ymax=1, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
restrict y to domain=0:1,
]
\addplot+[mark=none] {1/exp(x)};
\end{axis}
\end{tikzpicture} &
\begin{tikzpicture}[scale=0.6]
\begin{axis}[
xmin=0, xmax=2, xlabel=$x$,
ymin=0, ymax=1, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
]
\addplot+[mark=none] {2*x^(1)*exp(-x^2)};
\end{axis}
\end{tikzpicture} \\
\hline
\multicolumn{3}{c}{\cellcolor{gray!50}\textbf{Weibull CDF} ($scale = 1$)} \\
\hline
\begin{tikzpicture}[scale=0.6]
\begin{axis}[
xmin=0, xmax=2, xlabel=$x$,
ymin=0, ymax=1, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
]
\addplot+[mark=none] {1-exp(-x^0.5)};
\end{axis}
\end{tikzpicture} &
\begin{tikzpicture}[scale=0.6]
\begin{axis}[
xmin=0, xmax=2, xlabel=$x$,
ymin=0, ymax=1, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
restrict y to domain=0:1,
]
\addplot+[mark=none] {1-1/exp(x)};
\end{axis}
\end{tikzpicture} &
\begin{tikzpicture}[scale=0.6]
\begin{axis}[
xmin=0, xmax=2, xlabel=$x$,
ymin=0, ymax=1, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
]
\addplot+[mark=none] {1-exp(-x^2)};
\end{axis}
\end{tikzpicture} \\
\hline
\multicolumn{3}{c}{\cellcolor{gray!50}\textbf{Hazard function} $\left(h = \frac{\text{PDF}}{1-\text{CDF}}\right)$} \\
\hline
\begin{tikzpicture}[scale=0.6]
\begin{axis}[
xmin=0, xmax=2, xlabel=$x$,
ymin=0, ymax=1, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
]
\addplot+[mark=none] {(0.5*x^(-0.5)*exp(-x^0.5))/(1-(1-exp(-x^0.5)))};
\end{axis}
\end{tikzpicture} &
\begin{tikzpicture}[scale=0.6]
\begin{axis}[
xmin=0, xmax=2, xlabel=$x$,
ymin=0, ymax=1, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
restrict y to domain=0:1,
]
\addplot+[mark=none] {(1/exp(x))/(1-(1-1/exp(x)))};
\draw[blue] (axis cs: 0,1) -- (axis cs: 2,1);
\end{axis}
\end{tikzpicture} &
\begin{tikzpicture}[scale=0.6]
\begin{axis}[
xmin=0, xmax=2, xlabel=$x$,
ymin=0, ymax=1, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
]
\addplot+[mark=none] {(2*x^(1)*exp(-x^2))/(1-(1-exp(-x^2)))};
\end{axis}
\end{tikzpicture} \\
\hline
\multicolumn{3}{c}{\cellcolor{gray!50}\textbf{Survival function} ($R = 1-\text{CDF}$)} \\
\hline
\begin{tikzpicture}[scale=0.6]
\begin{axis}[
xmin=0, xmax=2, xlabel=$x$,
ymin=0, ymax=1, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
]
\addplot+[mark=none] {1-(1-exp(-x^0.5))};
\end{axis}
\end{tikzpicture} &
\begin{tikzpicture}[scale=0.6]
\begin{axis}[
xmin=0, xmax=2, xlabel=$x$,
ymin=0, ymax=1, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
restrict y to domain=0:1,
]
\addplot+[mark=none] {1-(1-1/exp(x))};
\end{axis}
\end{tikzpicture} &
\begin{tikzpicture}[scale=0.6]
\begin{axis}[
xmin=0, xmax=2, xlabel=$x$,
ymin=0, ymax=1, ylabel=$y$,
samples=400,
axis y line=middle,
axis x line=middle,
]
\addplot+[mark=none] {1-(1-exp(-x^2))};
\end{axis}
\end{tikzpicture} \\
\end{tabular}
\end{center}

% diff artifact: File diff suppressed because it is too large -- Load diff
%
% diff artifact (file boundary): View file
%
% diff artifact: @ -0,0 +1,114 @@
\begin{center}
\begin{tikzpicture}
\begin{axis}[
xmin=0, xmax=100, xlabel=sample size,
ymin=0, ymax=1, ylabel=power,
samples=401,
axis y line=middle,
axis x line=middle,
restrict y to domain=0:1,
]
\addplot[mark=x, blue] coordinates {
( 2,0.06)
( 3,0.07)
( 4,0.09)
( 5,0.11)
( 6,0.13)
( 7,0.15)
( 8,0.17)
( 9,0.19)
( 10,0.21)
( 11,0.22)
( 12,0.24)
( 13,0.26)
( 14,0.28)
( 15,0.30)
( 16,0.32)
( 17,0.34)
( 18,0.36)
( 19,0.38)
( 20,0.40)
( 21,0.42)
( 22,0.43)
( 23,0.45)
( 24,0.47)
( 25,0.48)
( 26,0.50)
( 27,0.52)
( 28,0.53)
( 29,0.55)
( 30,0.56)
( 31,0.58)
( 32,0.59)
( 33,0.61)
( 34,0.62)
( 35,0.63)
( 36,0.65)
( 37,0.66)
( 38,0.67)
( 39,0.68)
( 40,0.69)
( 41,0.71)
( 42,0.72)
( 43,0.73)
( 44,0.74)
( 45,0.75)
( 46,0.76)
( 47,0.77)
( 48,0.77)
( 49,0.78)
( 50,0.79)
( 51,0.80)
( 52,0.81)
( 53,0.82)
( 54,0.82)
( 55,0.83)
( 56,0.84)
( 57,0.84)
( 58,0.85)
( 59,0.86)
( 60,0.86)
( 61,0.87)
( 62,0.87)
( 63,0.88)
( 64,0.88)
( 65,0.89)
( 66,0.89)
( 67,0.90)
( 68,0.90)
( 69,0.91)
( 70,0.91)
( 71,0.91)
( 72,0.92)
( 73,0.92)
( 74,0.92)
( 75,0.93)
( 76,0.93)
( 77,0.93)
( 78,0.94)
( 79,0.94)
( 80,0.94)
( 81,0.94)
( 82,0.95)
( 83,0.95)
( 84,0.95)
( 85,0.95)
( 86,0.96)
( 87,0.96)
( 88,0.96)
( 89,0.96)
( 90,0.96)
( 91,0.97)
( 92,0.97)
( 93,0.97)
( 94,0.97)
( 95,0.97)
( 96,0.97)
( 97,0.97)
( 98,0.98)
( 99,0.98)
(100,0.98)
};
\end{axis}
\end{tikzpicture}
\end{center}

% diff artifact (file boundary): View file
%
% diff artifact: @ -0,0 +1,96 @@
\begin{center}
\begin{tikzpicture}
\node at (0.00,0.25) (1) {$\times$};
\node at (0.00,0.50) (1) {$\times$};
\node at (0.00,0.75) (1) {$\times$};
\node at (0.20,0.25) (1) {$\times$};
\node at (0.80,0.25) (1) {$\times$};
\node at (0.80,0.50) (1) {$\times$};
\node at (0.80,0.75) (1) {$\times$};
\node at (1.00,0.25) (1) {$\times$};
\node at (1.00,0.50) (1) {$\times$};
\node at (1.00,0.75) (1) {$\times$};
\node at (1.20,0.25) (1) {$\times$};
\node at (1.20,0.50) (1) {$\times$};
\node at (1.20,0.75) (1) {$\times$};
\node at (1.20,1.00) (1) {$\times$};
\node at (1.20,1.25) (1) {$\times$};
\node at (1.40,0.25) (1) {$\times$};
\node at (1.60,0.25) (1) {$\times$};
\node at (1.60,0.50) (1) {$\times$};
\node at (1.60,0.75) (1) {$\times$};
\node at (1.60,1.00) (1) {$\times$};
\node at (1.80,0.25) (1) {$\times$};
\node at (1.80,0.50) (1) {$\times$};
\node at (1.80,0.75) (1) {$\times$};
\node at (2.00,0.25) (1) {$\times$};
\node at (2.00,0.50) (1) {$\times$};
\node at (2.20,0.25) (1) {$\times$};
\node at (2.20,0.50) (1) {$\times$};
\node at (2.40,0.25) (1) {$\times$};
\node at (2.40,0.50) (1) {$\times$};
\node at (2.40,0.75) (1) {$\times$};
\node at (2.60,0.25) (1) {$\times$};
\node at (2.60,0.50) (1) {$\times$};
\node at (2.60,0.75) (1) {$\times$};
\node at (2.60,1.00) (1) {$\times$};
\node at (2.80,0.25) (1) {$\times$};
\node at (2.80,0.50) (1) {$\times$};
\node at (3.00,0.25) (1) {$\times$};
\node at (3.00,0.50) (1) {$\times$};
\node at (3.00,0.75) (1) {$\times$};
\node at (3.20,0.25) (1) {$\times$};
\node at (3.20,0.50) (1) {$\times$};
\node at (3.20,0.75) (1) {$\times$};
\node at (3.40,0.25) (1) {$\times$};
\node at (3.40,0.50) (1) {$\times$};
\node at (3.40,0.75) (1) {$\times$};
\node at (3.40,1.00) (1) {$\times$};
\node at (3.40,1.25) (1) {$\times$};
\node at (3.40,1.50) (1) {$\times$};
\node at (3.60,0.25) (1) {$\times$};
\node at (3.80,0.25) (1) {$\times$};
\node at (3.80,0.50) (1) {$\times$};
\node at (4.00,0.25) (1) {$\times$};
\node at (4.00,0.50) (1) {$\times$};
\node at (4.20,0.25) (1) {$\times$};
\node at (4.20,0.50) (1) {$\times$};
\node at (4.40,0.25) (1) {$\times$};
\node at (4.40,0.50) (1) {$\times$};
\node at (4.40,0.75) (1) {$\times$};
\node at (4.40,1.00) (1) {$\times$};
\node at (4.60,0.25) (1) {$\times$};
\node at (4.60,0.50) (1) {$\times$};
\node at (5.40,0.25) (1) {$\times$};
\node at (5.60,0.25) (1) {$\times$};
\node at (5.60,0.50) (1) {$\times$};
\node at (5.60,0.75) (1) {$\times$};
\node at (6.40,0.25) (1) {$\times$};
\node at (6.60,0.25) (1) {$\times$};
\node at (6.60,0.50) (1) {$\times$};
\node at (6.60,0.75) (1) {$\times$};
\node at (6.60,1.00) (1) {$\times$};
\node at (7.00,0.25) (1) {$\times$};
\node at (7.00,0.50) (1) {$\times$};
\node at (7.00,0.75) (1) {$\times$};
\node at (7.00,1.00) (1) {$\times$};
\draw (0,0.05) -- (8,0.05);
\draw (0,0.05) -- (0,-0.05);
\draw (2,0.05) -- (2,-0.05);
\draw (4,0.05) -- (4,-0.05);
\draw (6,0.05) -- (6,-0.05);
\draw (8,0.05) -- (8,-0.05);
\node at (0,-0.35) (0) {0};
\node at (2,-0.35) (0) {100};
\node at (4,-0.35) (0) {200};
\node at (6,-0.35) (0) {300};
\node at (8,-0.35) (0) {400};
\draw[gray,dotted] (3.812,0.05) -- (3.812,2.5);
\node[gray] at (3.812,2.8) (up) {upper quartile = 190.6};
\draw[red,dotted] (4.4,0.05) -- (4.4,2);
\node[red] at (4.4,2.3) (up) {upper quartile = 220};
\end{tikzpicture}
\end{center}

% diff artifact (file boundary): View file
%
% diff artifact: @ -0,0 +1,96 @@
\begin{center}
\begin{tikzpicture}
\node at (0.00,0.25) (1) {$\times$};
\node at (0.20,0.25) (1) {$\times$};
\node at (0.20,0.50) (1) {$\times$};
\node at (0.20,0.75) (1) {$\times$};
\node at (0.80,0.25) (1) {$\times$};
\node at (0.80,0.50) (1) {$\times$};
\node at (0.80,0.75) (1) {$\times$};
\node at (1.00,0.25) (1) {$\times$};
\node at (1.20,0.25) (1) {$\times$};
\node at (1.20,0.50) (1) {$\times$};
\node at (1.20,0.75) (1) {$\times$};
\node at (1.20,1.00) (1) {$\times$};
\node at (1.20,1.25) (1) {$\times$};
\node at (1.40,0.25) (1) {$\times$};
\node at (1.40,0.50) (1) {$\times$};
\node at (1.40,0.75) (1) {$\times$};
\node at (1.40,1.00) (1) {$\times$};
\node at (1.60,0.25) (1) {$\times$};
\node at (1.60,0.50) (1) {$\times$};
\node at (1.60,0.75) (1) {$\times$};
\node at (1.60,1.00) (1) {$\times$};
\node at (1.80,0.25) (1) {$\times$};
\node at (2.00,0.25) (1) {$\times$};
\node at (2.00,0.50) (1) {$\times$};
\node at (2.00,0.75) (1) {$\times$};
\node at (2.00,1.00) (1) {$\times$};
\node at (2.00,1.25) (1) {$\times$};
\node at (2.20,0.25) (1) {$\times$};
\node at (2.20,0.50) (1) {$\times$};
\node at (2.20,0.75) (1) {$\times$};
\node at (2.20,1.00) (1) {$\times$};
\node at (2.20,1.25) (1) {$\times$};
\node at (2.20,1.50) (1) {$\times$};
\node at (2.40,0.25) (1) {$\times$};
\node at (2.40,0.50) (1) {$\times$};
\node at (2.40,0.75) (1) {$\times$};
\node at (2.40,1.00) (1) {$\times$};
\node at (2.40,1.25) (1) {$\times$};
\node at (2.60,0.25) (1) {$\times$};
\node at (2.60,0.50) (1) {$\times$};
\node at (2.60,0.75) (1) {$\times$};
\node at (2.60,1.00) (1) {$\times$};
\node at (2.60,1.25) (1) {$\times$};
\node at (2.60,1.50) (1) {$\times$};
\node at (2.60,1.75) (1) {$\times$};
\node at (2.80,0.25) (1) {$\times$};
\node at (2.80,0.50) (1) {$\times$};
\node at (2.80,0.75) (1) {$\times$};
\node at (3.00,0.25) (1) {$\times$};
\node at (3.00,0.50) (1) {$\times$};
\node at (3.00,0.75) (1) {$\times$};
\node at (3.00,1.00) (1) {$\times$};
\node at (3.00,1.25) (1) {$\times$};
\node at (3.20,0.25) (1) {$\times$};
\node at (3.40,0.25) (1) {$\times$};
\node at (3.60,0.25) (1) {$\times$};
\node at (3.80,0.25) (1) {$\times$};
\node at (4.20,0.25) (1) {$\times$};
\node at (4.20,0.50) (1) {$\times$};
\node at (4.20,0.75) (1) {$\times$};
\node at (4.20,1.00) (1) {$\times$};
\node at (4.40,0.25) (1) {$\times$};
\node at (4.40,0.50) (1) {$\times$};
\node at (4.40,0.75) (1) {$\times$};
\node at (4.60,0.25) (1) {$\times$};
\node at (4.80,0.25) (1) {$\times$};
\node at (5.60,0.25) (1) {$\times$};
\node at (5.60,0.50) (1) {$\times$};
\node at (6.00,0.25) (1) {$\times$};
\node at (6.00,0.50) (1) {$\times$};
\node at (6.40,0.25) (1) {$\times$};
\node at (6.40,0.50) (1) {$\times$};
\node at (6.40,0.75) (1) {$\times$};
\node at (6.60,0.25) (1) {$\times$};
\draw (0,0.05) -- (8,0.05);
\draw (0,0.05) -- (0,-0.05);
\draw (2,0.05) -- (2,-0.05);
\draw (4,0.05) -- (4,-0.05);
\draw (6,0.05) -- (6,-0.05);
\draw (8,0.05) -- (8,-0.05);
\node at (0,-0.35) (0) {0};
\node at (2,-0.35) (0) {100};
\node at (4,-0.35) (0) {200};
\node at (6,-0.35) (0) {300};
\node at (8,-0.35) (0) {400};
\draw[gray,dotted] (3.812,0.05) -- (3.812,2.5);
\node[gray] at (3.812,2.8) (up) {upper quartile = 190.6};
\draw[red,dotted] (3.6,0.05) -- (3.6,2);
\node[red] at (3.6,2.3) (up) {upper quartile = 180};
\end{tikzpicture}
\end{center}

% diff artifact (file boundary): View file
%
% diff artifact: @ -0,0 +1,94 @@
\begin{center}
\begin{tikzpicture}
\node at (0.00,0.25) (1) {$\times$};
\node at (0.00,0.50) (1) {$\times$};
\node at (0.00,0.75) (1) {$\times$};
\node at (0.20,0.25) (1) {$\times$};
\node at (0.20,0.50) (1) {$\times$};
\node at (0.40,0.25) (1) {$\times$};
\node at (0.80,0.25) (1) {$\times$};
\node at (0.80,0.50) (1) {$\times$};
\node at (0.80,0.75) (1) {$\times$};
\node at (1.00,0.25) (1) {$\times$};
\node at (1.00,0.50) (1) {$\times$};
\node at (1.20,0.25) (1) {$\times$};
\node at (1.20,0.50) (1) {$\times$};
\node at (1.20,0.75) (1) {$\times$};
\node at (1.40,0.25) (1) {$\times$};
\node at (1.40,0.50) (1) {$\times$};
\node at (1.40,0.75) (1) {$\times$};
\node at (1.60,0.25) (1) {$\times$};
\node at (1.60,0.50) (1) {$\times$};
\node at (1.60,0.75) (1) {$\times$};
\node at (1.80,0.25) (1) {$\times$};
\node at (1.80,0.50) (1) {$\times$};
\node at (2.00,0.25) (1) {$\times$};
\node at (2.00,0.50) (1) {$\times$};
\node at (2.00,0.75) (1) {$\times$};
\node at (2.00,1.00) (1) {$\times$};
\node at (2.20,0.25) (1) {$\times$};
\node at (2.20,0.50) (1) {$\times$};
\node at (2.20,0.75) (1) {$\times$};
\node at (2.20,1.00) (1) {$\times$};
\node at (2.40,0.25) (1) {$\times$};
\node at (2.40,0.50) (1) {$\times$};
\node at (2.40,0.75) (1) {$\times$};
\node at (2.40,1.00) (1) {$\times$};
\node at (2.60,0.25) (1) {$\times$};
\node at (2.60,0.50) (1) {$\times$};
\node at (2.60,0.75) (1) {$\times$};
\node at (2.60,1.00) (1) {$\times$};
\node at (2.60,1.25) (1) {$\times$};
\node at (2.60,1.50) (1) {$\times$};
\node at (2.80,0.25) (1) {$\times$};
\node at (2.80,0.50) (1) {$\times$};
\node at (2.80,0.75) (1) {$\times$};
\node at (3.00,0.25) (1) {$\times$};
\node at (3.00,0.50) (1) {$\times$};
\node at (3.00,0.75) (1) {$\times$};
\node at (3.00,1.00) (1) {$\times$};
\node at (3.00,1.25) (1) {$\times$};
\node at (3.20,0.25) (1) {$\times$};
\node at (3.20,0.50) (1) {$\times$};
\node at (3.40,0.25) (1) {$\times$};
\node at (3.40,0.50) (1) {$\times$};
\node at (3.40,0.75) (1) {$\times$};
\node at (3.60,0.25) (1) {$\times$};
\node at (3.60,0.50) (1) {$\times$};
\node at (3.80,0.25) (1) {$\times$};
\node at (4.00,0.25) (1) {$\times$};
\node at (4.20,0.25) (1) {$\times$};
\node at (4.20,0.50) (1) {$\times$};
\node at (4.40,0.25) (1) {$\times$};
\node at (4.40,0.50) (1) {$\times$};
\node at (4.60,0.25) (1) {$\times$};
\node at (4.60,0.50) (1) {$\times$};
\node at (4.80,0.25) (1) {$\times$};
\node at (4.80,0.50) (1) {$\times$};
\node at (5.40,0.25) (1) {$\times$};
\node at (5.60,0.25) (1) {$\times$};
\node at (5.60,0.50) (1) {$\times$};
\node at (6.00,0.25) (1) {$\times$};
\node at (6.40,0.25) (1) {$\times$};
\node at (6.60,0.25) (1) {$\times$};
\node at (7.00,0.25) (1) {$\times$};
\node at (7.40,0.25) (1) {$\times$};
\node at (7.80,0.25) (1) {$\times$};
\draw (0,0.05) -- (8,0.05);
\draw (0,0.05) -- (0,-0.05);
\draw (2,0.05) -- (2,-0.05);
\draw (4,0.05) -- (4,-0.05);
\draw (6,0.05) -- (6,-0.05);
\draw (8,0.05) -- (8,-0.05);
\node at (0,-0.35) (0) {0};
\node at (2,-0.35) (0) {100};
\node at (4,-0.35) (0) {200};
\node at (6,-0.35) (0) {300};
\node at (8,-0.35) (0) {400};
\draw[red,dotted] (3.812,0.05) -- (3.812,2);
\node[red] at (3.812,2.3) (up) {upper quartile = 190.6};
\end{tikzpicture}
\end{center}

% diff artifact (file boundary): View file
%
% diff artifact: @ -0,0 +1,222 @@
\begin{center}
\begin{tikzpicture}[scale=0.6]
\node at (525.23,0.09) (1) {$\times$};
\node at (521.10,0.67) (2) {$\times$};
\node at (519.55,1.00) (3) {$\times$};
\node at (518.37,0.28) (4) {$\times$};
\node at (522.27,0.13) (5) {$\times$};
\node at (522.26,0.55) (6) {$\times$};
\node at (509.43,0.93) (7) {$\times$};
\node at (518.28,0.02) (8) {$\times$};
\node at (522.19,0.74) (9) {$\times$};
\node at (520.17,0.75) (10) {$\times$};
\node at (520.22,0.13) (11) {$\times$};
\node at (519.52,0.37) (12) {$\times$};
\node at (519.65,0.88) (13) {$\times$};
\node at (519.50,0.98) (14) {$\times$};
\node at (515.93,0.28) (15) {$\times$};
\node at (517.45,0.18) (16) {$\times$};
\node at (524.65,0.49) (17) {$\times$};
\node at (520.52,0.54) (18) {$\times$};
\node at (517.60,0.85) (19) {$\times$};
\node at (524.15,0.30) (20) {$\times$};
\node at (521.90,0.73) (21) {$\times$};
\node at (520.34,0.99) (22) {$\times$};
\node at (521.04,0.82) (23) {$\times$};
\node at (525.24,0.06) (24) {$\times$};
\node at (513.66,0.70) (25) {$\times$};
\node at (518.39,0.02) (26) {$\times$};
\node at (520.46,0.76) (27) {$\times$};
\node at (515.86,0.82) (28) {$\times$};
\node at (515.90,0.32) (29) {$\times$};
\node at (519.11,0.31) (30) {$\times$};
\node at (518.38,0.54) (31) {$\times$};
\node at (514.55,0.65) (32) {$\times$};
\node at (524.35,0.63) (33) {$\times$};
\node at (520.66,0.26) (34) {$\times$};
\node at (522.26,0.72) (35) {$\times$};
\node at (519.90,0.94) (36) {$\times$};
\node at (521.55,0.99) (37) {$\times$};
\node at (524.03,0.25) (38) {$\times$};
\node at (521.99,0.07) (39) {$\times$};
\node at (513.94,0.18) (40) {$\times$};
\node at (521.20,0.28) (41) {$\times$};
\node at (524.44,0.97) (42) {$\times$};
\node at (518.39,0.22) (43) {$\times$};
\node at (518.56,0.89) (44) {$\times$};
\node at (518.95,0.69) (45) {$\times$};
\node at (522.26,0.71) (46) {$\times$};
\node at (530.34,0.84) (47) {$\times$};
\node at (516.74,0.13) (48) {$\times$};
\node at (520.68,0.54) (49) {$\times$};
\node at (521.57,0.33) (50) {$\times$};
\node at (523.36,0.93) (51) {$\times$};
\node at (518.58,0.86) (52) {$\times$};
\node at (519.83,0.28) (53) {$\times$};
\node at (519.77,0.45) (54) {$\times$};
\node at (517.39,0.67) (55) {$\times$};
\node at (521.89,0.97) (56) {$\times$};
\node at (514.41,0.56) (57) {$\times$};
\node at (525.90,0.97) (58) {$\times$};
\node at (521.62,0.59) (59) {$\times$};
\node at (523.65,0.93) (60) {$\times$};
\node at (521.68,0.33) (61) {$\times$};
\node at (522.09,0.41) (62) {$\times$};
\node at (519.02,0.88) (63) {$\times$};
\node at (521.95,0.50) (64) {$\times$};
\node at (514.71,0.88) (65) {$\times$};
\node at (521.41,0.79) (66) {$\times$};
\node at (523.43,0.37) (67) {$\times$};
\node at (521.81,0.53) (68) {$\times$};
\node at (521.29,0.34) (69) {$\times$};
\node at (523.50,0.03) (70) {$\times$};
\node at (520.85,0.76) (71) {$\times$};
\node at (522.87,0.82) (72) {$\times$};
\node at (522.07,0.86) (73) {$\times$};
\node at (521.25,0.48) (74) {$\times$};
\node at (521.78,0.28) (75) {$\times$};
\node at (519.72,0.42) (76) {$\times$};
\node at (522.24,0.22) (77) {$\times$};
\node at (515.84,0.61) (78) {$\times$};
\node at (522.90,0.17) (79) {$\times$};
\node at (515.03,0.03) (80) {$\times$};
\node at (520.84,0.59) (81) {$\times$};
\node at (523.55,0.63) (82) {$\times$};
\node at (517.79,0.24) (83) {$\times$};
\node at (520.65,0.54) (84) {$\times$};
\node at (518.35,0.22) (85) {$\times$};
\node at (522.88,0.22) (86) {$\times$};
\node at (514.12,0.65) (87) {$\times$};
\node at (517.22,0.96) (88) {$\times$};
\node at (515.34,0.88) (89) {$\times$};
\node at (511.53,0.42) (90) {$\times$};
\node at (518.22,0.28) (91) {$\times$};
\node at (519.59,0.71) (92) {$\times$};
\node at (522.17,0.31) (93) {$\times$};
\node at (524.46,0.87) (94) {$\times$};
\node at (520.20,0.60) (95) {$\times$};
\node at (518.04,0.86) (96) {$\times$};
\node at (525.25,0.18) (97) {$\times$};
\node at (518.30,0.90) (98) {$\times$};
\node at (517.44,0.16) (99) {$\times$};
\node at (518.04,0.82) (100) {$\times$};
\node at (516.83,0.81) (101) {$\times$};
\node at (522.26,0.70) (102) {$\times$};
\node at (520.81,0.65) (103) {$\times$};
\node at (520.61,0.85) (104) {$\times$};
\node at (522.54,0.95) (105) {$\times$};
\node at (522.07,0.05) (106) {$\times$};
\node at (515.14,0.28) (107) {$\times$};
\node at (521.14,0.04) (108) {$\times$};
\node at (516.08,0.61) (109) {$\times$};
\node at (516.57,0.10) (110) {$\times$};
\node at (525.45,0.59) (111) {$\times$};
\node at (521.47,0.68) (112) {$\times$};
\node at (516.13,0.21) (113) {$\times$};
\node at (518.00,0.16) (114) {$\times$};
\node at (524.16,0.84) (115) {$\times$};
\node at (518.87,0.34) (116) {$\times$};
\node at (515.00,0.88) (117) {$\times$};
\node at (519.75,0.81) (118) {$\times$};
\node at (520.41,0.80) (119) {$\times$};
\node at (522.23,0.39) (120) {$\times$};
\node at (523.63,0.61) (121) {$\times$};
\node at (526.56,0.94) (122) {$\times$};
\node at (521.32,0.58) (123) {$\times$};
\node at (526.08,0.43) (124) {$\times$};
\node at (513.60,0.42) (125) {$\times$};
\node at (520.35,0.19) (126) {$\times$};
\node at (517.73,0.75) (127) {$\times$};
\node at (517.23,0.29) (128) {$\times$};
\node at (517.98,0.38) (129) {$\times$};
\node at (515.38,0.08) (130) {$\times$};
\node at (516.11,0.61) (131) {$\times$};
\node at (513.18,0.06) (132) {$\times$};
\node at (519.81,0.87) (133) {$\times$};
\node at (517.20,0.24) (134) {$\times$};
\node at (520.76,0.62) (135) {$\times$};
\node at (516.53,0.50) (136) {$\times$};
\node at (514.76,0.03) (137) {$\times$};
\node at (513.52,0.68) (138) {$\times$};
\node at (521.10,0.29) (139) {$\times$};
\node at (511.91,0.05) (140) {$\times$};
\node at (519.47,0.18) (141) {$\times$};
\node at (522.84,0.43) (142) {$\times$};
\node at (527.22,0.61) (143) {$\times$};
\node at (525.62,0.44) (144) {$\times$};
\node at (514.56,0.79) (145) {$\times$};
\node at (515.91,0.13) (146) {$\times$};
\node at (521.01,0.26) (147) {$\times$};
\node at (523.44,0.20) (148) {$\times$};
\node at (517.98,0.32) (149) {$\times$};
\node at (515.70,0.45) (150) {$\times$};
\node at (524.60,0.39) (151) {$\times$};
\node at (518.86,0.16) (152) {$\times$};
\node at (518.55,0.09) (153) {$\times$};
\node at (521.11,0.97) (154) {$\times$};
\node at (518.76,0.24) (155) {$\times$};
\node at (519.29,0.70) (156) {$\times$};
\node at (518.37,0.98) (157) {$\times$};
\node at (519.79,0.24) (158) {$\times$};
\node at (520.72,0.65) (159) {$\times$};
\node at (520.76,0.60) (160) {$\times$};
\node at (523.01,0.13) (161) {$\times$};
\node at (523.41,0.12) (162) {$\times$};
\node at (524.07,0.01) (163) {$\times$};
\node at (522.68,0.15) (164) {$\times$};
\node at (521.71,0.15) (165) {$\times$};
\node at (516.41,0.22) (166) {$\times$};
\node at (523.11,0.88) (167) {$\times$};
\node at (523.43,0.18) (168) {$\times$};
\node at (525.66,0.25) (169) {$\times$};
\node at (522.78,0.97) (170) {$\times$};
\node at (513.52,0.37) (171) {$\times$};
\node at (519.57,0.75) (172) {$\times$};
\node at (522.67,0.13) (173) {$\times$};
\node at (520.85,0.69) (174) {$\times$};
\node at (523.93,0.07) (175) {$\times$};
\node at (519.32,0.55) (176) {$\times$};
\node at (520.67,0.29) (177) {$\times$};
\node at (515.65,0.93) (178) {$\times$};
\node at (522.89,0.03) (179) {$\times$};
\node at (519.17,0.96) (180) {$\times$};
\node at (518.00,0.58) (181) {$\times$};
\node at (517.23,0.18) (182) {$\times$};
\node at (526.78,0.05) (183) {$\times$};
\node at (518.66,0.18) (184) {$\times$};
\node at (520.97,0.80) (185) {$\times$};
\node at (516.08,0.67) (186) {$\times$};
\node at (516.97,0.25) (187) {$\times$};
\node at (520.35,0.12) (188) {$\times$};
\node at (516.30,0.90) (189) {$\times$};
\node at (514.98,0.99) (190) {$\times$};
\node at (524.01,0.05) (191) {$\times$};
\node at (517.33,0.60) (192) {$\times$};
\node at (519.02,0.93) (193) {$\times$};
\node at (516.61,0.27) (194) {$\times$};
\node at (525.17,0.87) (195) {$\times$};
\node at (516.32,0.91) (196) {$\times$};
\node at (525.43,0.24) (197) {$\times$};
\node at (522.81,0.33) (198) {$\times$};
\node at (519.42,0.50) (199) {$\times$};
\node at (521.66,0.51) (200) {$\times$};
% --- horizontal axis: sample-mean scale from 508 to 532, ticks every 5 units ---
\draw (508,-0.2) -- (532,-0.2);
\draw (510,-0.1) -- (510,-0.3);
\node at (510,-0.6) (510) {510};
\draw (515,-0.1) -- (515,-0.3);
\node at (515,-0.6) (515) {515};
\draw (520,-0.1) -- (520,-0.3);
\node at (520,-0.6) (520) {520};
\draw (525,-0.1) -- (525,-0.3);
\node at (525,-0.6) (525) {525};
\draw (530,-0.1) -- (530,-0.3);
\node at (530,-0.6) (530) {530};
% red dotted vertical guides at 511 and 529 (bounds around the mean 520;
% NOTE(review): presumably an interval marker for the sample means -- confirm intent)
\draw[red,dotted] (511,1.2) -- (511,-0.2);
\draw[red,dotted] (529,1.2) -- (529,-0.2);
% axis label and plot title
\node at (530,-1.2) (axis) {Means of sample};
\node at (520,2) (top) {\textbf{Means of samples of $n=10$ values from normal ($\mu=520$, $\sigma=10$)}};
\end{tikzpicture}
\end{center}

View file

@ -0,0 +1,193 @@
\ProvidesPackage{mathoperators}
\RequirePackage{xparse}
\RequirePackage{xkeyval}
\RequirePackage{xstring}
%\OptFractionAppearence -- selects how coset "fractions" are rendered:
% - 0: simple slash: a/b (option value `slash', the default)
% - 1: fraction by slash, raised nominator, lowered denominator (option value `lowerraise')
\let\OptFractionAppearence\z@
\DeclareOptionX{fractionappearence}{%
  \IfStrEq{#1}{slash}{}{%
    \IfStrEq{#1}{lowerraise}{%
      \let\OptFractionAppearence\@ne
    }{}%further options can be inserted here
  }
}
%fix: the warning previously named the package `mathoperator' (missing `s')
\DeclareOptionX*{\PackageWarning{mathoperators}{Unknown option >\CurrentOption<}}
\ProcessOptionsX
%Provides some common math related commands and operators
\RequirePackage{amsmath}
\RequirePackage{amssymb}
\RequirePackage{amsfonts}
\RequirePackage{scalerel,stackengine}
\RequirePackage{calc}
%transpose: upright sans-serif T exponent, usable in text and math mode
\newcommand\transpose[1]{\ensuremath{#1^\mathsf{T}}}
%new macro for "equals with hat" (^=): stretches a \wedge over an equals sign
%(uses \stackon/\stretchto from stackengine and \scalerel from scalerel)
\newcommand\equalhat{\mathrel{\stackon[1.5pt]{=}{\stretchto{%
\scalerel*[\widthof{=}]{\wedge}{\rule{1ex}{3ex}}}{0.5ex}}}}
%fraction with backslash: mirror a \nicefrac (and each argument back again)
%NOTE(review): relies on \nicefrac and \scalebox, which this package does not
%load itself (nicefrac/graphicx) -- presumably provided by the document class; confirm.
\newcommand\bsfrac[2]{%
\scalebox{-1}[1]{\nicefrac{\scalebox{-1}[1]{$#1$}}{\scalebox{-1}[1]{$#2$}}}%
}
%proper (left) ideal relation: \lhd overlaid on \lneq; robust for moving arguments
\DeclareRobustCommand{\properideal}{\mathrel{\text{$\m@th\proper@ideal$}}}
\newcommand{\proper@ideal}{%
\ooalign{$\lneq$\cr\raise.22ex\hbox{$\lhd$}\cr}%
}
%proper right ideal relation: \rhd overlaid on \gneq
\DeclareRobustCommand{\properidealright}{\mathrel{\text{$\m@th\proper@idealright$}}}
\newcommand{\proper@idealright}{%
\ooalign{$\gneq$\cr\raise.22ex\hbox{$\rhd$}\cr}%
}
%General newcommands!
\newcommand{\comp}{\mathbb{C}} % set of complex numbers
\newcommand{\real}{\mathbb{R}} % set of real numbers
\newcommand{\whole}{\mathbb{Z}} % set of integers
\newcommand{\natur}{\mathbb{N}} % set of natural numbers
\newcommand{\ratio}{\mathbb{Q}} % set of rational numbers
\newcommand{\field}{\mathbb{K}} % generic field standing for the sets above
\newcommand{\diff}{\mathrm{d}} % upright differential d
\newcommand{\s}{\,\,} % space after the function in the integral
\newcommand{\cont}{\mathcal{C}} % contour C
\newcommand{\fuk}{f(z) \s\diff z} % f(z) dz
\newcommand{\diffz}{\s\diff z}
\newcommand{\subint}{\int\limits} % integral with boundaries set below/above
\newcommand{\poly}{\mathcal{P}} % special P - polygon
\newcommand{\defi}{\mathcal{D}} % D for the domain of a function
\newcommand{\cover}{\mathcal{U}} % cover for a set
\newcommand{\setsys}{\mathcal{M}} % set system M
\newcommand{\setnys}{\mathcal{N}} % set system N
\newcommand{\zetafunk}{f(\zeta)\s\diff \zeta} %f(zeta) d zeta
\newcommand{\ztfunk}{f(\zeta)} % f(zeta)
\newcommand{\bocirc}{S_r(z)} % circle of radius r around z
\newcommand{\prop}{\,|\,} % "such that" bar in set-builder notation
\newcommand*{\QEDA}{\hfill\ensuremath{\blacksquare}} %tombstone
\newcommand{\emptybra}{\{\varnothing\}} % empty set with set-brackets
\newcommand{\realpos}{\real_{>0}} % positive reals
\newcommand{\realposr}{\real_{\geq0}} % non-negative reals
\newcommand{\naturpos}{\natur_{>0}} % positive naturals
\newcommand{\Imag}{\operatorname{Im}} % imaginary part
\newcommand{\Realz}{\operatorname{Re}} % real part
\newcommand{\norm}{\Vert \cdot \Vert} % norm with placeholder dot
\newcommand{\metric}{\vert \cdot \vert} % metric with placeholder dot
\newcommand{\foralln}{\forall n} %all n
\newcommand{\forallnset}{\forall n \in \natur} %all n in |N
\newcommand{\forallnz}{\forall n \geq n_0} % all n >= n_0 (fix: was "\geq _0", dropping the n of n_0)
\newcommand{\conjz}{\overline{z}} % conjugated z
\newcommand{\tildz}{\tilde{z}} % different z
\newcommand{\lproofar}{"`$ \Leftarrow $"'} % quoted "<=" direction of a proof
\newcommand{\rproofar}{"`$ \Rightarrow $"'} % quoted "=>" direction of a proof
\newcommand{\beha}{\Rightarrow \text{ Behauptung}} % "=> claim"
\newcommand{\powerset}{\mathcal{P}} % power set
\newcommand{\person}[1]{\textsc{#1}} % person names in small caps
\newcommand{\highlight}[1]{\emph{#1}} % semantic emphasis
\newcommand{\realz}{\mathfrak{Re}}
\newcommand{\imagz}{\mathfrak{Im}}
\renewcommand{\epsilon}{\varepsilon} % always use the rounded variants
\renewcommand{\phi}{\varphi}
\newcommand{\lebesque}{\textsc{Lebesgue}} % NOTE(review): macro name misspells Lebesgue; kept for caller compatibility
\renewcommand{\Re}{\mathfrak{Re}}
\renewcommand{\Im}{\mathfrak{Im}}
\renewcommand*{\arraystretch}{1.4} % global: more generous table row height
\newcommand{\bigcupdot}{\bigcup \hspace{-0.35cm} \cdot} % disjoint union as big operator
\newcommand{\BIGboxplus}{\mathop{\mathchoice{\raise-0.35em\hbox{\huge $\boxplus$}}{\raise-0.15em\hbox{\Large $\boxplus$}}{\hbox{\large $\boxplus$}}{\boxplus}}} % size-adaptive big boxplus
\newcommand{\eps}{\textit{eps }} % machine epsilon (text style)
\newcommand{\skalar}[2]{\left\langle #1,#2\right\rangle} % scalar product <#1,#2>
\newcommand{\qraum}[2]{\sfrac{#1}{#2}} % quotient space #1/#2
%set of left cosets #1/#2; rendering is chosen by \OptFractionAppearence
\newcommand{\lnkset}[2]{%
  \ifcase\OptFractionAppearence\relax%0: plain slash (wrap in $...$ outside math mode)
    \ifmmode%
      #1\slash #2%
    \else%
      $#1\slash #2$%
    \fi%
  \or%1: raised/lowered nicefrac
    \nicefrac{#1}{#2}%
  \else%
    %fix: warning said "Unkown", named the package `mathoperator' and the
    %option >fractionAppearence<, none of which match the declarations above
    \PackageWarning{mathoperators}{Unknown value for option >fractionappearence<: \meaning\OptFractionAppearence}%
  \fi%
}
%set of right cosets #1\#2; mirror image of \lnkset
\newcommand{\rnkset}[2]{%
  \ifcase\OptFractionAppearence\relax%0: plain backslash
    \ifmmode%
      #1\backslash#2%
    \else%
      $#1\backslash#2$%
    \fi%
  \or%
    \bsfrac{#1}{#2}%1: mirrored nicefrac
  \else%
    \PackageWarning{mathoperators}{Unknown value for option >fractionappearence<: \meaning\OptFractionAppearence}%
  \fi%
}
% Math Operators -- upright operator names via amsmath's \DeclareMathOperator
% topology / analysis
\DeclareMathOperator{\inn}{int} % set of inner points
\DeclareMathOperator{\ext}{ext} % set of outer points
\DeclareMathOperator{\cl}{cl} % closure
\DeclareMathOperator{\grad}{grad}
\DeclareMathOperator{\D}{d}
\DeclareMathOperator{\id}{id}
\DeclareMathOperator{\graph}{graph}
\DeclareMathOperator{\Int}{int}
\DeclareMathOperator{\Ext}{ext}
\DeclareMathOperator{\diam}{diam}
\DeclareMathOperator{\supp}{supp}
\DeclareMathOperator{\cond}{cond}
\DeclareMathOperator{\rd}{rd}
%\undef comes from etoolbox; require it here instead of relying on the
%document class to have loaded it already (fix: missing dependency)
\RequirePackage{etoolbox}
\undef\div% drop the kernel's division-sign \div so it can become "divergence"
\DeclareMathOperator{\div}{div}
\DeclareMathOperator{\rot}{rot}
% (linear) algebra
\DeclareMathOperator{\End}{End}
\DeclareMathOperator{\Aff}{Aff}
\DeclareMathOperator{\Aut}{Aut}
\DeclareMathOperator{\Hom}{Hom}
\DeclareMathOperator{\Abb}{Abb}
\DeclareMathOperator{\Bil}{Bil}
\DeclareMathOperator{\Eig}{Eig}
\DeclareMathOperator{\Mat}{Mat}
\DeclareMathOperator{\Ker}{Ker}
\DeclareMathOperator{\diag}{diag}
\DeclareMathOperator{\GL}{GL}
\DeclareMathOperator{\tr}{tr}
\DeclareMathOperator{\rk}{rk}
\DeclareMathOperator{\ZR}{ZR}
\DeclareMathOperator{\SR}{SR}
\DeclareMathOperator{\sgn}{sgn}
\DeclareMathOperator{\Span}{span}
\DeclareMathOperator{\Image}{Im}
\DeclareMathOperator{\Sym}{Sym}
\DeclareMathOperator{\Hau}{Hau}
\DeclareMathOperator{\pr}{pr}
% matrix groups
\DeclareMathOperator{\Orth}{O}
\DeclareMathOperator{\SO}{SO}
\DeclareMathOperator{\Uni}{U}
\DeclareMathOperator{\SU}{SU}
\DeclareMathOperator{\SL}{SL}
% number theory / group theory
\DeclareMathOperator{\ggT}{ggT}
\DeclareMathOperator{\kgV}{kgV}
\DeclareMathOperator{\rang}{rang}
\DeclareMathOperator{\ord}{ord}
\DeclareMathOperator{\Inn}{Inn}
\DeclareMathOperator{\Z}{Z}
\DeclareMathOperator{\UG}{UG}
\DeclareMathOperator{\Fix}{Fix}
\DeclareMathOperator{\Stab}{Stab}
\DeclareMathOperator{\Syl}{Syl}
\DeclareMathOperator{\Typ}{Typ}
\DeclareMathOperator{\LC}{LC}
\DeclareMathOperator{\Quot}{Quot}
\DeclareMathOperator{\SE}{SE}
\DeclareMathOperator{\PDF}{PDF}
\endinput

View file

@ -0,0 +1,729 @@
\NeedsTeXFormat{LaTeX2e}[1995/12/01]
\ProvidesClass{mathscript}[2018/08/05 mathscript]
%programming related packages
\RequirePackage{zref-base}
\RequirePackage{etoolbox}
\RequirePackage{xparse}%better macros
\RequirePackage{calc}
\RequirePackage{xstring}
\RequirePackage{xkeyval}
%option `order': ordering inside theorem headers
%possible values: firstnumber (default), firstname
\def\theoremheader@order@val{firstnumber}
\DeclareOptionX{order}{%
  %map the option value onto the corresponding ntheorem style name
  \let\tempa\@empty
  \IfEq{#1}{firstnumber}{\def\tempa{changebreak}}{%
    \IfEq{#1}{firstname}{\def\tempa{break}}{%
      \PackageWarning{mathscript}{Unknown Value for key `order'}
    }
  }
  \edef\theoremheader@order@val{\tempa}
}
%option `disable': theorem environments to switch off
%possible values: all you can imagine.
%useful values: theorem-names defined in this class, new theorems defined by \newmdtheoremenv
\listadd{\theorem@disable}{\@empty}
\DeclareOptionX{disable}{%
  \listadd{\theorem@disable}{#1}
}
%option `sectionreset': suppress resetting the section counter after a chapter
\newif\ifreset@section@after@chapter
\reset@section@after@chaptertrue
\DeclareOptionX{sectionreset}{%
  \reset@section@after@chapterfalse
}
%fix: unknown options were forwarded to `report', but this class loads
%`article' below, so they were silently discarded; forward to `article'.
\DeclareOptionX*{\PassOptionsToClass{\CurrentOption}{article}}
\ProcessOptionsX
\LoadClass[ngerman,a4paper]{article}
\RequirePackage{ifpdf,ifluatex}
%decent space; change explicitly the space between header and content and footer and content
\RequirePackage[left=2.1cm,right=3.1cm,bottom=3cm,footskip=0.75cm,headsep=0.5cm]{geometry}
\RequirePackage{babel}
\ifpdf
\ifluatex
%new lualatex needs explicit fontspec to draw properly e.g. german umlauts
\RequirePackage{fontspec}
%\RequirePackage{lua-visual-debug} %for debugging
\else
%old pdflatex needs explicitly utf-8-support
\RequirePackage[utf8]{inputenc}
\RequirePackage{chngcntr}
\RequirePackage{eufrak}
\fi
\fi
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%Layout related packages
\RequirePackage{parskip} %split paragraphs by vspace instead of intendations
\RequirePackage{fancyhdr} %customize header / footer
\RequirePackage{titlesec} %customize titles
\RequirePackage{tocloft} %customize toc
%tabulars
\RequirePackage{tabularx} %tabularx-environment (explicitly set width of columns)
\RequirePackage{longtable} %Tabellen mit Seitenumbrüchen
\RequirePackage{multirow}
\RequirePackage{bigdelim}
\RequirePackage{booktabs} %improved rules
\usepackage{colortbl} %einfärben von Spalten, Zeilen und Zellen
\RequirePackage[title,titletoc]{appendix}
\RequirePackage{environ}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%Math related packages
%basic ams-math and enhancments
\RequirePackage{amsmath,amssymb,amsfonts,mathtools}
\RequirePackage{blkarray}
%add some font-related stuff
\RequirePackage{latexsym}
\RequirePackage{marvosym} %lightning (contradiction)
\RequirePackage{stmaryrd} %Lightning symbol
\RequirePackage{bbm} %unitary matrix
\RequirePackage{wasysym} %add some symbols
\RequirePackage[bb=boondox]{mathalfa} %special zero using \mathbb{0}
\RequirePackage{systeme}
\usepackage{upgreek} %griechische Buchstaben, für mehr Auswahl bei phi's
%further support for different equation setting
\RequirePackage{cancel}
\RequirePackage{xfrac} %sfrac -> fractions e.g. 3/4
\RequirePackage{nicefrac}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%Graphics-related packages
\RequirePackage[table,dvipsnames]{xcolor}
\RequirePackage{graphicx}
\RequirePackage{tcolorbox}
\RequirePackage{pgfplots}
\pgfplotsset{compat=1.13}
\usepgfplotslibrary{fillbetween}
\RequirePackage{pgf}
\RequirePackage{tikz}
\usetikzlibrary{patterns,arrows,calc,decorations.pathmorphing,backgrounds, positioning,fit,petri,decorations.fractals}
\usetikzlibrary{matrix}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%Text-related packages
%increase line spacing
\RequirePackage[onehalfspacing]{setspace} %increase row-space
\RequirePackage{ulem} %better underlines
\RequirePackage{marginnote} %notes at the edge
%enumeration
\RequirePackage{enumerate}
\RequirePackage[inline]{enumitem} %customize label
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%Pakete für Programmierung
\RequirePackage{lmodern}
\RequirePackage{listings}
%Konfiguration dazu
\definecolor{lightlightgray}{rgb}{0.95,0.95,0.95}
\definecolor{lila}{rgb}{0.8,0,0.8}
\definecolor{mygray}{rgb}{0.5,0.5,0.5}
\definecolor{mygreen}{rgb}{0,0.8,0.26}
\lstset{language=Matlab,
basicstyle=\ttfamily,
keywordstyle=\color{lila},
commentstyle=\color{lightgray},
morecomment=[l]{!\,\% },% Comment only with space after !
morekeywords={sampsizepwr, makedist, kstest, fitdist,chi2cdf},
stringstyle=\color{mygreen}\ttfamily,
backgroundcolor=\color{white},
showstringspaces=false,
numbers=left,
numbersep=10pt,
numberstyle=\color{mygray}\ttfamily,
identifierstyle=\color{blue},
xleftmargin=.2\textwidth,
xrightmargin=.2\textwidth
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%Index related packages
\RequirePackage[texindy]{imakeidx}
\indexsetup{
level=\section*
}
\makeindex[intoc]
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%Theorem-related packages
\RequirePackage[amsmath,amsthm,thmmarks,hyperref]{ntheorem}
\RequirePackage[ntheorem,framemethod=TikZ]{mdframed}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\RequirePackage[unicode,bookmarks=true]{hyperref}
\hypersetup{
colorlinks,
citecolor=green,
filecolor=green,
linkcolor=blue,
urlcolor=blue
}
\RequirePackage{cleveref}
\RequirePackage{bookmark} %pdf-bookmarks
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% End Packages %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%tweak \newmdtheoremenv to create an empty environment if this very enviroment is disabled
%args (same interface as mdframed's \newmdtheoremenv):
%  #1 mdframed options, #2 env name, #3 shared counter (optional),
%  #4 printed title, #5 parent counter for numbering (optional)
\RenewDocumentCommand{\newmdtheoremenv}{O{} m o m o}{%
\ifinlist{#2}{\theorem@disable}{%
%disabled: define a silent dummy with an empty title so numbering of the
%remaining theorems stays unchanged
\IfStrEq{#2}{theorem}{\newtheorem{#2}{}[section]}{\newtheorem{#2}{}[theorem]}
\RenewEnviron{#2}{\stepcounter{theorem}}{}
}{%
%from mdframed.sty, definition of \newmdtheoremenv
\ifboolexpr{ test {\IfNoValueTF {#3}} and test {\IfNoValueTF {#5}} }%
{\newtheorem{#2}{#4}}{%
\IfValueTF{#3}{\newtheorem{#2}[#3]{#4}}{}%
\IfValueTF{#5}{\newtheorem{#2}{#4}[#5]}{}%
}%
%wrap every use of the environment in an mdframed box with options #1
\BeforeBeginEnvironment{#2}{%
\begin{mdframed}[#1]}%
\AfterEndEnvironment{#2}{%
\end{mdframed}%
}%
}
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%Setup theorem environments
%triangle symbol + length
\newlength{\blacktrianglewidth}
\settowidth{\blacktrianglewidth}{$\blacktriangleright$}
%colors
\definecolor{lightgrey}{gray}{0.91}
\definecolor{lightred}{rgb}{1,0.6,0.6}
\definecolor{darkgrey}{gray}{0.6}
\definecolor{darkgreen}{rgb}{0,0.6,0}
\mdfdefinestyle{boxedtheorem}{%
outerlinewidth=3pt,%
skipabove=5pt,%
skipbelow=10pt,%
frametitlefont=\normalfont\bfseries\color{black},%
nobreak,%enforce no pagebrakes in the whole frame
}
%numbered environments
\theoremstyle{\theoremheader@order@val} %set by pacakge options above
\theorembodyfont{}
%including background
\newmdtheoremenv[%
style=boxedtheorem,%
innertopmargin=\topskip,%
innerbottommargin=\topskip,%
linecolor=darkgrey,%
backgroundcolor=lightgrey,%
]{theorem}{Theorem}[section]
\newmdtheoremenv[%
style=boxedtheorem,%
linecolor=darkgrey,%
topline=false,%
rightline=false,%
bottomline=false,%
innertopmargin=\topskip,%
innerbottommargin=\topskip,%
backgroundcolor=lightgrey,%
]{proposition}[theorem]{Proposition}
\newmdtheoremenv[%
style=boxedtheorem,%
linecolor=darkgrey,%
topline=false,%
rightline=false,%
bottomline=false,%
backgroundcolor=lightgrey,%
innertopmargin=\topskip,%
innerbottommargin=\topskip,%
]{lemma}[theorem]{Lemma}
%excluding background
\newmdtheoremenv[%
style=boxedtheorem,%
linecolor=red,%
topline=false,%
rightline=false,%
bottomline=false,%
innertopmargin=0,%
innerbottommargin=-3pt,%
]{definition}[theorem]{Definition}
\newmdtheoremenv[%
outerlinewidth=3pt,%
linecolor=black,%
topline=false,%
rightline=false,%
bottomline=false,%
innertopmargin=0pt,%
innerbottommargin=-0pt,%
frametitlefont=\normalfont\bfseries\color{black},%
skipabove=5pt,%
skipbelow=10pt,%
]{conclusion}[theorem]{Conclusion}
\newmdtheoremenv[%
hidealllines=true,%
frametitlefont=\normalfont\bfseries\color{black},%
innerleftmargin=0pt,%
skipabove=5pt,%
innerleftmargin=10pt,%
]{remark}[theorem]{\hspace*{-10pt}$\blacktriangleright$\hspace*{\dimexpr 10pt - \blacktrianglewidth\relax}Remark}
\newmdtheoremenv[%
hidealllines=true,%
frametitlefont=\normalfont\bfseries\color{black},%
innerleftmargin=0pt,%
skipabove=5pt,%
innerleftmargin=10pt,%
]{erinnerung}[theorem]{\hspace*{-10pt}$\blacktriangleright$\hspace*{\dimexpr 10pt - \blacktrianglewidth\relax}Reminder}
\newmdtheoremenv[%
hidealllines=true,%
frametitlefont=\normalfont\bfseries\color{black},%
innerleftmargin=10pt,%
skipabove=5pt,%
]{example}[theorem]{\hspace*{-10pt}\rule{5pt}{5pt}\hspace*{5pt}Example}
\newmdtheoremenv[%
hidealllines=true,%
frametitlefont=\normalfont\bfseries\color{black},%
innerleftmargin=10pt,%
skipabove=5pt,%
]{algorithm}[theorem]{\hspace*{-10pt}\rule{5pt}{5pt}\hspace*{5pt}Algorithm}
%unnumbered theorems
\theoremstyle{nonumberbreak}
\theoremindent0cm
\newmdtheoremenv[%
style=boxedtheorem,%
linecolor=red,%
topline=false,%
rightline=false,%
bottomline=false,%
innertopmargin=1pt,%
innerbottommargin=1pt,%
]{*definition}{Definition}
\newmdtheoremenv[%
hidealllines=true,%
frametitlefont=\normalfont\bfseries\color{black},%
skipabove=5pt,%
innerleftmargin=10pt,%
]{*remark}{\hspace*{-10pt}$\blacktriangleright$\hspace*{\dimexpr 10pt - \blacktrianglewidth\relax}Remark}
\newmdtheoremenv[%
hidealllines=true,%
innerleftmargin=10pt,%
]{*example}{\hspace*{-10pt}\rule{5pt}{5pt}\hspace*{5pt}Example}
\newmdtheoremenv[%
style=boxedtheorem,%
hidealllines=true,%
innertopmargin=\topskip,%
innerbottommargin=\topskip,%
backgroundcolor=lightgrey,%
]{*anmerkung}{Annotation}
\newmdtheoremenv[%
style=boxedtheorem,%
hidealllines=true,%
innertopmargin=\topskip,%
innerbottommargin=\topskip,%
backgroundcolor=lightgrey,%
]{mathematica}{Mathematica/WolframAlpha-Befehle}
\newtheorem{overview}[theorem]{Overview}
\newmdtheoremenv[
outerlinewidth=0.1pt,
]{repetition}{Wiederholung}
%various unnumbered thereoms and environment (usually included in theorems like above)
%To get rid of the parentheses, a new theorem style is neccessary (definition of nonumberbreak from ntheorem.sty)
%to achieve the underlining, this needed to put in the theoremstyle definition
\theoremheaderfont{\mdseries}
\theoremseparator{:}
\theorempostskip{0pt}
%define new theorem-styles
\newtheoremstyle{noparentheses}%
{\item[\rlap{\vbox{\hbox{\hskip\labelsep \theorem@headerfont
\underline{##1}\theorem@separator}\hbox{\strut}}}]}%
{\item[\rlap{\vbox{\hbox{\hskip\labelsep \theorem@headerfont
\underline{##1\ ##3\theorem@separator}}\hbox{\strut}}}]}
\newtheoremstyle{underlinedPlain}%
{\item[\hskip\labelsep \uline{\theorem@headerfont ##1\theorem@separator}]}%
{\item[\hskip\labelsep \uline{\theorem@headerfont ##1\ \theorem@headerfont(##3)\theorem@separator}]}
\newtheoremstyle{plainEnvironment}{}%
{\item[\hskip\labelsep {##1\theorem@headerfont ##3\theorem@separator}]}
\newtheoremstyle{underlinedEnvironment}{}%
{\item[\hskip\labelsep \uline{##1\theorem@headerfont ##3\theorem@separator}]}
\newtheoremstyle{boldEnvironment}{}%
{\item[\hskip\labelsep \textbf{##1\theorem@headerfont ##3\theorem@separator}]}
%theorems
\theoremstyle{noparentheses}
\newmdtheoremenv[%
hidealllines=true,%
innerleftmargin=1em,%
innerbottommargin=0pt,%
innerrightmargin=0,%
skipbelow=0pt,%
]{interpretation}{\hspace*{\dimexpr - \mdflength{innerleftmargin}\relax}Interpretation}
\theoremstyle{underlinedPlain}
\newmdtheoremenv[%
hidealllines=true,%
innerleftmargin=1em,%
innerrightmargin=0,%
skipbelow=0pt,%
]{hint}{\hspace*{\dimexpr - \mdflength{innerleftmargin}\relax}Hinweis}
\theoremstyle{plainEnvironment}
\newmdtheoremenv[%
hidealllines=true,%
innerleftmargin=1em,%
innerbottommargin=0pt,%
innerrightmargin=0,%
skipbelow=0pt,%
]{plainenvironment}{\hspace*{\dimexpr -\mdflength{innerleftmargin}\relax}}
\theoremstyle{underlinedEnvironment}
\newmdtheoremenv[%
hidealllines=true,%
innerleftmargin=1em,%
innerrightmargin=0,%
skipbelow=0pt,%
]{underlinedenvironment}{\hspace*{\dimexpr -\mdflength{innerleftmargin}\relax}}
\theoremheaderfont{\bfseries}
\theoremstyle{boldEnvironment}
\newmdtheoremenv[%
hidealllines=true,%
innerleftmargin=1em,%
innerrightmargin=0,%
skipbelow=0pt,%
]{boldenvironment}{\hspace*{\dimexpr -\mdflength{innerleftmargin}\relax}}
%proofs
\newtheoremstyle{proofstyle}%
{\item[\hskip\labelsep {\theorem@headerfont ##1}\theorem@separator]}%
{\item[\hskip\labelsep {\theorem@headerfont ##1}\ (##3)\theorem@separator]}
\theoremstyle{proofstyle}
\theoremheaderfont{\normalfont\normalsize\itshape}
\theorembodyfont{\normalfont\small}
\theoremseparator{.}
\theorempreskip{5pt}
\theorempostskip{5pt}
\theoremsymbol{$\square$}
\ifinlist{proof}{\theorem@disable}{\RenewEnviron{proof}{}{}}{\renewtheorem{proof}{Proof}}
%cref: print correct translation
\crefname{theorem}{Theorem}{Theorems}
\crefname{proposition}{Proposition}{Propositions}
\crefname{lemma}{Lemma}{Lemmas}
\crefname{conclusion}{Conclusion}{Conclusions}
\crefname{definition}{Definition}{Definitions}
\crefname{remark}{Remark}{Remarks}
\crefname{example}{Example}{Examples}
\crefname{erinnerung}{Reminder}{Reminders}
\crefname{algorithm}{Algorithm}{Algorithms}
\crefname{*definition}{Definition}{Definitions}
\crefname{*remark}{Remark}{Remarks}
\crefname{*example}{Example}{Examples}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%Additional features
%some minor commands
\newcommand*{\rom}[1]{\expandafter\@slowromancap\romannumeral #1@} %print integer in capital roman letters
\newcommand{\leqnos}{\tagsleft@true\let\veqno\@@leqno} %set equation numbering left / right
\newcommand{\reqnos}{\tagsleft@false\let\veqno\@@eqno}
%declare a new label; store current chapter number
%NOTE(review): this class loads `article', which defines no chapter counter,
%yet \arabic{chapter} below reads one -- confirm the intended document class.
\newcommand*{\proplbl}[1]{%
\@bsphack
\begingroup
\label{#1}%
\zref@setcurrent{default}{\arabic{chapter}}%remember the chapter number...
\zref@wrapper@immediate{%
\zref@labelbyprops{#1@chapter}{default}%...under zref label `#1@chapter'
}
\endgroup
\@esphack
}
%refer to a label set by \proplbl.
%If the label is not defined (yet), question marks are output at the calling position. If the label is defined, the chapter number is prepended to the link output by \cref if the current chapter number and the one set when calling \proplbl differ.
%the macro handles both text and math mode. \mbox is needed due to a feature concerning ulem / cleveref
\newcommand*{\propref}[1]{%
\ifcsdef{r@#1}%in first compilation the label may not be defined yet
{%
\zref@refused{#1@chapter}%
\cref@gettype{#1}{\propositionref@current@type}%get the environment's name
\ifnumcomp{\c@chapter}{=}{\zref@extractdefault{#1@chapter}{default}{\z@}}%
{%same chapter: plain reference without chapter prefix
\ifmmode
\ref{#1}%
\else
\mbox{\cref{#1}}%
\fi
}%
{%otherwise: temporarily change the cref format to include the chapter
%example for following line:
%\crefformat{truetheorem}{\cref@truetheorem@name~##2\rom{\zref@extractdefault{#1}{#1chapter}{1}}.##1##3}
%this changes the format used by \cref to <environment name> <chapter-number>.<section-number>.<theorem number>
\crefformat{\propositionref@current@type}{%
\csname cref@\propositionref@current@type @name\endcsname ~##2\rom{\zref@extractdefault{#1@chapter}{default}{\@ne}}.##1##3%
}%
\ifmmode
\ref{#1}%
\else
\mbox{\cref{#1}}%
\fi
%restore the default cref format afterwards
\crefformat{\propositionref@current@type}{%
\csname cref@\propositionref@current@type @name\endcsname~##2##1##3%
}%
}%
}%
{\begingroup\ifmmode\else\bfseries\fi ???\endgroup}%similar to \ref\cref: question marks in case of undefined labels
}
%declare a new term for the index; the term is also printed (underlined) at
%the call position unless the starred form is used.
%args: * = index-only, #2 = optional index sort prefix, #3 = the term,
%      #4 = optional index suffix (e.g. subentry or formatting)
\NewDocumentCommand{\begriff}{s O{} m O{}}{%
\index{#2#3#4}%
\IfBooleanTF{#1}{}{\uline{#3}}%underline (ulem) in the running text
}
%append a new math symbol to the `symbols' index; the symbol is also typeset
%at the call position unless the starred form is used.
%args: * = index-only, #2 = optional sort prefix, #3 = sort key,
%      #4 = the symbol (detokenized for the index), #5 = optional index suffix
\NewDocumentCommand{\mathsymbol}{s O{} m m O{}}{%
\IfBooleanTF{#1}%
{\index[symbols]{#2#3@\detokenize{#4}#5}}%
{#4\index[symbols]{#2#3@\detokenize{#4}#5}}%
}
%remove skip before / after amsmath-environments: default to 0pt. 1 star: just before the environment, 2 stars: just after the environment, no star: both
%args: #3 = above-display skip (default 0pt), #4 = below-display skip (default 0pt)
\NewDocumentCommand{\zeroAmsmathAlignVSpaces}{s s O{0 pt} O{0 pt}}{%
\IfBooleanTF{#1}%
{%starred: touch only one of the two lengths
\IfBooleanTF{#2}%
{\setlength{\belowdisplayskip}{#4}}%
{\setlength{\abovedisplayskip}{#3}}%
}%
{%unstarred: set both
\setlength{\abovedisplayskip}{#3}%
\setlength{\belowdisplayskip}{#4}%
}%
}
%display equation usable inside a list item without extra vertical space;
%starred form gives an unnumbered equation (flalign*), unstarred a numbered one.
%#2 = the equation body
\NewDocumentCommand{\itemEq}{s m}{%
\begingroup%
\setlength{\abovedisplayskip}{\dimexpr -\parskip + 1pt\relax}%compensate parskip
\setlength{\belowdisplayskip}{0pt}%
\IfBooleanTF{#1}%
{\parbox[c]{\linewidth}{\begin{flalign*}#2&&\end{flalign*}}}%}
{\parbox[c]{\linewidth}{\begin{flalign}#2&&\end{flalign}}}%}
\endgroup%
}
%macro that defines the spacing between bracket and content of a matrix
\NewDocumentCommand{\matrixBracketSpacing}{}{\mspace{4.0mu plus 3.0mu minus 1.0mu}}
%matrix environment with customized spacing between brackets / content,
%line height and column width
\newenvironment{henrysmatrix}{%
\renewcommand*{\arraystretch}{1.2}
\setlength\arraycolsep{5pt}
\left(\matrixBracketSpacing
\begin{matrix}
}{%
\end{matrix}
\matrixBracketSpacing\right)
}
%redefine \overline to customize the space between text / line (currently 0.4mm + height of the content)
%ATTENTION: when changing the 0.4mm unfortunately, in \kringel the 0.4mm need to be changed accordingly
%NOTE(review): the \else branch calls \bar, which is a math-mode accent — presumably \overline is only ever used in math here; confirm before relying on the text-mode branch
\let\@old@overline\overline
\renewcommand*{\overline}[1]{%
\ifmmode
%math mode: enlarge the reported height of the content so the rule is raised by 0.4mm
\@old@overline{\raisebox{0pt}[\dimexpr\height+0.4mm\relax]{$#1$}}%
\else
\bar{#1}
\fi
}
%encircle some content. Arguments: border color (optional, default blue), background color (mandatory), content (mandatory)
%two lengths to get width / height of content (important for width / height of the circle)
\newlength{\@kringel@contentheight}
\newlength{\@kringel@contentwidth}
\newlength{\@kringel@depth}
\NewDocumentCommand{\kringel}{O{blue} m m}{%
%as the macro should work for both text and math mode, add some macros for later use to distinguish
%in text mode, nothing happens (except discarding the 1st argument for the raisebox, that is permanently given), in math mode, the content needs to be wrapped in \ensuremath, the tcolorbox-environment by a raisebox
%ATTENTION: when changing the height-factor of tcolorbox, the depth correction needs to be changed as well
\let\@kringel@inner\relax
\let\@kringel@outer\@secondoftwo
\ifmmode
\let\@kringel@inner\ensuremath
\let\@kringel@outer\raisebox
\fi
%set the width and height
\settoheight{\@kringel@contentheight}{\hbox{\@kringel@inner{#3}}}
\settowidth{\@kringel@contentwidth}{\@kringel@inner{#3}}
\settodepth{\@kringel@depth}{\@kringel@inner{#3}}
%change the depth correction depending on whether there is a depth (e.g. y) or not (e.g. a)
\ifdim \@kringel@depth > 0pt%
\setlength{\@kringel@depth}{\dimexpr\@kringel@depth+0.5mm\relax}
\else
\settodepth{\@kringel@depth}{y}
\setlength{\@kringel@depth}{\dimexpr\@kringel@depth+0.3mm\relax}
\fi
%output the colorbox with the given parameters: frame color, background color, computed width and height, and escaped content depending on math / text mode
%FIX(review): the option list contained a stray ",=center," (empty pgfkeys key, would raise an "unknown key" error); restored to "valign=center"
\@kringel@outer{\dimexpr-\@kringel@contentheight/2-\@kringel@depth\relax}{\begin{tcolorbox}[colframe=#1,halign=center,valign=center,width=\dimexpr1.5\@kringel@contentwidth+1mm\relax,height=2.5\@kringel@contentheight,left=0pt,right=0pt,bottom=0pt,top=0pt,boxrule=0.8pt,colback=#2,boxsep=0pt,bean arc]
\@kringel@inner{#3}
\end{tcolorbox}}
}
%update ntheorem macro to provide space between theorem numbers and any optional comment, adds in upper roman letters the chapter number. removes any numbering for theorem environments with just one counter
%args appear to be (per ntheorem's \thm@@thmline): #1 env name, #2 number, #3 optional comment, #4 page, #5 hyperref anchor — NOTE(review): inferred from the usage below, confirm against ntheorem.sty
\renewcommand{\thm@@thmline@name}[5]{%-
%tmp holds the (possibly empty) optional comment, indented away from the number
\def\thm@@thmline@name@tmp{%
\if\relax\detokenize{#3}\relax\else%
{\hspace*{2.2ex}#3}%
\fi%
}
\let\thm@@thmline@name@numbering\relax
%count dots in the anchor to decide whether a chapter-level number is present
\StrCount{#5}{.}[\thm@@thmline@name@dot@number]
\ifnum\thm@@thmline@name@dot@number < 3%
%fewer than 3 dots: no numbering, pull the comment left into the number column
\def\thm@@thmline@name@tmp{\hspace*{-1em}#3}
\else%
\StrBetween[1,2]{#5}{.}{.}[\thm@@thmline@name@chap@num]%get the chapter number for roman transliteration
\def\thm@@thmline@name@numbering{\rom{\thm@@thmline@name@chap@num}.#2}
\fi
%emit the TOC line; with hyperref the entry links to the theorem anchor #5
\ifx\\#5\\%
\@dottedtocline{-2}{0em}{4.4em}%
{#1 \protect\numberline{\thm@@thmline@name@numbering:}{}}%
{#4}
\else
\ifHy@linktocpage\relax\relax
\@dottedtocline{-2}{0em}{1.3em}%
{#1 \protect\numberline{\thm@@thmline@name@numbering:}\thm@@thmline@name@tmp}%
{\hyper@linkstart{link}{#5}{#4}\hyper@linkend}%
\else
\@dottedtocline{-2}{0em}{4.em}%
{#1 \protect\numberline{\thm@@thmline@name@numbering:}\thm@@thmline@name@tmp}%
{\hyper@linkstart{link}{#5}{#4}\hyper@linkend}%
\fi
\fi
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%55
%Settings
%disable brackets left for linear systems by systeme-package
\sysdelim..
%plain-text replacements so hyperref can build PDF bookmark strings from headings that contain these commands
\pdfstringdefDisableCommands{%
\def\\{}%
\def\texttt#1{<#1>}%
\def\mathbb#1{<#1>}%
}
\reqnos % equation numbers on the right — NOTE(review): presumably mathtools' \reqnos; confirm the providing package
%Headings
%\titlelabel{\thetitle.\quad}%. behind section/sub... (3. instead of 3)
%\counterwithout{section}{chapter}
%\renewcommand{\thechapter}{\Roman{chapter}}
%\renewcommand{\thepart}{\Alph{part}}
%italic chapters (due to titlesec package some more stuff)
%\titleformat{command}[shape]{format}{label}{sep}{before-code}[after-code]
%\titleformat{\chapter}[display]{\bfseries}{\Large\chaptername\;\thechapter}{-5pt}{\huge\bfseries\itshape}
%\titlespacing{\chapter}{0pt}{0pt}{10pt}
%\titleformat{\section}[hang]{\bfseries\Large}{\thesection.}{8pt}{\Large\bfseries}
%\titlespacing{command}{left}{before-sep}{after-sep}
%\titlespacing{\subsection}{0pt}{0pt}{5pt}
%change appearence of heading of toc: 0 space above, bold, italic huge toc-heading
%\renewcommand{\cftbeforetoctitleskip}{0pt}
%\renewcommand{\cfttoctitlefont}{\itshape\Huge\bfseries}
%change indentations due to width of capital roman numbers
%\renewcommand{\cftchapnumwidth}{2.5em}
%\renewcommand{\cftsecindent}{2.5em}
%\renewcommand{\cftsubsecindent}{4.8em}
% Add new page-style (just footer), patch \chapter command to use this page style
\fancypagestyle{plainChapter}{%
\fancyhf{}% clear all header/footer fields first
\fancyfoot[C]{\thepage}% centered page number in the footer
\renewcommand{\headrulewidth}{0pt}% Line at the header invisible
\renewcommand{\footrulewidth}{0.4pt}% Line at the footer visible
}
%changes pagestyle; instead of empty page the normal footer is printed
\patchcmd{\chapter}{\thispagestyle{plain}}{\thispagestyle{plainChapter}}{}{}
%usually, after a new chapter the section counter needs to be reset manually. Instead, automatic reset
%(the conditional \ifreset@section@after@chapter is defined elsewhere in this package)
\ifreset@section@after@chapter
\pretocmd{\chapter}{\setcounter{section}{0}}{}{}
\fi
\pagestyle{fancy}
\pagenumbering{arabic}
%remember chapter-title in \leftmark and \rightmark
\renewcommand{\chaptermark}[1]{%
\markboth{\chaptername
\ \thechapter:\ #1}{}}
%remember section title in \rightmark (\markright sets the right mark, not \leftmark)
\renewcommand{\sectionmark}[1]{%
\markright{\thesection.\ #1}{}}
%remove page number from part{}-pages
\let\sv@endpart\@endpart
\def\@endpart{\thispagestyle{empty}\sv@endpart}
%change header:
\renewcommand{\headrulewidth}{0.75pt}
\renewcommand{\footrulewidth}{0.3pt}
\lhead{\rightmark}%left: section-number. section-title
\rhead{\leftmark}%right: chapter chapternumber: chapter-title
%equations: drop the section prefix from \theequation and restart the counter at every \section
\counterwithout{equation}{section}
\pretocmd{\section}{\setcounter{equation}{0}}{}{}
%appendix: wrap \appendix so appendix chapters get their own numbering and chapter layout
\let\old@appendix\appendix
\def\appendix{%
\old@appendix%
%appendix chapters use the fancy pagestyle instead of plainChapter
\patchcmd{\chapter}{\thispagestyle{plainChapter}}{\thispagestyle{fancy}}{}{}%
\renewcommand{\chaptername}{Anhang}%
%sections numbered by chapter letter: A.1, A.2, ...
\renewcommand{\thesection}{\Alph{chapter}.\arabic{section}}%
\titleformat{\chapter}[hang]{\bfseries}{\LARGE\chaptername\ \thechapter:}{0.5em}{\LARGE\bfseries}%
\titlespacing{\chapter}{0pt}{-0.75cm}{0pt}%
}
\endinput

View file

@ -71,3 +71,6 @@ Skript und Aufgaben zu den Vorlesungen **Theoretische Mechanik** (Prof. Dr. Ketz
- Zusammenfassung: [https://www.magentacloud.de/lnk/UXvys0zi](https://www.magentacloud.de/lnk/UXvys0zi) Passwort: 123456
- Mitschrift: [https://www.magentacloud.de/lnk/MuvSMJgb](https://www.magentacloud.de/lnk/MuvSMJgb) Passwort: 123456
- Aufgaben: [https://www.magentacloud.de/lnk/wxvyMA1u](https://www.magentacloud.de/lnk/wxvyMA1u) Passwort: 123456
# Erasmus-Semester in der University of Bristol, UK
Während meines Erasmus-Aufenthaltes in Bristol, UK habe ich mir folgende Vorlesungen angehört: **Applied Statistics**, **Introduction to Artificial Intelligence**, **Partial Differential Equations** und **Web Technologies**. Eine Auswahl an Mitschriften ist hier veröffentlicht.

View file

@ -48,13 +48,60 @@
\ooalign{$\gneq$\cr\raise.22ex\hbox{$\rhd$}\cr}%
}
%General newcommands!
%NOTE(review): \comp/\real/\whole are each defined twice in this chunk; a second \newcommand for an existing name errors, so one of the two runs is presumably interleaved diff residue — confirm against the full file
\newcommand{\comp}{\mathbb{C}} % complex set C
\newcommand{\real}{\mathbb{R}} % real set R
\newcommand{\whole}{\mathbb{Z}} % whole number Symbol
%%%%%%%% General newcommands %%%%%%%%%%%
% Sets
%%%%%%%%%%% Old ones, to be compatible with old latex scripts %%%%%%%%%%%
\newcommand{\natur}{\mathbb{N}} % natural number Symbol
\newcommand{\whole}{\mathbb{Z}} % whole number Symbol
\newcommand{\ratio}{\mathbb{Q}} % rational number symbol
\newcommand{\real}{\mathbb{R}} % real set R
\newcommand{\comp}{\mathbb{C}} % complex set C
\newcommand{\quat}{\mathbb{H}} % quaternion
\newcommand{\field}{\mathbb{K}} % general field for the others above!
%%%% Alternative emptyset symbol %%%%%%%%%%%%%%%%%
\let\oldemptyset\emptyset
\let\emptyset\varnothing
%%%%%%%%%%% new shorter ones %%%%%%%%%%%%%%%%%
\newcommand{\N}{\mathbb{N}} % natural number Symbol
\newcommand{\Z}{\mathbb{Z}} % whole number Symbol
\newcommand{\Q}{\mathbb{Q}} % rational number symbol
\newcommand{\R}{\mathbb{R}} % real set R
\newcommand{\Rn}{\mathbb{R}^n} % real set R^n
\newcommand{\Rd}{\mathbb{R}^d} % real set R^d for MINT
\undef{\H} % NOTE(review): \H is LaTeX's double-acute text accent (\H{o}); it is sacrificed here for the quaternion symbol
\undef{\C}
\newcommand{\C}{\mathbb{C}} % complex set C
\newcommand{\H}{\mathbb{H}} % quaternion
\newcommand{\F}{\mathbb{F}} % general field for the others above!
\newcommand{\powerset}{\mathcal{P}} % Powerset
\newcommand{\pows}{\mathcal{P}} % shorter one
%%%%%%%%%%% p-adics %%%%%%%%%%%
\newcommand{\pZ}{\mathbb{Z}_p} % p-adic integers
\newcommand{\pQ}{\mathbb{Q}_p} % p-adic numbers
\newcommand{\pC}{\mathbb{C}_p} % p-adic complex numbers
%%%%%%%%%%% Old commands, need to check, which ones are in use, if not delete!
%%% Metrics, Norms, etc %%%%
\newcommand{\norm}{\Vert \cdot \Vert}
\newcommand{\metric}{\vert \cdot \vert}
%%%%%%%%%%% Measure Theory %%%%%%%%%%%%%%%%%%
\newcommand{\sigA}{\mathscr{A}} % typical Sigma A for sigma algebras
\newcommand{\sigB}{\mathscr{B}} % typical Sigma B for sigma algebras
\newcommand{\sigG}{\mathscr{G}} % typical Generator for sigma algebras
\newcommand{\sigO}{\mathscr{O}} % sigma algebra of open sets
\newcommand{\probP}{\mathbb{P}} % probability measure
\newcommand{\diff}{\mathrm{d}} % differential d
\newcommand{\s}{\,\,} % space after the function in the integral
\newcommand{\cont}{\mathcal{C}} % Contour C
@ -77,8 +124,6 @@
%NOTE(review): \norm and \metric are also defined earlier in this file; a second \newcommand for an existing name errors — looks like interleaved diff residue, confirm against the full file
\newcommand{\naturpos}{\natur_{>0}} % positive natural numbers
\newcommand{\Imag}{\operatorname{Im}} % Imaginary symbol
\newcommand{\Realz}{\operatorname{Re}} % Real symbol
\newcommand{\norm}{\Vert \cdot \Vert}
\newcommand{\metric}{\vert \cdot \vert}
\newcommand{\foralln}{\forall n} %all n
\newcommand{\forallnset}{\forall n \in \natur} %all n € |N
\newcommand{\forallnz}{\forall n \geq n_0} % all n >= n_0 (FIX(review): body was "\forall n \geq _0", which drops the subscript base "n" and typesets a bare subscript on \geq)
@ -87,7 +132,6 @@
%quotation-mark arrows used when writing out proof directions
\newcommand{\lproofar}{"`$ \Leftarrow $"'} % "`<="'
\newcommand{\rproofar}{"`$ \Rightarrow $"'} % "`=>"'
\newcommand{\beha}{\Rightarrow \text{ Behauptung}} % "=> claim" shorthand
\newcommand{\powerset}{\mathcal{P}} % NOTE(review): \powerset is also defined in the set section above — duplicate \newcommand would error; looks like diff residue
\newcommand{\person}[1]{\textsc{#1}} % names of people in small caps
\newcommand{\highlight}[1]{\emph{#1}} % semantic emphasis wrapper
\newcommand{\realz}{\mathfrak{Re}} % Fraktur real-part symbol
@ -102,10 +146,7 @@
%big \boxplus operator that adapts its size to the current math style (\mathchoice)
\newcommand{\BIGboxplus}{\mathop{\mathchoice{\raise-0.35em\hbox{\huge $\boxplus$}}{\raise-0.15em\hbox{\Large $\boxplus$}}{\hbox{\large $\boxplus$}}{\boxplus}}}
\newcommand{\eps}{\textit{eps }} % machine epsilon as italic text
%%%% Alternative emptyset symbol %%%%%%%%%%%%%%%%%
%NOTE(review): this \let pair also appears earlier in the file — redundant; looks like diff residue
\let\oldemptyset\emptyset
\let\emptyset\varnothing
%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newcommand{\skalar}[2]{\left\langle #1,#2\right\rangle} % inner product <#1,#2>
\newcommand{\qraum}[2]{\sfrac{#1}{#2}} % quotient space #1/#2 (\sfrac)
@ -138,7 +179,7 @@
%%%%%%%%%%%%%%%%% MINT %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%NOTE(review): the active and the commented line below are identical — diff residue; keep only one in the merged file
\newcommand{\sigmalg}{$\sigma$-Algebra} % text shorthand for "σ-Algebra"
%\newcommand{\sigmalg}{$\sigma$-Algebra}
% declares the Bourbaki dangerous bend symbol, to highlight common Errors done with definitions, theorems, find it quite useful.
\DeclareFontFamily{U}{stixbbit}{}
\DeclareFontShape{U}{stixbbit}{m}{it}{<-> stix-mathbbit}{}
@ -147,62 +188,105 @@
}
% Math Operators
%NOTE(review): several of these reappear in the alphabetical list further below (\inn, \ext, \cl, \div, ...); \DeclareMathOperator errors on an already defined name, so one of the two runs is presumably interleaved diff residue — confirm against the full file
\DeclareMathOperator{\inn}{int} % Set of inner points
\DeclareMathOperator{\ext}{ext} % Set of outer points
\DeclareMathOperator{\cl}{cl} % Closure
\DeclareMathOperator{\grad}{grad} % gradient
\DeclareMathOperator{\D}{d} % upright d
\DeclareMathOperator{\id}{id} % identity
\DeclareMathOperator{\graph}{graph} % graph of a function
\DeclareMathOperator{\Int}{int} % interior (same output as \inn)
\DeclareMathOperator{\Ext}{ext} % exterior (same output as \ext)
\DeclareMathOperator{\diam}{diam} % diameter of a set
\DeclareMathOperator{\supp}{supp} % support
\DeclareMathOperator{\cond}{cond} % condition number? unverified
\DeclareMathOperator{\rd}{rd} % rounding? unverified
\undef\div % drop LaTeX's division-sign \div so the name can be reused
\DeclareMathOperator{\div}{div} % divergence
\DeclareMathOperator{\rot}{rot} % rotation (curl)
%%%%%% A
%NOTE(review): \Aff, \Aut and \Abb are each declared twice right here, and most names are declared again in the alphabetical list below; \DeclareMathOperator errors on an already defined name — this block looks like interleaved diff residue, confirm against the full file
\DeclareMathOperator{\Aff}{Aff} % Affine
\DeclareMathOperator{\Aut}{Aut} % Automorphism
\DeclareMathOperator{\Abb}{Abb} % Abbildung
\DeclareMathOperator{\End}{End}
\DeclareMathOperator{\Aff}{Aff}
\DeclareMathOperator{\Aut}{Aut}
\DeclareMathOperator{\Hom}{Hom}
\DeclareMathOperator{\Abb}{Abb}
%%%%%% B
\DeclareMathOperator{\Bil}{Bil}
\DeclareMathOperator{\Eig}{Eig}
\DeclareMathOperator{\Mat}{Mat}
\DeclareMathOperator{\Ker}{Ker}
\DeclareMathOperator{\diag}{diag}
\DeclareMathOperator{\GL}{GL}
\DeclareMathOperator{\tr}{tr}
\DeclareMathOperator{\rk}{rk}
\DeclareMathOperator{\ZR}{ZR}
\DeclareMathOperator{\SR}{SR}
\DeclareMathOperator{\sgn}{sgn}
\DeclareMathOperator{\Span}{span}
\DeclareMathOperator{\Image}{Im}
\DeclareMathOperator{\Sym}{Sym}
\DeclareMathOperator{\Hau}{Hau}
\DeclareMathOperator{\pr}{pr}
\DeclareMathOperator{\Orth}{O}
\DeclareMathOperator{\SO}{SO}
\DeclareMathOperator{\Uni}{U}
\DeclareMathOperator{\SU}{SU}
\DeclareMathOperator{\SL}{SL}
\DeclareMathOperator{\ggT}{ggT}
\DeclareMathOperator{\kgV}{kgV}
\DeclareMathOperator{\rang}{rang}
\DeclareMathOperator{\ord}{ord}
\DeclareMathOperator{\Inn}{Inn}
\DeclareMathOperator{\Z}{Z} % NOTE(review): clashes with \newcommand{\Z}{\mathbb{Z}} above; the alphabetical list below keeps this declaration commented out
\DeclareMathOperator{\UG}{UG}
\DeclareMathOperator{\Fix}{Fix}
\DeclareMathOperator{\Stab}{Stab}
\DeclareMathOperator{\Syl}{Syl}
\DeclareMathOperator{\Typ}{Typ}
\DeclareMathOperator{\LC}{LC}
\DeclareMathOperator{\Quot}{Quot}
\DeclareMathOperator{\w}{w}
%%%%%% C
%NOTE(review): many operators here repeat declarations from the blocks above (\cl, \cond, \diam, \div, ...) — see the notes there; likely diff residue
\DeclareMathOperator{\cl}{cl} % Closure
\DeclareMathOperator{\cond}{cond} % ?
\DeclareMathOperator{\curl}{curl} % Curl/Rotation
%%%%%% D
\DeclareMathOperator{\diam}{diam} % Diameter of a set
\undef\div
\DeclareMathOperator{\div}{div} % Divergence
\DeclareMathOperator{\diag}{diag} % Diagonal matrix
\DeclareMathOperator{\D}{d} % upright d (differential? unverified)
%%%%%% E
\DeclareMathOperator{\Eig}{Eig} % Eigenspace
\DeclareMathOperator{\End}{End} % Endomorphism
\DeclareMathOperator{\ext}{ext} % Exterior
\DeclareMathOperator{\Ext}{ext} % 2nd time?
%%%%%% F
\DeclareMathOperator{\Fix}{Fix} % Fix-points for group theory
%%%%%% G
\DeclareMathOperator{\ggT}{ggT} % Größter gemeinsamer Teiler (gcd)
\DeclareMathOperator{\GL}{GL} % General Linear Group
\DeclareMathOperator{\graph}{graph} % Graph
\DeclareMathOperator{\grad}{grad} % Gradient
%%%%%% H
\DeclareMathOperator{\Hom}{Hom} % family of Homomorphisms
\DeclareMathOperator{\Hau}{Hau} % Hauptraum (generalized Eigenspace)
%%%%%% IJK
\DeclareMathOperator{\Image}{Im} % Image
\DeclareMathOperator{\Inn}{Inn} % Inner automorphism
\DeclareMathOperator{\inn}{int} % Set of inner points
\DeclareMathOperator{\Int}{int} % 2nd time
\DeclareMathOperator{\id}{id} % Identity
\DeclareMathOperator{\kgV}{kgV} % kleinstes gemeinsames Vielfaches (lcm)
%%%%%% LMN
\DeclareMathOperator{\LC}{LC} % Leitkoeffizient
\DeclareMathOperator{\Mat}{Mat} % Matrix
%%%%%% OPQ
\DeclareMathOperator{\ord}{ord} % Order of a group
\DeclareMathOperator{\Orth}{O} % Orthogonal Group
\DeclareMathOperator{\Out}{Out} % Outer Automorphism
\DeclareMathOperator{\pr}{pr} % Projection
\DeclareMathOperator{\Quot}{Quot} % Quotient
%%%%%% R
\DeclareMathOperator{\rd}{rd} % ?
\DeclareMathOperator{\rk}{rk} % Rank of a matrix
\DeclareMathOperator{\rot}{rot} % Rotation (Curl)
\DeclareMathOperator{\rang}{rang} % Matrix Rank
%%%%%% S
\DeclareMathOperator{\sgn}{sgn} % Signum function
\DeclareMathOperator{\SL}{SL} % Special Linear Group
\DeclareMathOperator{\SO}{SO} % Special Orthogonal Group
\DeclareMathOperator{\Span}{span} % Span
\DeclareMathOperator{\SR}{SR} % Spaltenraum
\DeclareMathOperator{\Stab}{Stab} % Stabilizer
\DeclareMathOperator{\supp}{supp} % Support
\DeclareMathOperator{\Syl}{Syl} % Sylow Group
\DeclareMathOperator{\Sym}{Sym} % Symmetric Group
\DeclareMathOperator{\SU}{SU} % Special Unitary Group
%%%%%% T
\DeclareMathOperator{\tr}{tr} % Trace operator
\DeclareMathOperator{\Typ}{Typ} % Type
%%%%%% UVW
\DeclareMathOperator{\UG}{UG} % Untergruppe (subgroup)
\DeclareMathOperator{\Uni}{U} % Unitary Group
\DeclareMathOperator{\w}{w} % ?
%%%%%% XYZ
\DeclareMathOperator{\ZR}{ZR} % Zeilenraum
%\DeclareMathOperator{\Z}{Z} % ?
% WHITESPACE COMMANDS
\newcommand{\nl}{\\[\baselineskip]} % line break followed by one blank line, without underfull-hbox-warning
\newcommand{\enter}{$ $\newline} % forced line break regardless of context
\newcommand\tab[1][1cm]{\hspace*{#1}} % practical Tabulator (default width 1cm)
\endinput

View file

@ -361,6 +361,14 @@
outerlinewidth=0.1pt,
]{repetition}{Wiederholung}
%FIX(review): "innerleftmargin" was given twice (0pt, then 10pt); with last-wins key handling the 0pt entry was dead — removed it
\newmdtheoremenv[%
hidealllines=true,%
frametitlefont=\normalfont\bfseries\color{black},%
skipabove=5pt,%
innerleftmargin=10pt,%
]{hint}{\hspace*{-10pt}$\blacktriangleright$\hspace*{\dimexpr 10pt - \blacktrianglewidth\relax}Hinweis}
%various unnumbered thereoms and environment (usually included in theorems like above)
%To get rid of the parentheses, a new theorem style is neccessary (definition of nonumberbreak from ntheorem.sty)
%to achieve the underlining, this needed to put in the theoremstyle definition