Browse Source

Adding cleaned up version of the thesis

master
Renata Kopecna 3 years ago
parent
commit
dd3c89bca4
  1. 37
      .gitignore
  2. 87
      Appendix/appendix_0.tex
  3. 105
      Appendix/appendix_1.tex
  4. 57
      Appendix/appendix_2.tex
  5. 69
      Appendix/appendix_3.tex
  6. 64
      Appendix/appendix_4.tex
  7. 61
      Appendix/appendix_5.tex
  8. 123
      Chapters/Acceptance/acceptance.tex
  9. 48
      Chapters/Acceptance/app_angProj_Run1.tex
  10. 73
      Chapters/Acceptance/app_angProj_Run2.tex
  11. 204
      Chapters/AnalysisIntroduction/anaIntro.tex
  12. 285
      Chapters/AnalysisTheory/anaTheroy.tex
  13. 25
      Chapters/Conclusions/conclusions.tex
  14. 50
      Chapters/EventSelection/Backgrounds.tex
  15. 102
      Chapters/EventSelection/Cut.tex
  16. 77
      Chapters/EventSelection/Efficiency.tex
  17. 21
      Chapters/EventSelection/FitsInQ2.tex
  18. 56
      Chapters/EventSelection/L0Efficiency.tex
  19. 205
      Chapters/EventSelection/MVA.tex
  20. 44
      Chapters/EventSelection/MultCand.tex
  21. 55
      Chapters/EventSelection/Signal.tex
  22. 73
      Chapters/EventSelection/Simulation.tex
  23. 125
      Chapters/EventSelection/TrigStrip.tex
  24. 104
      Chapters/EventSelection/eventSelection.tex
  25. 21
      Chapters/Introduction/intro.tex
  26. 156
      Chapters/LHCbExperiment/lhcb.tex
  27. 9
      Chapters/ParameterMeasurement/MainFit_Ref_FinalToys_Run12.tex
  28. 9
      Chapters/ParameterMeasurement/MainFit_Ref_Run12.tex
  29. 9
      Chapters/ParameterMeasurement/MainFit_Ref_folding0_Run12.tex
  30. 9
      Chapters/ParameterMeasurement/MainFit_Ref_folding1_Run12.tex
  31. 9
      Chapters/ParameterMeasurement/MainFit_Ref_folding2_Run12.tex
  32. 9
      Chapters/ParameterMeasurement/MainFit_Ref_folding3_Run12.tex
  33. 9
      Chapters/ParameterMeasurement/MainFit_Ref_folding4_Run12.tex
  34. 270
      Chapters/ParameterMeasurement/parameterMeasurement.tex
  35. 5
      Chapters/Results/results.tex
  36. 293
      Chapters/StandardModel/standardModel.tex
  37. 26
      Chapters/Toys/jobs/614.tex
  38. 32
      Chapters/Toys/jobs/pull_table_631_col.tex
  39. 48
      Chapters/Toys/jobs/pull_table_632_fld.tex
  40. 48
      Chapters/Toys/jobs/pull_table_632_fld_col.tex
  41. 22
      Chapters/Toys/jobs/pull_table_633.tex
  42. 22
      Chapters/Toys/jobs/pull_table_634.tex
  43. 22
      Chapters/Toys/jobs/pull_table_635.tex
  44. 22
      Chapters/Toys/jobs/pull_table_636.tex
  45. 32
      Chapters/Toys/jobs/pull_table_643_col.tex
  46. 48
      Chapters/Toys/jobs/pull_table_644_fld_col.tex
  47. 22
      Chapters/Toys/jobs/pull_table_645.tex
  48. 22
      Chapters/Toys/jobs/pull_table_646.tex
  49. 22
      Chapters/Toys/jobs/pull_table_647.tex
  50. 22
      Chapters/Toys/jobs/pull_table_648.tex
  51. 263
      Chapters/Toys/toys.tex
  52. 85
      Chapters/TrackEff/TrackCalib.tex
  53. 187
      Chapters/TrackEff/measurement.tex
  54. 128
      Chapters/TrackEff/production.tex
  55. 64
      Chapters/TrackEff/results.tex
  56. 41
      Chapters/TrackEff/systematics.tex
  57. 12
      Chapters/TrackEff/trackEff.tex
  58. 19
      Chapters/Uncertanities/uncertanities.tex
  59. 94
      Chapters/Validation/refFit.tex
  60. 131
      Chapters/Validation/validation.tex
  61. BIN
      Feynman/BuToKstJpsi.pdf
  62. 49
      Feynman/BuToKstJpsi.tex
  63. BIN
      Feynman/BuToKstPhi.pdf
  64. 56
      Feynman/BuToKstPhi.tex
  65. BIN
      Feynman/Documentation.pdf
  66. BIN
      Feynman/GenericBox.pdf
  67. 17
      Feynman/GenericBox.tex
  68. BIN
      Feynman/Kaon_box.pdf
  69. 27
      Feynman/Kaon_box.tex
  70. BIN
      Feynman/Kaon_box_cropped.pdf
  71. 9
      Feynman/Kaon_box_diagram.t1
  72. BIN
      Feynman/Kaon_penguin1.pdf
  73. 29
      Feynman/Kaon_penguin1.tex
  74. BIN
      Feynman/Kaon_penguin2.pdf
  75. 27
      Feynman/Kaon_penguin2.tex
  76. BIN
      Feynman/O1_a.pdf
  77. 32
      Feynman/O1_a.tex
  78. BIN
      Feynman/O1_b.pdf
  79. 35
      Feynman/O1_b.tex
  80. BIN
      Feynman/O3_6.pdf
  81. 41
      Feynman/O3_6.tex
  82. BIN
      Feynman/bsll_DM.pdf
  83. 35
      Feynman/bsll_DM.tex
  84. BIN
      Feynman/bsll_Zprime.pdf
  85. 33
      Feynman/bsll_Zprime.tex
  86. BIN
      Feynman/bsll_box.pdf
  87. 38
      Feynman/bsll_box.tex
  88. BIN
      Feynman/bsll_eff.pdf
  89. 35
      Feynman/bsll_eff.tex
  90. BIN
      Feynman/bsll_eff_meson.pdf
  91. 65
      Feynman/bsll_eff_meson.tex
  92. BIN
      Feynman/bsll_eff_meson_charm.pdf
  93. 80
      Feynman/bsll_eff_meson_charm.tex
  94. BIN
      Feynman/bsll_leptoquark.pdf
  95. 30
      Feynman/bsll_leptoquark.tex
  96. BIN
      Feynman/bsll_penguin.pdf
  97. 42
      Feynman/bsll_penguin.tex
  98. BIN
      Feynman/bsmumu_eff.pdf
  99. 35
      Feynman/bsmumu_eff.tex
  100. 51
      Feynman/make_plot.sh

37
.gitignore

@ -0,0 +1,37 @@
# Directories with David's thesis
AnaNote/
DavidsThesis/
#Directories with other sources
Articles/
# Automatically created latex files
*eps-converted-to.pdf
*.aux
*.log
*.nav
*.out
*.snm
*.toc
*.nlo
*.bbl
*.lot
*.lof
*.blg
*.tdo
*.nls
*.ilg
*.mp
# Other files
*.root
# Compressed files
*.gz
#Actual pdf
dizertacka.pdf
#Annoying files
*(busy*
figures/TrackEff/*/*.C

87
Appendix/appendix_0.tex

@ -0,0 +1,87 @@
%==========================================
%
% Appendices related to the theory
%
%==========================================
\section[Theoretical introduction to the \texorpdfstring{${\BuToKstmmBF}$}{BuToKstmumu} decay]
{Theoretical introduction to the \texorpdfstring{\BuToKstmmBF}{BuToKstmumu} decay}
\label{app:ANA-Theo}
\subsection{Decay rate}
The full form of \refEq{decay_rate} with explicitly stated $f_i$ is a rather lengthy \refEq{decay_rate_full}:
%%
\begin{align}\label{eq:decay_rate_full}\begin{split}
\frac{\deriv^4\Gamma}{\deriv\cos\thetal\deriv\cos\thetak\deriv\phi\deriv q^2} = \frac{9}{32\pi}&\sum_i{ J_i\left(q^2\right)f_i\left(\cos\thetal,\cos\thetak,\phi\right)} =\\
= \frac{9}{32\pi}& \left\{\right. J_{1s} \sin^2\thetak \\
&+J_{1c} \cos^2\thetak \\
&+J_{2s} \sin^2\thetak \cos 2\thetal \\
&+J_{2c} \cos^2\thetak \cos 2\thetal \\
&+J_{3} \sin^2\thetak \sin^2\thetal \cos 2\phi \\
&+J_{4} \sin 2\thetak \sin 2\thetal \cos \phi \\
&+J_{5} \sin 2\thetak \sin \thetal \cos \phi \\
&+J_{6s} \sin^2\thetak \cos \thetal \\
&+J_{7} \sin 2\thetak \sin \thetal \sin \phi \\
&+J_{8} \sin 2\thetak \sin 2\thetal \sin \phi \\
&+J_{9} \sin^2\thetak \sin^2\thetal \sin 2\phi \left.\right\}\,. \\
\end{split}\end{align}
%
This lengthy formula can be simplified by neglecting the muon mass. This is a good assumption for $\qsq\gtrsim1\gevgev$. Under this assumption, the following relations can be obtained:
\begin{align}\begin{split}
J_1^c &= 1-\frac{4}{3}J_1^s\,,\\
J_2^s &= \frac{1}{3}J_1^s\,,\\
J_2^c &= \frac{4}{3}J_1^s-1\,.\\
\end{split}\end{align}
In some cases it is also convenient to define \emph{CP-asymmetric} angular observables $A_i$ besides the usual $S_i$ variables (see \refEq{Si_definition})
\begin{equation}\label{eq:Ai_definition}
A_i = \frac{J_i-\bar{J_i}}{\Gamma+\bar{\Gamma}}\,.
\end{equation}
\clearpage
Rewriting the \refEq{decay_rate_full} using the \emph{CP-symmetric} $S_i$ variables results in the following formula:
%
\begin{align}\label{eq:decay_rate2}\begin{split}
&\left.
\frac{{\rm d}(\Gamma+\bar{\Gamma})}{{\rm dcos}\thetal\,{\rm dcos}\thetak\,{\rm d}\phi}
\right
|_{\rm P} = \tfrac{9}{32\pi}\Bigl[\tfrac{3}{4} (1-\textcolor{red}{F_{\rm L}})\sin^2\thetak\\
&+ \textcolor{red}{F_{\rm L}}\cos^2\thetak + \tfrac{1}{4}(1-\textcolor{red}{F_{\rm L}})\sin^2\thetak\cos 2\thetal\nonumber\\
&- \textcolor{red}{F_{\rm L}} \cos^2\thetak\cos 2\thetal + \textcolor{red}{S_3}\sin^2\thetak \sin^2\thetal \cos 2\phi\nonumber\\
&+ \textcolor{red}{S_4} \sin 2\thetak \sin 2\thetal \cos\phi + \textcolor{red}{S_5}\sin 2\thetak \sin \thetal \cos \phi\nonumber\\
&+ \tfrac{4}{3} \textcolor{red}{A_{\rm FB}} \sin^2\thetak \cos\thetal + \textcolor{red}{S_7} \sin 2\thetak \sin\thetal \sin\phi\nonumber\\
&+ \textcolor{red}{S_8} \sin 2\thetak \sin 2\thetal \sin\phi + \textcolor{red}{S_9}\sin^2\thetak \sin^2\thetal \sin 2\phi \nonumber
\Bigr]
\end{split}\end{align}
\subsection{\swave decay rate}
The decay rate of the \swave is
\begin{equation}\label{eq:decay_rate_S_app}
\left.
\frac{{\rm d}(\Gamma+\bar{\Gamma})}{{\rm dcos}\theta_L\,{\rm dcos}\theta_K\,{\rm d}\phi}
\right |_{\rm S}
=\frac{3}{16\pi}{F_S}\sin^2\theta_L\,.
\end{equation}
%
The \pwave and \swave interference term can be parameterized as follows:
%
\begin{equation}
\label{eq:decay_rate_PS_full}
\begin{aligned}
\left.
\frac{{\rm d}(\Gamma+\bar{\Gamma})}{{\rm dcos}\theta_L\,{\rm dcos}\theta_K\,{\rm d}\phi}
\right |_{\rm PS}
=\frac{3}{16\pi}\left[\right.
&{S_{S1}}\sin^2\theta_L\cos\theta_K \\
+& {S_{S2}}\sin2\theta_L \sin \theta_K \cos\phi\\
+& {S_{S3}}\sin\theta_L \sin \theta_K \cos\phi \\
+& {S_{S4}}\sin\theta_L \sin \theta_K \sin\phi \\
+& {S_{S5}}\sin2\theta_L \sin \theta_K \sin\phi
\left.\right]\,.
\end{aligned}
\end{equation}
%
These two terms need to be added to the full measured decay rate. The full angular description of the decay is then in \refEq{decay_rate_final}.
\clearpage

105
Appendix/appendix_1.tex

@ -0,0 +1,105 @@
%==========================================
%
% Appendices related to the selection
%
%==========================================
\section{Event selection}
\subsection{Crystal Ball function}\label{app:CrystalBall}
The Crystal Ball function is a probability density function widely used to model processes with losses \cite{APP-CB}.
It consists of a Gaussian core and one power-law low-end tail that describes the loss, typically due to final-state radiation. The function got its name from the Crystal Ball collaboration \cite{APP-CBCollab}.
The experiment was placed at the SPEAR accelerator at the SLAC National Laboratory and was designed as a spark chamber surrounded by an almost complete sphere (covering 98\% of the solid angle) made of scintillating crystals, hence the detector's prophetic name. The detector is still in operation today.
It is located in Mainz, placed at the MAMI microtron \cite{APP-CBMainz}.
The Crystal Ball function is then defined as
\begin{equation}
\mathcal{P}(x;\alpha,n,\bar x,\sigma) = N \cdot
\begin{cases}
\exp(- \frac{(x - \bar x)^2}{2 \sigma^2}), & \mbox{for }\frac{x - \bar x}{\sigma} > -\alpha \\
A \cdot (B - \frac{x - \bar x}{\sigma})^{-n} & \mbox{for }\frac{x - \bar x}{\sigma} \leqslant -\alpha
\end{cases}\,,
\end{equation}
where $\alpha$ and $n$ describe the tail, $\bar x$ and $\sigma$ are the mean and the width of the peak. $N$ is a normalization factor, $A$ and $B$ are constants defined as:
\begin{align}
\begin{split}
A &= \left(\frac{n}{\left| \alpha \right|}\right)^n \cdot \exp\left(- \frac {\left| \alpha \right|^2}{2}\right)\,,\\
B &= \frac{n}{\left| \alpha \right|} - \left| \alpha \right|\,.\\
%N &= \frac{1}{\sigma (C + D)}\,,\\
%C &= \frac{n}{\left| \alpha \right|} \cdot \frac{1}{n-1} \cdot \exp\left(- \frac {\left| \alpha \right|^2}{2}\right)\,,\\
%D &= \sqrt{\frac{\pi}{2}} \left(1 + \operatorname{erf}\left(\frac{\left| \alpha \right|}{\sqrt 2}\right)\right)\,.
\end{split}
\end{align}
%
%The $\operatorname{erf}$ is Gauss error function defined as
%
%\begin{equation}
% erf(z) =\frac{2}{\sqrt\pi}\int_0^z e^{-t^2}\,dt\,.
%\end{equation}
\subsubsection{Double sided Crystal Ball function}
The Crystal Ball function can be extended to contain a Gaussian core and two power-law tails, one on each side of the peak.
The double sided Crystal Ball function is then defined as
%
\begin{align}
\mathcal{P}_{CB}(x; x_{peak}, \sigma, n_1, n_2, \alpha_1, \alpha_2) = N \cdot
\begin{cases}
A_1\cdot(B_1 - \frac{x-x_{peak}}{\sigma})^{-n_1} & \text{for } \frac{x - x_{peak}}{\sigma} \leq -\alpha_1
\vspace*{0.3cm}\\
\exp(\frac{-(x - x_{peak})^{2}}{2\sigma^{2}}) & \text{for } -\alpha_1 \leq \frac{x - x_{peak}}{\sigma} \leq \alpha_2
\vspace*{0.3cm}\\
A_2\cdot(B_2 - \frac{x-x_{peak}}{\sigma})^{-n_2} & \text{for } \alpha_2 \leq \frac{x - x_{peak}}{\sigma}
\end{cases}
\,,
\end{align}
%
$N$ is a normalization factor, $A_{1,2}$ and $B_{1,2}$ are constants defined as:
\begin{align}
\begin{split}
A_{1,2}= & \left( \frac{n_{1,2}}{\abs{\alpha_{1,2}}} \right)^{n_{1,2}} \cdot \exp\left(-\frac{\alpha_{1,2}^{2}}{2}\right),
\vspace*{0.3cm}\\
B_{1,2}= & \frac{n_{1,2}}{\abs{\alpha_{1,2}}} - \abs{\alpha_{1,2}}.
\end{split}
\end{align}
\subsection{ExpGaus function}\label{app:ExpGaus}
The ExpGaus function is used to describe partially reconstructed backgrounds in \B~meson decays. The definition is in \refEq{App-ExpGaus}. The $\mu$ denotes the mean of the distribution, $\sigma$ is the width of the function, and $D$ is a constant representing the decay of the \B~meson.
%
\begin{align}\label{eq:App-ExpGaus}
f_{EG}(x) =
\begin{cases}
\exp\left(-\frac{\mu-D}{\sigma^2} \left(x-D\right)\right) \exp\left(-\frac{1}{2}\left(\frac{x-\mu}{\sigma}\right)^2\right) & \text{if } x \leq D\\
\exp\left(-\frac{1}{2}\left(\frac{x-\mu}{\sigma}\right)^2\right) & \text{otherwise}
\end{cases}
\,.
\end{align}
%\clearpage
%\subsection{Correction to the simulation}\label{app:SimulationCorrection}
%\todo[inline]{\piz pseudorapidity resolution: data does not agree with MC, see talk from 2018\_05\_14}
%
%\clearpage
\subsection{Reweighted distributions of parameters used for the MLP training}\label{app:CompareVariables}
\input{./figures/fig_CompareVariables}
%\input{./figures/fig_CompareVariables_sig}
%\clearpage
%\subsection{\lone trigger efficiency}\label{app:L0Eff}
%\input{Chapters/EventSelection/L0Efficiency}
%
\clearpage
\subsection[Signal yield in bins of the dimuon invariant mass squared]{Signal yield in bins of the dimuon invariant mass squared}\label{app:yield_q2}
\input{Chapters/EventSelection/FitsInQ2}
\clearpage

57
Appendix/appendix_2.tex

@ -0,0 +1,57 @@
%==========================================
%
% Appendices related to angular corrections
%
%==========================================
\vspace*{-2.cm}\section{Angular parametrization}\label{app:AngCorr}
\vspace{-1.0cm}
\begin{figure}[hb!]
\centering\vspace{-5pt}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff0_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff1_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff2_KplusPi0Resolved_Run1.eps}\\ \vspace{-1pt}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff3_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff4_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff5_KplusPi0Resolved_Run1.eps}\\ \vspace{-1pt}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff6_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff7_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff8_KplusPi0Resolved_Run1.eps}\\ \vspace{-1pt}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff9_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff10_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff11_KplusPi0Resolved_Run1.eps}\\ \vspace{-1pt}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff12_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff13_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff14_KplusPi0Resolved_Run1.eps}\\ \vspace{-1pt}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff15_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff16_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff17_KplusPi0Resolved_Run1.eps}\\ \vspace{-5pt}
\captionof{figure}[Angular acceptance parametrization projections in \ctk for \runI.]{One-dimensional projections of the angular acceptance in the dimension of \ctk in 18 bins of \qsq. The data points are \runI PHSP MC sample, the solid curve is the four dimensional Legendre-polynomial parametrization. \label{fig:app_angProj_ctk_Run1}}
\end{figure}
\clearpage
\include{Chapters/Acceptance/app_angProj_Run1}
\include{Chapters/Acceptance/app_angProj_Run2}
%Both Legendre and Chebyshev are classical orthogonal polynomials
%\begin{equation}\label{eq:legendre}
%\int_{-1}^{1}P_m(x)P_n(x)dx =0 \quad\text{if\,} n \neq m\,.
%\end{equation}
%\begin{equation}\label{eq:OGlegendre}
%P_0(x)=1\,,\\
%P_1(x)=x\,,\\
%\left(n+1\right)P_{n+1}(x) = \left(2n+1\right)xP_{n}(x)-nP_{n-1}(x)\,.
%\end{equation}
%
%\begin{equation}\label{eq:OGcebysev}
%T_0(x)=1\,,\\
%T_1(x)=x\,,\\
%T_{n+1}(x) = 2xT_{n}(x)-T_{n-1}(x)\,.
%\end{equation}
%
%p value: greater than 0.05 $\Rightarrow$ believe the variables are independent
%\begin{equation}\label{eq:chi2}
%\chisq = \left[ \frac{(O-E)^2}{E}\right]\,\quad\text{O=observed, E = expected}
%\end{equation}
%

69
Appendix/appendix_3.tex

@ -0,0 +1,69 @@
\subsection{Validation of the generation of the pseudoexperiments}\label{app:toy-valid}
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/6/ctk_toyfit__6__JpsiFit_OnlySignal_1BIN_bin0_Run12_SimultaneousFit_folding0_HighBmass_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/6/ctl_toyfit__6__JpsiFit_OnlySignal_1BIN_bin0_Run12_SimultaneousFit_folding0_HighBmass_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/6/phi_toyfit__6__JpsiFit_OnlySignal_1BIN_bin0_Run12_SimultaneousFit_folding0_HighBmass_AllPDFs.eps}\\
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/6/ctk_toyfit__6__JpsiFit_OnlySignal_1BIN_bin0_Run12_SimultaneousFit_folding1_HighBmass_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/6/ctl_toyfit__6__JpsiFit_OnlySignal_1BIN_bin0_Run12_SimultaneousFit_folding1_HighBmass_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/6/phi_toyfit__6__JpsiFit_OnlySignal_1BIN_bin0_Run12_SimultaneousFit_folding1_HighBmass_AllPDFs.eps}\\
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/6/ctk_toyfit__6__JpsiFit_OnlySignal_1BIN_bin0_Run12_SimultaneousFit_folding2_HighBmass_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/6/ctl_toyfit__6__JpsiFit_OnlySignal_1BIN_bin0_Run12_SimultaneousFit_folding2_HighBmass_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/6/phi_toyfit__6__JpsiFit_OnlySignal_1BIN_bin0_Run12_SimultaneousFit_folding2_HighBmass_AllPDFs.eps}\\
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/6/ctk_toyfit__6__JpsiFit_OnlySignal_1BIN_bin0_Run12_SimultaneousFit_folding3_HighBmass_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/6/ctl_toyfit__6__JpsiFit_OnlySignal_1BIN_bin0_Run12_SimultaneousFit_folding3_HighBmass_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/6/phi_toyfit__6__JpsiFit_OnlySignal_1BIN_bin0_Run12_SimultaneousFit_folding3_HighBmass_AllPDFs.eps}\\
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/6/ctk_toyfit__6__JpsiFit_OnlySignal_1BIN_bin0_Run12_SimultaneousFit_folding4_HighBmass_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/6/ctl_toyfit__6__JpsiFit_OnlySignal_1BIN_bin0_Run12_SimultaneousFit_folding4_HighBmass_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/6/phi_toyfit__6__JpsiFit_OnlySignal_1BIN_bin0_Run12_SimultaneousFit_folding4_HighBmass_AllPDFs.eps}\\
\captionof{figure}[]{Fit with applied folding to 52\,000 pseudoexperiments with only the signal component. The black points represent the generated pseudoexperiments, the black line represents the fitted model. The blue space represents the signal component. The green dashed line shows only the \pwave component, the orange dotted line represents the \swave components and the dot-and-dash line depicts the interference between the \pwave and the \swave.}\label{fig:toy-sigOnly-Ref-fold}
\end{figure}
\begin{textblock*}{23cm}(1.12\textwidth,5.8cm) % {block width} (coords)
\rotatebox{-90}{\centering Folding 0 \hspace{1.85cm} Folding 1 \hspace{1.85cm} Folding 2 \hspace{1.85cm} Folding 3 \hspace{1.85cm} Folding 4}
\end{textblock*}
%\begin{figure}[hbt!]
% \centering
% \includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/6/m_toyfit__6__JpsiFit_OnlySignal_1BIN_bin0_Run12_SimultaneousFit_folding0_HighBmass_AllPDFs.eps}
% \includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/6/mkpi_toyfit__6__JpsiFit_OnlySignal_1BIN_bin0_Run12_SimultaneousFit_folding0_HighBmass_AllPDFs.eps}\\
% \includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/6/m_toyfit__6__JpsiFit_OnlySignal_1BIN_bin0_Run12_SimultaneousFit_folding1_HighBmass_AllPDFs.eps}
% \includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/6/mkpi_toyfit__6__JpsiFit_OnlySignal_1BIN_bin0_Run12_SimultaneousFit_folding1_HighBmass_AllPDFs.eps}\\
% \includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/6/m_toyfit__6__JpsiFit_OnlySignal_1BIN_bin0_Run12_SimultaneousFit_folding2_HighBmass_AllPDFs.eps}
% \includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/6/mkpi_toyfit__6__JpsiFit_OnlySignal_1BIN_bin0_Run12_SimultaneousFit_folding3_HighBmass_AllPDFs.eps}\\
% \includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/6/m_toyfit__6__JpsiFit_OnlySignal_1BIN_bin0_Run12_SimultaneousFit_folding3_HighBmass_AllPDFs.eps}
% \includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/6/mkpi_toyfit__6__JpsiFit_OnlySignal_1BIN_bin0_Run12_SimultaneousFit_folding4_HighBmass_AllPDFs.eps}\\
% \includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/6/m_toyfit__6__JpsiFit_OnlySignal_1BIN_bin0_Run12_SimultaneousFit_folding4_HighBmass_AllPDFs.eps}
% \includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/6/mkpi_toyfit__6__JpsiFit_OnlySignal_1BIN_bin0_Run12_SimultaneousFit_folding4_HighBmass_AllPDFs.eps}\\
%\end{figure}
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Bkg/7/ctk_toyfit__7__JpsiFit_OnlyBackground_1BIN_bin0_Run12_SimultaneousFit_folding0_HighBmass_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Bkg/7/ctl_toyfit__7__JpsiFit_OnlyBackground_1BIN_bin0_Run12_SimultaneousFit_folding0_HighBmass_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Bkg/7/phi_toyfit__7__JpsiFit_OnlyBackground_1BIN_bin0_Run12_SimultaneousFit_folding0_HighBmass_AllPDFs.eps}\\
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Bkg/7/ctk_toyfit__7__JpsiFit_OnlyBackground_1BIN_bin0_Run12_SimultaneousFit_folding1_HighBmass_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Bkg/7/ctl_toyfit__7__JpsiFit_OnlyBackground_1BIN_bin0_Run12_SimultaneousFit_folding1_HighBmass_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Bkg/7/phi_toyfit__7__JpsiFit_OnlyBackground_1BIN_bin0_Run12_SimultaneousFit_folding1_HighBmass_AllPDFs.eps}\\
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Bkg/7/ctk_toyfit__7__JpsiFit_OnlyBackground_1BIN_bin0_Run12_SimultaneousFit_folding2_HighBmass_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Bkg/7/ctl_toyfit__7__JpsiFit_OnlyBackground_1BIN_bin0_Run12_SimultaneousFit_folding2_HighBmass_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Bkg/7/phi_toyfit__7__JpsiFit_OnlyBackground_1BIN_bin0_Run12_SimultaneousFit_folding2_HighBmass_AllPDFs.eps}\\
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Bkg/7/ctk_toyfit__7__JpsiFit_OnlyBackground_1BIN_bin0_Run12_SimultaneousFit_folding3_HighBmass_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Bkg/7/ctl_toyfit__7__JpsiFit_OnlyBackground_1BIN_bin0_Run12_SimultaneousFit_folding3_HighBmass_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Bkg/7/phi_toyfit__7__JpsiFit_OnlyBackground_1BIN_bin0_Run12_SimultaneousFit_folding3_HighBmass_AllPDFs.eps}\\
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Bkg/7/ctk_toyfit__7__JpsiFit_OnlyBackground_1BIN_bin0_Run12_SimultaneousFit_folding4_HighBmass_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Bkg/7/ctl_toyfit__7__JpsiFit_OnlyBackground_1BIN_bin0_Run12_SimultaneousFit_folding4_HighBmass_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Bkg/7/phi_toyfit__7__JpsiFit_OnlyBackground_1BIN_bin0_Run12_SimultaneousFit_folding4_HighBmass_AllPDFs.eps}\\
\captionof{figure}[]{Fit with applied folding to 13\,000 pseudoexperiments with only the background component. The black points represent the generated pseudoexperiments, the black line represents the fitted model. The red space represents the background component.}\label{fig:toy-bkgOnly-Ref-fold}
\end{figure}
\begin{textblock*}{23cm}(1.06\textwidth,4.2cm) % {block width} (coords)
\rotatebox{-90}{\centering Folding 0 \hspace{1.85cm} Folding 1 \hspace{1.85cm} Folding 2 \hspace{1.85cm} Folding 3 \hspace{1.85cm} Folding 4}
\end{textblock*}

64
Appendix/appendix_4.tex

@ -0,0 +1,64 @@
\section[Pseudoexperiments]{Pseudoexperiments}\label{app:toys}
\subsection{Large scale pseudoexperiments}\label{app:toys-ref}
\vspace{-0.6cm}
%Folding 0
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.32\textwidth]{FCNC/Toys/626/Fl_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/626/S3_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/626/Afb_bin0_Pulls.eps}\\
\includegraphics[width=0.32\textwidth]{FCNC/Toys/626/S9_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/626/FS_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/626/SS1_bin0_Pulls.eps}
\captionof{figure}[Pull distributions of the \pwave angular moments in reference-like pseudoexperiments with folding 0 applied.]{Pull distributions of the \pwave angular parameters. 500 pseudoexperiments are generated, mimicking the reference \BuToKstJpsi decay. Each pseudoexperiment consists of 65\,000 pseudoevents. In the fit to the pseudoexperiments, folding 0 is applied. Note the small range of the x-axis: seemingly large shift of the mean value is in the order of units of percent.}\label{fig:app-toys-ref-fld0}
\end{figure}
\vspace{-0.4cm}
%-----------------------------------------
%Folding 1
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.32\textwidth]{FCNC/Toys/627/Fl_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/627/S3_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/627/S4_bin0_Pulls.eps}\\
\includegraphics[width=0.32\textwidth]{FCNC/Toys/627/FS_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/627/SS1_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/627/SS2_bin0_Pulls.eps}
\captionof{figure}[Pull distributions of the \pwave angular moments in reference-like pseudoexperiments with folding 1 applied.]{Pull distributions of the \pwave angular parameters. 500 pseudoexperiments are generated, mimicking the reference \BuToKstJpsi decay. Each pseudoexperiment consists of 65\,000 pseudoevents. In the fit to the pseudoexperiments, folding 1 is applied. Note the small range of the x-axis: seemingly large shift of the mean value is in the order of units of percent.}\label{fig:app-toys-ref-fld1}
\end{figure}
%-----------------------------------------
%Folding 2
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.32\textwidth]{FCNC/Toys/628/Fl_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/628/S3_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/628/S5_bin0_Pulls.eps}\\
\includegraphics[width=0.32\textwidth]{FCNC/Toys/628/FS_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/628/SS1_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/628/SS3_bin0_Pulls.eps}
\captionof{figure}[Pull distributions of the \pwave angular moments in reference-like pseudoexperiments with folding 2 applied.]{Pull distributions of the \pwave angular parameters. 500 pseudoexperiments are generated, mimicking the reference \BuToKstJpsi decay. Each pseudoexperiment consists of 65\,000 pseudoevents. In the fit to the pseudoexperiments, folding 2 is applied. Note the small range of the x-axis: seemingly large shift of the mean value is in the order of units of percent.}\label{fig:app-toys-ref-fld2}
\end{figure}
%-----------------------------------------
%Folding 3
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.32\textwidth]{FCNC/Toys/629/Fl_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/629/S3_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/629/S7_bin0_Pulls.eps}\\
\includegraphics[width=0.32\textwidth]{FCNC/Toys/629/FS_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/629/SS1_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/629/SS4_bin0_Pulls.eps}
\captionof{figure}[Pull distributions of the \pwave angular moments in reference-like pseudoexperiments with folding 3 applied.]{Pull distributions of the \pwave angular parameters. 500 pseudoexperiments are generated, mimicking the reference \BuToKstJpsi decay. Each pseudoexperiment consists of 65\,000 pseudoevents. In the fit to the pseudoexperiments, folding 3 is applied. Note the small range of the x-axis: seemingly large shift of the mean value is in the order of units of percent.}\label{fig:app-toys-ref-fld3}
\end{figure}
%-----------------------------------------
%Folding 4
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.32\textwidth]{FCNC/Toys/650/Fl_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/650/S3_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/650/S8_bin0_Pulls.eps}\\
\includegraphics[width=0.32\textwidth]{FCNC/Toys/650/SS4_bin0_Pulls.eps}
\captionof{figure}[Pull distributions of the \pwave angular moments in reference-like pseudoexperiments with folding 4 applied.]{Pull distributions of the \pwave angular parameters. 500 pseudoexperiments are generated, mimicking the reference \BuToKstJpsi decay. Each pseudoexperiment consists of 65\,000 pseudoevents. In the fit to the pseudoexperiments, folding 4 is applied.}\label{fig:app-toys-ref-fld4}
\end{figure}
%-----------------------------------------

61
Appendix/appendix_5.tex

@ -0,0 +1,61 @@
\subsection{Realistic scale pseudoexperiments}\label{app:toys-rare}
%No folding
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.32\textwidth]{FCNC/Toys/631/Fl_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/631/S3_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/631/S4_bin0_Pulls.eps}\\
\includegraphics[width=0.32\textwidth]{FCNC/Toys/631/S5_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/631/Afb_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/631/S7_bin0_Pulls.eps}\\
\includegraphics[width=0.32\textwidth]{FCNC/Toys/631/S8_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/631/S9_bin0_Pulls.eps}
\captionof{figure}[Pull distributions of the \pwave angular moments in rare-like pseudoexperiments.]{Pull distributions of the \pwave angular parameters. 500 pseudoexperiments are generated, mimicking the rare \BuToKstmm decay. Each pseudoexperiment consists of 871 pseudoevents distributed among \qsq bins. A full angular fit is performed. Note the small range of the x-axis: the seemingly large shift of the mean value is in the order of units of percent.}\label{fig:app-toys-sig}
\end{figure}
%-----------------------------------------
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.32\textwidth]{FCNC/Toys/632/Fl_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/632/S3_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/632/Afb_bin0_Pulls.eps}\\
\includegraphics[width=0.32\textwidth]{FCNC/Toys/632/S9_bin0_Pulls.eps}
\captionof{figure}[Pull distributions of the \pwave angular moments in rare-like pseudoexperiments with folding 0 applied.]{Pull distributions of the \pwave angular parameters. 500 pseudoexperiments are generated, mimicking the rare \BuToKstmm decay. Each pseudoexperiment consists of 871 pseudoevents distributed among \qsq bins. In the fit to the pseudoexperiments, folding 0 is applied. Note the small range of the x-axis: the seemingly large shift of the mean value is in the order of units of percent.}\label{fig:app-toys-sig-fld0}
\end{figure}
%-----------------------------------------
%Folding 1
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.32\textwidth]{FCNC/Toys/633/Fl_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/633/S3_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/633/S4_bin0_Pulls.eps}\\
\captionof{figure}[Pull distributions of the \pwave angular moments in rare-like pseudoexperiments with folding 1 applied.]{Pull distributions of the \pwave angular parameters. 500 pseudoexperiments are generated, mimicking the rare \BuToKstmm decay. Each pseudoexperiment consists of 871 pseudoevents distributed among \qsq bins. In the fit to the pseudoexperiments, folding 1 is applied. Note the small range of the x-axis: the seemingly large shift of the mean value is in the order of units of percent.}\label{fig:app-toys-sig-fld1}
\end{figure}
%-----------------------------------------
%Folding 2
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.32\textwidth]{FCNC/Toys/634/Fl_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/634/S3_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/634/S5_bin0_Pulls.eps}\\
\captionof{figure}[Pull distributions of the \pwave angular moments in rare-like pseudoexperiments with folding 2 applied.]{Pull distributions of the \pwave angular parameters. 500 pseudoexperiments are generated, mimicking the rare \BuToKstmm decay. Each pseudoexperiment consists of 871 pseudoevents distributed among \qsq bins. In the fit to the pseudoexperiments, folding 2 is applied. Note the small range of the x-axis: the seemingly large shift of the mean value is in the order of units of percent.}\label{fig:app-toys-sig-fld2}
\end{figure}
%-----------------------------------------
%Folding 3
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.32\textwidth]{FCNC/Toys/635/Fl_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/635/S3_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/635/S7_bin0_Pulls.eps}\\
\captionof{figure}[Pull distributions of the \pwave angular moments in rare-like pseudoexperiments with folding 3 applied.]{Pull distributions of the \pwave angular parameters. 500 pseudoexperiments are generated, mimicking the rare \BuToKstmm decay. Each pseudoexperiment consists of 871 pseudoevents distributed among \qsq bins. In the fit to the pseudoexperiments, folding 3 is applied. Note the small range of the x-axis: the seemingly large shift of the mean value is in the order of units of percent.}\label{fig:app-toys-sig-fld3}
\end{figure}
%-----------------------------------------
%Folding 4
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.32\textwidth]{FCNC/Toys/636/Fl_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/636/S3_bin0_Pulls.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/636/S8_bin0_Pulls.eps}\\
\captionof{figure}[Pull distributions of the \pwave angular moments in rare-like pseudoexperiments with folding 4 applied.]{Pull distributions of the \pwave angular parameters. 500 pseudoexperiments are generated, mimicking the rare \BuToKstmm decay. Each pseudoexperiment consists of 871 pseudoevents distributed among \qsq bins. In the fit to the pseudoexperiments, folding 4 is applied.}\label{fig:app-toys-sig-fld4}
\end{figure}
%-----------------------------------------

123
Chapters/Acceptance/acceptance.tex

@ -0,0 +1,123 @@
\section{Acceptance effects}\label{sec:Accept}
An accurate measurement of the angular distribution of the \BuToKstmm decay in different \qsq regions requires a correction of the acceptance effects. The non-flat angular acceptance is given by the geometry of the \lhcb detector and by the event selection. A dedicated simulation sample is used to ascertain this effect. In this simulation sample, the events are generated with a pure phase-space (PHSP) coupling in the decay chain. Therefore, this set of simulated events is per construction generated with flat distributions in the 4D space of \angles and \qsq. %I will not mention that we have to correct extra for q2, leave this to the ana note
The PHSP simulation sample is used to validate that the event selection does not strongly bias the \angles distributions. Then, the PHSP simulation is corrected to match the data in the same way as described in \refSec{sel-SimulationCorrection}. The last step is the 4D-parametrization of the \angles and \qsq distributions, resulting in weights applied to the data.
\subsection{Simulation with homogeneous phase-space distribution}\label{sec:Accept-PHSP}
In order to precisely describe the acceptance, a PHSP simulation sample is generated for each data-taking year. The number of events passing the event selection is summarized in \refTab{PHSP_events}.
The trigger, central and cut-based selections are heavily influenced by the detector geometry. A dedicated cross-check is done to verify that the multi-variate selection does not depend on the decay angles. This is validated by establishing the efficiency of the MLP $\varepsilon_{MLP}$ in the same way as described in \refSec{sel-Efficiency}.
The MLP efficiency $\varepsilon_{MLP}$ in dependence on \qsq, \ctk, \ctl and $\phi$ is shown in \refFig{eff_MLP_angles}. In the top row, $\varepsilon_{MLP} = \varepsilon_{MLP}(\qsq)$ is shown. A careful reader will notice a small dip at $\sim3\gevgev$. This is purely caused by the detector's acceptance, similarly to the \emph{roof}-like trend in $\varepsilon_{MLP} = \varepsilon_{MLP}(\ctl)$. A large effect is visible in the MLP efficiency in the very-high \ctk region. As \ctk is proportional to the asymmetry between the momentum of \Kstarp meson decay products $\frac{p_{\Kp}-p_{\piz}}{p_{\Kp}+p_{\piz}}\simeq \ctk$, events with very low \piz momentum are more affected by background contributions and hence the efficiency drops at \ctk$\sim1$. %The MLP efficiency dependent on \ctl and $\phi$ is flat.
\begin{table}[hbt!]
\centering
\begin{tabular}{p{2cm}|cccccc}
year & 2011 & 2012 & 2015 & 2016 & 2017 & 2018 \\ \hline
events & 6965 & 15836 & 8431 & 28631 & 31589 & 36307\\
\end{tabular}
\captionof{table}[Number of PHSP signal candidates passing the full selection.]{Number of PHSP signal candidates passing the full selection. For the number of generated PHSP events, see \refTab{ANA-MCsamples}. \label{tab:PHSP_events}} %\runI: 23403, \runII: 112584
\end{table}
\begin{figure}[hbt!] \vspace{-10pt}
\centering
\includegraphics[width=0.43\textwidth]{./Data/Efficiencies/TMVA/PHSP/Run1/KplusPi0Resolved_PHSP_IDTM_rndGamma_weighted_BDT_AloneOnly_Efficiency_Run1_q2_binned.eps}
\includegraphics[width=0.43\textwidth]{./Data/Efficiencies/TMVA/PHSP/Run2/KplusPi0Resolved_PHSP_IDTM_rndGamma_weighted_BDT_AloneOnly_Efficiency_Run2_q2_binned.eps}
\includegraphics[width=0.43\textwidth]{./Data/Efficiencies/TMVA/PHSP/Run1/KplusPi0Resolved_PHSP_IDTM_rndGamma_weighted_BDT_AloneOnly_Efficiency_Run1_phi.eps}
\includegraphics[width=0.43\textwidth]{./Data/Efficiencies/TMVA/PHSP/Run2/KplusPi0Resolved_PHSP_IDTM_rndGamma_weighted_BDT_AloneOnly_Efficiency_Run2_phi.eps}
\includegraphics[width=0.43\textwidth]{./Data/Efficiencies/TMVA/PHSP/Run1/KplusPi0Resolved_PHSP_IDTM_rndGamma_weighted_BDT_AloneOnly_Efficiency_Run1_thetak_equal.eps}
\includegraphics[width=0.43\textwidth]{./Data/Efficiencies/TMVA/PHSP/Run2/KplusPi0Resolved_PHSP_IDTM_rndGamma_weighted_BDT_AloneOnly_Efficiency_Run2_thetak_equal.eps}
\includegraphics[width=0.43\textwidth]{./Data/Efficiencies/TMVA/PHSP/Run1/KplusPi0Resolved_PHSP_IDTM_rndGamma_weighted_BDT_AloneOnly_Efficiency_Run1_thetal_equal.eps}
\includegraphics[width=0.43\textwidth]{./Data/Efficiencies/TMVA/PHSP/Run2/KplusPi0Resolved_PHSP_IDTM_rndGamma_weighted_BDT_AloneOnly_Efficiency_Run2_thetal_equal.eps}
\captionof{figure}[MLP efficiency versus \angleDist and \qsq from PHSP simulation.]{MLP efficiency per Run obtained from PHSP simulation sample as a function of \angleDist and \qsq. The dip of $\varepsilon_{MLP}$ at $\ctk\sim1$ is caused by events with very low neutral pion momentum $p_\piz$.\label{fig:eff_MLP_angles}}
\end{figure}
\clearpage
\subsection{Parametrization of the angular acceptance}\label{sec:Accept-parametrizaiton}
In order to obtain the correction weights, the PHSP sample, flat in \angles and \qsq, is parametrized using Legendre polynomials~\cite{FIT-legendre}. As the four observables do not factorize, the polynomial takes the form of
%
\begin{equation}\label{eq:legendre_eff}
\epsilon(\ctl, \ctk, \phi, \qsq) = \sum_{l,m,n,o} c_{lmno} \times P_l(\qsq) \times P_m(\ctl) \times P_n(\ctk) \times P_o(\phi) \,,
\end{equation}
%
%\begin{align}\label{eq:legendre_eff}
% \epsilon(\ctl, \ctk, \phi, \qsq) & = \sum_{l,m,n,o} c_{lmno} \times P_l(\qsq) \times P_m(\ctl) \times P_n(\ctk) \times P_o(\phi) \\
% & = \sum_{h,i,j,k} c_{hijk} \times \left(\qsq\right)^h \times \left( \ctl \right)^i \times \left( \ctk \right)^j \times \left( \phi \right)^k \,,
%\end{align}
%
where $P_{l,m,n,o}$ are Legendre polynomials of orders $l, m, n$ and $o$. The maximal order of the polynomial is chosen in a way that the polynomial describes the acceptance well while avoiding picking up statistical fluctuations in the PHSP simulation sample. Moreover, the parametrization in $\phi$ is forced to be symmetric. The possible asymmetry in the $\phi$ distribution is smeared out by integrating over \Bu and \Bub meson decays as well as the reversal of polarity of the bending magnet.
The parametrization is obtained individually for each Run. The maximal order of the polynomial is optimized using the \chisq-goodness of the parametrization (see \refFig{app_chisq}) and visual inspection of the projections (see \refFig{angProj_Run1}, \refFig{angProj_Run2} and \refApp{AngCorr}). It is evident from \refFig{app_chisq} that there is no clear best maximal order of the polynomial. This should be taken into account as a systematic uncertainty and it is discussed later in \refSec{toy-sig}. The order of the Legendre polynomial describing the PHSP simulation sample well is found to be six in \ctk, three in \ctl, flat in $\phi$ and seven in \qsq. The higher order of the \ctk polynomial is caused by the very low acceptance in the high \ctk region arising from the high background contribution in the low \piz momentum region. The acceptance at very high \ctk is essentially zero. This leads to huge weights destabilizing the angular fit later on. For this reason, the \ctk range is limited to $[-1.0,0.8]$. The \ctk range is further limited in the case of applying folding 4 defined in \refSec{ANA_folding}: in order to be able to fold in the \ctk dimension, only candidates with \ctk$\in[-0.8,0.8]$ are considered.
The final form of the Legendre polynomial takes the form of
\begin{equation}\label{eq:legendre_eff_final}
\epsilon(\ctk, \ctl, \phi, \qsq) = \sum_{l=0}^{6}\sum_{m=0}^{3}\sum_{o=0}^{7} c_{lm0o} \times P_l(\ctk) \times P_m(\ctl) \times P_0(\phi) \times P_o(\qsq) \,.
\end{equation}
Finally, in order to correct the data for the angular acceptance, each event is weighted with the weight $w$
\begin{equation}\label{eq:angularAcc_weight}
w(\ctl, \ctk, \phi, \qsq) = \frac{1}{\epsilon(\ctl, \ctk, \phi, \qsq)}\,.
\end{equation}
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.48\textwidth]{./Angular/scan/ScanChi2MaxOrderPolynomRun1.eps}
\includegraphics[width=0.48\textwidth]{./Angular/scan/ScanChi2MaxOrderPolynomRun2.eps}
\captionof{figure}[Angular acceptance parametrization \chisq-goodness scan.]{Angular acceptance parametrization \chisq-goodness scan for \runI (left) and \runII (right). The numbers on the axis correspond to the applied order of the Legendre polynomial for the given variables. Note that the \chisq values for each of the parametrization are very close to each other: there is no preference of the order of the polynomial in the orders considered here. \label{fig:app_chisq}}
\end{figure}
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.45\textwidth]{./Angular/projections/ctkeff_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.45\textwidth]{./Angular/projections/ctleff_KplusPi0Resolved_Run1.eps}\\
\includegraphics[width=0.45\textwidth]{./Angular/projections/phieff_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.45\textwidth]{./Angular/projections/q2eff_KplusPi0Resolved_Run1.eps}
\captionof{figure}[Angular acceptance parametrization projections for \runI.]{One-dimensional projections of the angular acceptance. The data points are \runI PHSP simulation, the solid curve is the four dimensional Legendre-polynomial parametrization described by \refEq{legendre_eff_final}. \label{fig:angProj_Run1}}
\end{figure}
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.45\textwidth]{./Angular/projections/ctkeff_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.45\textwidth]{./Angular/projections/ctleff_KplusPi0Resolved_Run2.eps}\\
\includegraphics[width=0.45\textwidth]{./Angular/projections/phieff_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.45\textwidth]{./Angular/projections/q2eff_KplusPi0Resolved_Run2.eps}
\captionof{figure}[Angular acceptance parametrization projections for \runII.]{One-dimensional projections of the angular acceptance. The data points are \runII PHSP simulation, the solid curve is the four dimensional Legendre-polynomial parametrization described by \refEq{legendre_eff_final}. \label{fig:angProj_Run2}}
\end{figure}
%\subsubsection{Angular resolution}\label{sec:Accept-resolution}
%\begin{figure}[hbt!]
% \centering
% \includegraphics[width=0.19\textwidth]{./Angular/resolution/Reso_ctk_2011.eps}
% \includegraphics[width=0.19\textwidth]{./Angular/resolution/Reso_ctk_2012.eps}
% \includegraphics[width=0.19\textwidth]{./Angular/resolution/Reso_ctk_2016.eps}
% \includegraphics[width=0.19\textwidth]{./Angular/resolution/Reso_ctk_2017.eps}
% \includegraphics[width=0.19\textwidth]{./Angular/resolution/Reso_ctk_2018.eps}\\
% \includegraphics[width=0.19\textwidth]{./Angular/resolution/Reso_ctl_2011.eps}
% \includegraphics[width=0.19\textwidth]{./Angular/resolution/Reso_ctl_2012.eps}
% \includegraphics[width=0.19\textwidth]{./Angular/resolution/Reso_ctl_2016.eps}
% \includegraphics[width=0.19\textwidth]{./Angular/resolution/Reso_ctl_2017.eps}
% \includegraphics[width=0.19\textwidth]{./Angular/resolution/Reso_ctl_2018.eps}\\
% \includegraphics[width=0.19\textwidth]{./Angular/resolution/Reso_phi_2011.eps}
% \includegraphics[width=0.19\textwidth]{./Angular/resolution/Reso_phi_2012.eps}
% \includegraphics[width=0.19\textwidth]{./Angular/resolution/Reso_phi_2016.eps}
% \includegraphics[width=0.19\textwidth]{./Angular/resolution/Reso_phi_2017.eps}
% \includegraphics[width=0.19\textwidth]{./Angular/resolution/Reso_phi_2018.eps}\\
% \captionof{figure}[Angular resolution per year.]{Angular resolution in \angles for each year. \label{fig:angProj_Run2}}
%\end{figure}
\clearpage

48
Chapters/Acceptance/app_angProj_Run1.tex

@ -0,0 +1,48 @@
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff0_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff1_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff2_KplusPi0Resolved_Run1.eps}\\
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff3_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff4_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff5_KplusPi0Resolved_Run1.eps}\\
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff6_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff7_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff8_KplusPi0Resolved_Run1.eps}\\
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff9_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff10_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff11_KplusPi0Resolved_Run1.eps}\\
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff12_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff13_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff14_KplusPi0Resolved_Run1.eps}\\
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff15_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff16_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff17_KplusPi0Resolved_Run1.eps}\\
\captionof{figure}[Angular acceptance parametrization projections in \ctl for \runI.]{One-dimensional projections of the angular acceptance in the dimension of \ctl in 18 bins of \qsq. The data points are \runI PHSP MC, the solid curve is the four dimensional Legendre-polynomial parametrization. \label{fig:app_angProj_ctl_Run1}}
\end{figure}
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff0_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff1_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff2_KplusPi0Resolved_Run1.eps}\\
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff3_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff4_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff5_KplusPi0Resolved_Run1.eps}\\
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff6_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff7_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff8_KplusPi0Resolved_Run1.eps}\\
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff9_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff10_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff11_KplusPi0Resolved_Run1.eps}\\
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff12_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff13_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff14_KplusPi0Resolved_Run1.eps}\\
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff15_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff16_KplusPi0Resolved_Run1.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff17_KplusPi0Resolved_Run1.eps}\\
\captionof{figure}[Angular acceptance parametrization projections in $\phi$ for \runI.]{One-dimensional projections of the angular acceptance in the dimension of $\phi$ in 18 bins of \qsq. The data points are \runI PHSP MC, the solid curve is the four dimensional Legendre-polynomial parametrization. \label{fig:app_angProj_phi_Run1}}
\end{figure}

73
Chapters/Acceptance/app_angProj_Run2.tex

@ -0,0 +1,73 @@
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff0_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff1_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff2_KplusPi0Resolved_Run2.eps}\\
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff3_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff4_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff5_KplusPi0Resolved_Run2.eps}\\
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff6_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff7_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff8_KplusPi0Resolved_Run2.eps}\\
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff9_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff10_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff11_KplusPi0Resolved_Run2.eps}\\
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff12_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff13_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff14_KplusPi0Resolved_Run2.eps}\\
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff15_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff16_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctkeff17_KplusPi0Resolved_Run2.eps}\\
\captionof{figure}[Angular acceptance parametrization projections in \ctk for \runII.]{One-dimensional projections of the angular acceptance in the dimension of \ctk in 18 bins of \qsq. The data points are \runII PHSP MC, the solid curve is the four dimensional Legendre-polynomial parametrization. \label{fig:app_angProj_ctk_Run2}}
\end{figure}
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff0_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff1_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff2_KplusPi0Resolved_Run2.eps}\\
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff3_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff4_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff5_KplusPi0Resolved_Run2.eps}\\
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff6_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff7_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff8_KplusPi0Resolved_Run2.eps}\\
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff9_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff10_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff11_KplusPi0Resolved_Run2.eps}\\
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff12_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff13_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff14_KplusPi0Resolved_Run2.eps}\\
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff15_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff16_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/ctleff17_KplusPi0Resolved_Run2.eps}\\
\captionof{figure}[Angular acceptance parametrization projections in \ctl for \runII.]{One-dimensional projections of the angular acceptance in the dimension of \ctl in 18 bins of \qsq. The data points are \runII PHSP MC, the solid curve is the four dimensional Legendre-polynomial parametrization. \label{fig:app_angProj_ctl_Run2}}
\end{figure}
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff0_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff1_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff2_KplusPi0Resolved_Run2.eps}\\
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff3_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff4_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff5_KplusPi0Resolved_Run2.eps}\\
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff6_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff7_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff8_KplusPi0Resolved_Run2.eps}\\
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff9_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff10_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff11_KplusPi0Resolved_Run2.eps}\\
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff12_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff13_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff14_KplusPi0Resolved_Run2.eps}\\
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff15_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff16_KplusPi0Resolved_Run2.eps}
\includegraphics[width=0.30\textwidth]{./Angular/projections/phieff17_KplusPi0Resolved_Run2.eps}\\
\captionof{figure}[Angular acceptance parametrization projections in $\phi$ for \runII.]{One-dimensional projections of the angular acceptance in the dimension of $\phi$ in 18 bins of \qsq. The data points are \runII PHSP MC, the solid curve is the four dimensional Legendre-polynomial parametrization. \label{fig:app_angProj_phi_Run2}}
\end{figure}

204
Chapters/AnalysisIntroduction/anaIntro.tex

@ -0,0 +1,204 @@
\section{Introduction to the analysis}\label{sec:AnaIntro}
The aim of this thesis is to study angular parameters \Si{i} and \Pprime{i} in the decay of \BuToKstmm where \KstToKpPi. This chapter gives an overview of the analysis procedure. The biggest obstacle in this analysis is the presence of a neutral pion in the final state. Therefore, the reconstruction of the neutral pion is discussed in detail. Then, as discussed thoroughly in \refSec{SM_bsll}, the split of data into different classes based on the dimuon invariant mass squared \qsq is explained. Lastly, the simulation samples used in this analysis are listed.
\subsection{Analysis strategy}\label{sec:AnaIntro-strategy}
The analysis uses data from the full \runI and \runII data taking periods. During this time, the \lhcb experiment collected a dataset corresponding to an integrated luminosity of 9\invfb. The integrated luminosity $\int$\lum, the beam energy \Ebeam and the center-of-mass energy \sqs for each data-taking year is given in \refTab{ANA-dataset}.
In the first step of this analysis the recorded data is filtered to select only events containing the signal decay. The decay of \BuToKstmm is a rare decay with a branching fraction $\BR_{\BuToKstmm}=(9.6\pm1.0)\times10^{-7}$~\cite{PDG}. In order to get the full branching fraction of the \BuToKstmmFull decay, the branching fraction is multiplied by the branching fraction of $\BR_{\KstToKpPi}=0.5$~\cite{PDG}. This leads to very strict requirements on data selection: the background rejection needs to be as high as possible while keeping high signal selection efficiency.
At first, the data is selected centrally: events have to pass the trigger (online) selection and then the centrally-processed selection called \emph{stripping}. After this, specific preselection cuts are applied.
%
\begin{table}[hbt!]
\begin{center}\begin{tabular}{cccc} %\hline
year &$\int$\lum\,[fb\textsuperscript{-1}] &\Ebeam\,[TeV] & \sqs\,[TeV]\\
\hline
2011 &1.11 &3.5 &7.0\\
2012 &2.08 &4.0 &8.0\\
2015 &0.33 &6.5 &13.0\\
2016 &1.67 &6.5 &13.0\\
2017 &1.71 &6.5 &13.0\\
2018 &2.19 &6.5 &13.0\\
%\hline
\end{tabular}\end{center}
\captionof{table}[Data recording conditions for \lhcb in the years 2011-2018.]{
Data recording conditions for \lhcb in the years 2011-2018. For each year, the recorded integrated luminosity $\int$\lum, beam energy \Ebeam and center-of-mass energy $\sqs$ are given. \label{tab:ANA-dataset}
}
\end{table} %\vspace{\baselineskip}
%
The remaining background is studied and identified with the help of multi-variate classifiers. The selection is validated using the \BuToKstJpsi decay.
This \emph{reference} decay has the same final-state particles as the \emph{signal} decay \BuToKstmm and very similar kinematics. Moreover, due to the branching ratio of \BR(\BuToKstJpsi, \JpsiTomm)$=(1.43\times10^{-3})\times(5.96\times10^{-2})=8.52\times10^{-5}$~\cite{PDG}, the reference channel is $\sim$ 200 times more abundant than the signal channel.
After choosing the optimal selection criteria, the angular acceptance correction is applied. The \lhcb acceptance covers only the forward region. Moreover, all the subdetectors cover different regions in phase-space. For the accurate measurement of angles, angular acceptance corrections are crucial.
Lastly, the measured angle distributions are fitted in order to extract the \Si{i} parameters. As one can see from \refEq{decay_rate_final}, the distribution is not trivial and the fit requires careful approach, especially given the limited size of the sample. The fit is done in the reconstructed mass of the \Bu meson \BMass, the reconstructed mass of the \Kstarp meson \KstarMass and \ctk, \ctl and $\phi$ dimensions. The reference channel is used to validate the fitter framework. A pseudoexperiment study is performed in order to examine the possible sensitivity of this measurement.
\subsection{The neutral pion reconstruction}\label{sec:AnaIntro-piz}
It is mentioned in \refSec{ANA_Theo} that the available measurements of the \BuToKstmm decay are not as extensive as the measurements of the \BdToKstmm decay. The main reason is the intrinsic property of all high-energy detectors: the reconstruction of neutral particles is non-trivial and therefore not very effective. As \Kstarz decays to \Kp\pim, the final state contains four charged particles. The detection of those particles is rather simple\footnote{\Kstarz of course also decays to \Kz\piz, which is extremely difficult to reconstruct. Therefore it is typically omitted from the measurements.}. In the case of \Kstarp meson, it either decays into \Kz\pip or \Kp\piz. For \lhcb, the only relevant \Kz meson is \KS meson, as \KL meson is stopped in the \hcal without leaving any signal in the tracking detectors\footnote{Assuming a boost of $\gamma=20$ $(v\simeq0.999c)$, a free-flying \KL would decay after traveling 300\m.}. \KS mesons on the other hand decay fast enough into a \pip\pim pair, so the \lhcb tracking system is able to register both charged pions\footnote{Once again assuming $\gamma=20$, \KS decays after 53\cm. This means depending on the \KS boost, it either decays inside or outside of \velo, leading to more complications in analyzing this subdecay.}.
This analysis focuses on the decay channel where \KstToKpPi. The \Kp meson is detected by the tracking detectors. The \piz meson typically decays into a \g\g pair (branching ratio of \pizTogg is $\simeq$ 98.8\%~\cite{PDG}). Both photons are registered by the electromagnetic calorimeter. Due to the finite granularity of \ecal, the two photons can either be registered by one or two \ecal cells. A sketch of this is shown in \refFig{piz-principle}.
%
\begin{wrapfigure}[11]{r}{0.48\textwidth} \vspace{-0pt}
\includegraphics[width=0.18\textwidth]{./AnalysisIntro/Resolved.png}\hspace{15pt}%\\ \vspace{30pt}
\includegraphics[width=0.18\textwidth]{./AnalysisIntro/Merged.png}
\captionof{figure}[Neutral pion reconstruction illustration.]
{ Illustration of {\resolved} (left) and \merged (right) \piz mesons reconstruction in the \ecal cells. \label{fig:piz-principle}
}
\end{wrapfigure}
%
The neutral pions reconstructed from photons hitting one \ecal cell are called \emph{merged} pions, the ones reconstructed from two cells \emph{resolved} pions.
For the reconstruction of the \piz meson, proper settings of the electromagnetic calorimeter are essential. This is done in three steps: initial adjustment of \ecal energy scale, energy flow calibration and fine calibration of the \ecal cells~\cite{ANA-CALO}. The methods used for calibration are essentially the same in \runI and \runII. The main changes in \runII are the full automation of the calibration process and skipping the intermediate step.
The initial adjustment of the energy scale is done by adjusting the photo-multipliers' (PMTs) gain using the \ecal's LED monitoring system. An LED light is attached to PMTs generating a known signal. The voltage of the PMTs is adjusted to match the measured and the known signal. This adjustment leads to a precision of the cell-to-cell inter-calibration of 10\%. The reason for this uncertainty is the dispersion in the photoelectron yields and the accuracy of the light yield determination. The LED-based calibration is performed approximately once a week.
Then, the energy flow calibration is performed. This is done in order to smooth the fluctuations in the flux among neighboring cells due to initial miscalibrations. The method is rather simple: one exploits the symmetry of the energy flow of the calorimeter surface~\cite{ANA-CALO-flow}. Simulations with known mis-calibration showed that the flux adjustment improves the calibration by a factor of $\sim3$, assuming an initial precision of the calibration of 10\%.
Ageing negatively affects the \ecal's performance and has to be accounted for. This can be nicely seen from the time variation (decrease) of the reconstructed \piz mass presented in \refFig{piz-calibration}. To account for this effect, fine calibration exploiting the \piz mass is performed. The mean $\pizMass^{reco}$ is obtained from \g\g pairs from minimum-bias events\footnote{Minimum-bias events are events with at least one charged track in the VELO detector or the downstream tracking system.} with low multiplicity to remove possible pile-up events. The photons are reconstructed using $3\times3$ clusters with single photon signals, where the cell with the highest energy deposit is called seed. The seeds are then corrected to match the nominal \piz mass. The effect of this correction is depicted in \refFig{piz-calibration}. This calibration is performed every LHC-running month.
%
\begin{figure}[hbt!] \centering
\includegraphics[width=0.43\textwidth]{./AnalysisIntro/Fig14a.pdf}\hspace{10pt}
\includegraphics[width=0.46\textwidth]{./AnalysisIntro/PizCalib.pdf}
\captionof{figure}[\ecal calibration using the mass of \piz meson.]
{On the left, fitted neutral pion mass \pizMass as a function of run number (time) using 2011 data. The \piz mass is 135\mev~\cite{PDG}. The clear decrease in the \pizMass value is due to the ECAL ageing. On the right, invariant mass distribution for \pizTogg candidates used for the fine calibration. The red curve corresponds to the distribution before applying the fine calibration, the blue curve is the final \pizMass distribution. Values in the boxes are the mean and width of the signal peak distribution in \mev before (red box) and after (blue box) applying the \pizMass calibration. Taken from~Ref.\,\cite{ANA-piz-reco2}.
\label{fig:piz-calibration}
}
\end{figure}
For this analysis, only resolved \piz mesons are used. Merged \piz mesons tend to have higher momenta (as the higher boost results in more collimated photons). In this work, where the \piz mesons come from a \Kstarp meson, the statistical contribution of these events is low. The \piz mesons originating from the \Kstar typically have a transverse momentum of a few gigaelectronvolts. In \refFig{piz-eff} left the higher abundance of resolved \piz mesons at lower momentum is shown. As merged and resolved \piz require their own careful approach, merged \piz mesons are not included in the analysis.
\begin{figure} [hbt!]\centering
\includegraphics[width=0.45\textwidth]{./AnalysisIntro/piz_resolved_merged2.png}
\includegraphics[width=0.45\textwidth]{./AnalysisIntro/piz_eff2.png}
\captionof{figure}[Transverse momentum distribution and reconstruction efficiency of \piz.]
{On the left, transverse momentum distributions of \merged (red) and \resolved (blue) \piz in the \lhcb acceptance originating from \Bd\to\pip\pim\piz decay.
On the right, the overall \merged (dashed line) and \resolved (full line) \piz efficiency (number of identified \piz / number of \piz in detector acceptance with $\pt^{\piz}>200$\mev). The black points represent the overall efficiency for both \resolved and \merged neutral pions. Taken from~Ref.\,\cite{ANA-piz-reco2}.\label{fig:piz-eff}
}
\end{figure}
The mass resolution of a decay including a \piz meson can be seen in \refFig{piz-resolution}. In this case, the \Dz\to\Km\pip\piz candidate mass is reconstructed using resolved and merged pions. It is clearly visible that the mass resolution of the \Dz meson candidate is better for resolved \piz meson.
Moreover, the \ecal resolution effects come into play. The resolution of \ecal is $\sigma/E = 0.1/\sqrt{E}\oplus0.01$, which is a very good resolution for a sampling calorimeter. The advantage is that the resolution \emph{decreases} with \emph{increasing} deposited energy. However, for low-energy photons this does not bring any asset.
To summarize, we expect the neutral pion mass peak to be wide and to be affected by the combinatorial background coming from the \ecal cells. As we focus on resolved \piz, combinatorial background contributions from \g\g combinations are expected. On the other hand, the usage of resolved \piz improves the particle identification as we have information from two cells: the probability of misidentifying a random photon as a \piz meson is lower. The maximal efficiency to reconstruct resolved $\piz$ mesons is $\sim40$\% at low $\pt^\piz$.
%
\begin{figure}[hbt!] \centering
\includegraphics[width=0.45\textwidth]{./AnalysisIntro/fig27a.pdf} \hspace{15pt}
\includegraphics[width=0.45\textwidth]{./AnalysisIntro/fig27b.pdf}
\captionof{figure}[Mass distribution of the reconstructed \Dz\to\Km\pip\piz candidates.]
{Mass distribution of the reconstructed \Dz\to\Km\pip\piz candidates with resolved \piz (left) and merged \piz (right) obtained from the 2011 data sample. The blue curve corresponds to a fit. The signal component of the fit function (red dashed line) and the background (green dash-dotted line) contributions are shown. One can easily see the mass resolution of the \Dz candidate is much worse for merged \piz. Taken from~Ref.\,\cite{ANA-piz-reco2}.\label{fig:piz-resolution}
}
\end{figure}
\vspace{-\baselineskip}
\subsection{Binning in the dimuon invariant mass}\label{sec:AnaIntro-qsq}
It is explained in \refSec{SM_bsll} that different physics processes dominate in different \qsq (dimuon invariant mass squared) regions. Therefore, a veto in the resonance regions is applied to eliminate \BuToKstJpsi and \BuToKstPsi decay contributions. Moreover, an additional veto to eliminate the rare \BuToKstPhi decays is introduced. All three resonances \jpsi, \psitwos and $\phi$ are indistinguishable from the signal as they have a very short decay time and therefore they are not displaced enough from the \Bu vertex.
The range of the measured \qsq is limited from below by the squared rest mass of the muon pair and from above by the squared difference of the \Bu and \Kstarp masses, $(\mBu - \mKstarp)^2=19.25\gevgev$. However, as the \lhcb acceptance at very high \qsq is low, the upper limit in this measurement is set to 18\gevgev.
In this work, \qsq is segmented into four signal regions and the three vetoed regions. As a result of the challenging reconstruction there is a smaller statistical significance in the signal yield compared to the previous analyses~\cite{ANA-LHCb-angular3,ANA-LHCb-angular4,ANA-LHCb-angular1,ANA-LHCb-angular2} and the \qsq segmentation is therefore coarser. These signal regions are larger than the ones chosen in previous analyses. The \qsq bins including the vetoed regions are listed in \refTab{q2-binning}. Furthermore, a wide bin $[1.1,6.0]$ is added on top of the four \qsq bins.
The wide bin is the theoretically favored region to observe New Physics effects and it has been included also in the previous measurements~\cite{ANA-LHCb-angular3,ANA-LHCb-angular4,ANA-LHCb-angular1,ANA-LHCb-angular2}.
\begin{table}[hbt!]
\centering
\begin{tabular}{clc}
bin &\qsq [\gevgev] &veto\\
\hline
1 &[0.1,~ 0.98] &\\
&[0.98, 1.1] &\Pphi\\
1 &[1.1,~ 4.0] &\\
2 &[4.0,~ 8.0] &\\
&[8.0,~ 11.0] &\jpsi\\
3 &[11.0, 12.5] &\\
&[12.5, 15.0] &\psitwos\\
4 &[15.0, 18.0] &\\
5 &[1.1,~ 6.0] &\\
\end{tabular}
\captionof{table}[The binning scheme of the dimuon invariant mass squared \qsq.]
{The binning scheme of the dimuon invariant mass squared \qsq in the angular analysis including the vetoed regions of resonances decaying to \mumu pair. In the first bin, the \Pphi resonance is removed. \label{tab:q2-binning}}
\end{table}
\subsection{Simulation samples}\label{sec:AnaIntro-MC}
In order to study the effects of the detector response and to identify possible backgrounds, several Monte Carlo simulation samples are used in the analysis. The exhaustive list of the MC samples is presented in \refTab{ANA-MCsamples}. The two main samples consist of the signal decay \BuToKstmm and the reference decay \BuToKstJpsi.
In order to study the acceptance of the detector, a PHase SPace (PHSP) Monte Carlo sample is used. This sample neglects the spin structure of the decay, reflecting only the kinematic properties of the decay. This effectively means the distributions of the angles \angles are flat. Moreover, an additional requirement is imposed on the sample: the dimuon invariant mass squared \qsq distribution is generated to be flat. The sample is used to understand the angular acceptance in four dimensions of \angles and \qsq.
Lastly, for the background investigation, an inclusive sample of \BuToXJpsi is used, where $X$ stands for any particle that a \Bu can decay into in addition to the \jpsi meson. This is particularly useful for identifying contamination from other decays.
\begin{table}[!htb]
\begin{center}\begin{tabular}{ccx{3cm}} %\hline
MC decay, type & Year & Number of generated events per polarity \\ \hline \hline
\BuToKstmm & 2011 & 50\,000\\
Signal channel & 2012 & 50\,000\\
& 2015 & 100\,000\\
& 2016 & 100\,000\\
& 2017 & 115\,000\\
& 2018 & 120\,000\\ \hline
\BuToKstJpsi & 2011 & 100\,000\\
Reference channel & 2012 & 100\,000\\
& 2015 & 100\,000\\
& 2016 & 100\,000\\ \hline
\BuToKstmm & 2011 & 85\,000\\
Phase space & 2012 & 225\,000\\
& 2015 & 95\,000\\
& 2016 & 260\,000\\
& 2017 & 240\,000\\
& 2018 & 290\,000\\ \hline
\BuToXJpsi & 2011 & 250\,000\\
Inclusive sample& 2012 & 250\,000\\
& 2016 & 500\,000
%\hline
\end{tabular}\end{center}
\captionof{table}[Monte Carlo simulation samples used in this work.]{
Monte Carlo simulation samples used in this work. The bending magnet polarity is regularly flipped during the data taking (see \refSec{det_tracking_vertexing}). Therefore, two samples, one for each polarity configuration, are produced. \label{tab:ANA-MCsamples}
}
\end{table}
%
%\begin{table}[!htb]
% \begin{center}\begin{tabular}{ccccc} %\hline
% MC decay, type & Year & SimVer & Number of generated events (Down+up) \\ \hline \hline
% \BuToKstmm & 2011 & Sim09a & 507551 + 502787 \\
% Signal channel & 2012 & Sim09a & 514015 + 500458 \\
% & 2015 & Sim09i & 1033424 + 1027977 \\
% & 2016 & Sim09i & 1045028 + 1013635 \\
% & 2017 & Sim09e & 1151738 + 1153816 \\
% & 2018 & Sim09h & 1235528 + 1196153 \\ \hline
% \BuToKstJpsi & 2011 & Sim09a & 1011831 + 1007920 \\
% Reference channel & 2012 & Sim09a & 1003888 + 1000278 \\
% & 2015 & Sim09e & 1007712 + 1009484 \\
% & 2016 & Sim09e & 1010609 + 1000281 \\ \hline
% \BuToKstmm & 2011 & Sim09f & 85736 + 86004 \\
% Phase space & 2012 & Sim09f & 226933 + 226430 \\
% & 2015 & Sim09h & 95323 + 94980 \\
% & 2016 & Sim09f & 264917 + 267674 \\
% & 2017 & Sim09f & 246457 + 242673 \\
% & 2018 & Sim09f & 291850 + 299252 \\ \hline
% \BuToXJpsi & 2011 & Sim08c & 2508491 + 2514495 \\
% Inclusive sample& 2012 & Sim08a & 2504990 + 2535488 \\
% & 2016 & Sim09b & 5090001 + 6055765\\
%
% %\hline
% \end{tabular}\end{center}
%
% \captionof{table}[Monte Carlo simulaiton samples used in this work.]{
% Monte Carlo simulaiton samples used in this work. \label{tab:ANA-MCsamples}
% }
%\end{table}
\clearpage

285
Chapters/AnalysisTheory/anaTheroy.tex

@ -0,0 +1,285 @@
\section[Theoretical introduction to \texorpdfstring{${\BuToKstmm}$}{BuToKstmm}]
{Theoretical introduction to \texorpdfstring{$\BuToKstmmBF$}{BuToKstmm}}\label{sec:ANA_Theo}
\vspace{-5pt}
The theory of \bsll transitions is generally introduced in \refSec{SM_bsll}. In this section, this general introduction is broadened using the specific example of the \BuToKstmm decay. Note that throughout the thesis the inclusion of charge-conjugate decays (\ie\xspace \BuToKstmmCH) is implied. The vector meson \Kstarp decays further into a \Kp\piz pair. The \Kstarp meson considered in this work is the $\Kstarp(892)$ resonance with a natural width of $50.8\pm0.8\mev$~\cite{PDG}, spin 1, and parity $-1$.
%The \BuToKstmm decay is a FCNC transition. Therefore it can only occur through loop diagrams in the SM. The lowest SM diagrams that contribute to the decay are two penguin diagrams exchanging a photon or a \Z boson and a box diagram exchanging two $\W$ bosons (see \refFig{penguin_bsll}). The amplitudes can be expressed in terms of Wilson coefficients \C7, \C9 and \C10.
The decay of \BuToKstKppizmm was first observed by the BaBar collaboration~\cite{ANA-KstMuMuDecayEvidence}. As the final state of the decay contains neutral particles, which are challenging to reconstruct, the angular analysis including this decay has been so far only performed by BaBar~\cite{ANA-BaBarAngDist} and Belle~\cite{ANA-BelleAngDist}. %Recently, studies of \BuToKstKspimm have been carried out by the LHCb~\cite{ANA-LHCb-angular4}, and CMS~\cite{ANA-CMS-angular} collaborations.
The \BuToKstmm decay is a FCNC transition. Therefore it can only occur through loop diagrams in the SM. The lowest SM diagrams that contribute to the decay are two penguin diagrams exchanging a photon or a \Z boson and a box diagram exchanging two $\W$ bosons, as shown in \refFig{penguin_bsll}. % The amplitudes can be expressed in terms of Wilson coefficients \C7, \C9 and \C10.
Looking back at \refSec{SM_bsll}, the contribution to the effective hamiltonian is polluted by QCD contributions (see \refFig{bsll_meson}). One way to validate the form-factor corrections to the decay is to change the \emph{spectator} quark in the decay: swapping the \uquark quark and \dquark quark changes the decay from \BuToKstmm to \BdToKstmm. Hence, it is important to study the \BuToKstmm decay and compare the results to previous extensive measurements of \BdToKstmm decay.
\subsection{Decay topology}\label{sec:ANA_Theo_Topo}
Due to the spin structure of the decay \BuToKstmm, the differential decay rate can be fully expressed using only four variables: the dimuon invariant mass squared \qsq and the three angles defined by the direction of flight of the decay products: \thetak, \thetal and $\phi$. These angles are shown in \refFig{anglesB+}. Denoting the normalized vector of a particle X in the rest frame of Y, $\hat{p}_X^{(Y)}$, the angles can be defined as in \refEq{anglesB+}:
This definition is compatible with previous \lhcb measurements~\cite{ANA-LHCb-angular3, ANA-LHCb-angular4, ANA-LHCb-angular1,ANA-LHCb-angular2}.
%
\begin{align} \label{eq:anglesB+}\begin{split}
\cos\thetal =& \left(\hat{p}_{\mupm}^{(\mumu)}\right) \cdot \left(\hat{p}_{\mumu}^{(\Bpm)}\right)
= \left(\hat{p}_{\mupm}^{(\mumu)}\right) \cdot \left(-\hat{p}^{\mumu}_{(\Bpm)}\right)\,,\\
%
\cos\thetak =& \left(\hat{p}_{\Kpm}^{(\Kstarpm)}\right) \cdot
\left(\hat{p}_{\Kstarpm}^{(\Bpm)}\right) = \left(\hat{p}_{\Kpm}^{(\Kstarpm)}\right) \cdot
\left(-\hat{p}^{\Kstarpm}_{(\Bpm)}\right)\,,\\
%
\cos\phi =& \left[\left(\hat{p}_{\mupm}^{(\Bpm)}\right) \times \left(\hat{p}_{\mump}^{(\Bpm)}\right)\right]
\cdot \left[\left(\hat{p}_{\Kpm}^{(\Bpm)}\right) \times \left(\hat{p}_{\piz}^{(\Bpm)}\right)\right]\,,\\
%
\sin\phi =& \left[\left(\hat{p}_{\mupm}^{(\Bpm)}\right) \times \left(\hat{p}_{\mump}^{(\Bpm)}\right)\right]
\times \left[\left(\hat{p}_{\Kpm}^{(\Bpm)}\right) \times \left(\hat{p}_{\piz}^{(\Bpm)}\right)\right]
\cdot \left(\hat{p}_{\Kstarpm}^{(\Bpm)}\right)\,.\\
%
\end{split} \end{align}
%
\begin{figure}[htb!] \centering
%\hspace{-20pt}
\includegraphics[width=0.65\textwidth]{./AnalysisTheory/anglesB+.pdf}
\captionof{figure}[Definition of the angles in the \BuToKstmm decay.]
{
Definition of the angles in the \BuToKstmm decay. The angle \thetal is
defined as the angle between the \mup flight direction in the \mup\mun rest frame and the flight direction of the \mup\mun pair in the \Bu meson rest frame. Similarly, \thetak is defined as the angle between the \Kp in the rest frame of \Kstarp and the flight direction of \Kstarp in the
\Bu meson rest frame. Finally, the angle $\phi$ is the angle between the normal vector of the \Kp\piz system and the normal vector of the \mup\mun system. \label{fig:anglesB+}
}
\end{figure}
\subsection{Differential decay rate}\label{sec:ANA_Theo_BR}
As mentioned in the previous section, the differential decay rate of \BuToKstmm can be fully expressed using only four variables: the dimuon invariant mass squared \qsq and the angles \thetak, \thetal and $\phi$. The decay rate then takes the form of
%
\begin{equation}\label{eq:decay_rate}
\frac{\deriv^4\Gamma}{\deriv\cos\thetal\deriv\cos\thetak\deriv\phi\deriv q^2} = \frac{9}{32\pi}\sum_i{ J_i\left(q^2\right)f_i\left(\cos\thetal,\cos\thetak,\phi\right)}\,,\\
\end{equation}
%
where $f_i$ are angular terms only depending on the three decay angles. They are given purely by the spin structure of the decay. The coefficients $J_i$ are angular observables depending on the dimuon invariant mass squared \qsq. They 'control' how much the different $f_i$ terms contribute to the differential decay rate. For the full form, see \refApp{ANA-Theo}, \refEq{decay_rate_full}. The $J_i$ coefficients are directly connected to the Wilson coefficients listed in \refTab{SM_Wilson_sensitivity}. For the exact relation formulas, see~Ref.\,\cite{ANA-wilsonJi}.
Similarly, the decay rate of \BuToKstmmCH can be expressed in terms of $\bar{J_i}$. Given the CP asymmetry of this decay in the Standard Model is negligibly small, it is useful to define a set of \emph{CP averaged} angular observables $S_i$ instead of having a set of $J_i$ and $\bar{J_i}$:
%
\begin{equation}\label{eq:Si_definition}
S_i = \frac{J_i+\bar{J_i}}{\Gamma+\bar{\Gamma}}\,.\\
\end{equation}
The dependence of the decay rate on the dimuon invariant mass squared \qsq is sketched in \refFig{q2_theory}. At $q^2\approx0$ the dominating operator is \Ope7. Then, in the region $1\gevgev\lesssim\qsq\lesssim8\gevgev$, the interference of \Ope7 and \Ope9 plays a role. In the region of charm resonances, the decay is dominated by tree-level diagrams. Above the resonances, $q^2\gtrsim15\gevgev$, the operators \Ope9 and \Ope10 dominate. The observables are measured in bins of \qsq. In each bin, the \qsq-averaged observables are defined as
%
\begin{equation}\label{eq:S_i}
\langle S_i\rangle \left(q_{min},q_{max}\right)
= \frac{\int_{q_{min}}^{q_{max}}\deriv q^2
\left(J_i + \bar{J_i}\right)}{\int_{q_{min}}^{q_{max}}\deriv q^2 \frac{\deriv\left(\Gamma + \bar{\Gamma}\right)}{\deriv q^2}}\,.
\end{equation}
Following \refEq{decay_rate}, the available $S_i$ parameters are $S_{1s,6s}$ and $S_{3,4,5,7,8,9}$. This basis is also convenient from the experimental point of view: as the \BuToKstmm decay is a rare decay, measuring the $S_i$ rather than the $J_i$ and the $\bar{J}_i$ observables effectively doubles the signal yield. The $S_i$ observables are (linearly) connected to two historical observables: the forward-backward asymmetry of the \mumu pair, \AFB, and longitudinal polarization of \Kstar, \FL:
%
\begin{align}\label{eq:FLAFB_definition}\begin{split}
F_L &= 1 - \frac{4}{3} S_{1s}\,,\\
A_{FB} &= \frac{3}{4} S_{6s}\,.\\
\end{split}\end{align}
As mentioned in \refSec{SM_bsll}, there is a non-negligible form-factor contribution to the decay rate. The influence of form-factor uncertainties can be transformed in a way that the theoretical uncertainties mostly cancel when studying a \emph{single} parameter. The uncertainties are then shifted to other observables. Taking into account all correlations between the angular moments, this basis does not bring any advantage. In the scope of this work the main advantage of this basis is the possibility of a direct comparison to previous \lhcb measurements and measurements of the angular observables performed by other experiments. The basis is expressed as a set of \Pprime{i} observables and \FL:
%
%\begin{align}\label{eq:P'_definition}\begin{split}
%P'_1 & = \frac{S_3}{1-F_L}\,,\\
%P'_2 & = \frac{S_{6s}}{1-F_L}\,,\\
%P'_3 & = \frac{S_9}{1-F_L}\,,\\
%P^{'}_4 & = \frac{S_4}{\sqrt{F_L\left(1-F_L\right)}}\,,\\
%P^{'}_5 & = \frac{S_5}{\sqrt{F_L\left(1-F_L\right)}}\,,\\
%P^{'}_6 & = \frac{S_7}{\sqrt{F_L\left(1-F_L\right)}}\,,\\
%P^{'}_8 & = \frac{S_8}{\sqrt{F_L\left(1-F_L\right)}}\\
%\end{split}\end{align}
%
\begin{align}\label{eq:P'_definition}\begin{split}
P'_1 = \frac{S_3}{1-F_L}\,, \qquad\qquad\qquad P^{'}_4 & = \frac{S_4}{\sqrt{F_L\left(1-F_L\right)}}\,,\\
P'_2 = \frac{S_{6s}}{1-F_L}\,, \qquad\qquad\qquad P^{'}_5 & = \frac{S_5}{\sqrt{F_L\left(1-F_L\right)}}\,,\\
P'_3 = \frac{S_9}{1-F_L}\,, \qquad\qquad\qquad P^{'}_6 & = \frac{S_7}{\sqrt{F_L\left(1-F_L\right)}}\,,\\
P^{'}_8 & = \frac{S_8}{\sqrt{F_L\left(1-F_L\right)}}\\
\end{split}\end{align}
\subsection{\swave contribution}\label{sec:ANA_Theo_SWave}
The decay rate, as described by \refEq{decay_rate_full}, takes into account only the decay \BuToKstmm, where the \Kstar decays via \KstToKpPi. This is referred to as the \pwave.
However, in the measurement one has to consider the possible contributions from other higher \Kstarp resonances, \eg the $K^{*+}_0(1430)$ resonance (\swave). As the \Kstarp resonance is very wide in mass, it is very difficult to distinguish the \swave component from the \pwave in the selection process. By considering only the events with the reconstructed $\Kstarp(892)$ mass being close to the $\Kstarp(892)$ rest mass ($|m_{\Kstar}^{reco}-m_{\Kstar}|<100\mev$), the \swave contribution is suppressed, but not fully eliminated.
The \swave component has a different angular structure and therefore pollutes the angle \angles distributions.
The decay rate of the \swave is
\begin{equation}\label{eq:decay_rate_S}
\left.
\frac{{\rm d}(\Gamma+\bar{\Gamma})}{{\rm dcos}\theta_L\,{\rm dcos}\theta_K\,{\rm d}\phi}
\right |_{\rm S}
=\frac{3}{16\pi}{F_S}\sin^2\theta_L\,.
\end{equation}
However, both the \pwave and \swave amplitudes are complex, and their sum gives rise to an interference term: % Both the \swave and interference term need to be added to the full decay rate.
%
\begin{equation}\label{eq:decay_rate_PS}
\left.
\frac{{\rm d}(\Gamma+\bar{\Gamma})}{{\rm dcos}\theta_L\,{\rm dcos}\theta_K\,{\rm d}\phi}
\right |_{\rm PS}
=\frac{3}{16\pi}\sum_{i=1}^{5} S_{Si} f_{S_i}(\theta_L,\theta_K,\phi)\,.
\end{equation}
%
The full expression is given in \refEq{decay_rate_PS_full}. For the full angular description of the \BuToKstmm decay, both the \swave and the interference terms have to be added to \refEq{decay_rate}.
%In this work, due to limited statistical power of the selected dataset, the \swave contribution is fixed according to previous measurement in~Ref.\,\cite{ANA-LHCb-angular4}. \todoAddCitation{Check}
\subsection{Angular observables}\label{sec:ANA_Theo_Angular}
Using \pwave term from \refEq{decay_rate}, \swave term from
\refEq{decay_rate_S}, and their interference term \refEq{decay_rate_PS} it is possible to construct the full angular description of \BuToKstmm. The full procedure is described in \refApp{ANA-Theo}. The full decay description is then given by \refEq{decay_rate_final}.
%
In total, there are eight moments related to the \pwave contribution and six moments related to the \swave contribution and its interference with the \pwave. The observables are shown in red for the reader's convenience. Each observable is measured in bins of \qsq. In order to measure all these variables, the statistical power of the measured sample is required to be rather large.
\vspace{-20pt}
\begin{equation} \label{eq:decay_rate_final}
\begin{aligned}
\left.
\frac{{\rm d}(\Gamma+\bar{\Gamma})}{{\rm dcos}\theta_L\,{\rm dcos}\theta_K\,{\rm d}\phi}
\right |_{\rm S+P}
=& \left.\frac{{\rm d}(\Gamma+\bar{\Gamma})}{{\rm dcos}\theta_L\,{\rm dcos}\theta_K\,{\rm d}\phi}
\right |_{\rm P}
+&& \left.\frac{{\rm d}(\Gamma+\bar{\Gamma})}{{\rm dcos}\theta_L\,{\rm dcos}\theta_K\,{\rm d}\phi}
\right |_{\rm S}
+ \left.\frac{{\rm d}(\Gamma+\bar{\Gamma})}{{\rm dcos}\theta_L\,{\rm dcos}\theta_K\,{\rm d}\phi}
\right |_{\rm PS}\\
=(1-\textcolor{red}{F_S})\frac{9}{32\pi}\Bigl[&\tfrac{3}{4}(1-\textcolor{red}{F_{L}})\sin^2\theta_K &&+ \textcolor{red}{F_{L}}\cos^2\theta_K \\
+& \tfrac{1}{4}(1-\textcolor{red}{F_{L}})\sin^2\theta_K\cos 2\theta_L &&- \textcolor{red}{F_{L}} \cos^2\theta_K\cos 2\theta_L \\
+& \textcolor{red}{S_3}\sin^2\theta_K \sin^2\theta_L \cos 2\phi &&+ \textcolor{red}{S_4} \sin 2\theta_K \sin 2\theta_L \cos\phi \\
+& \textcolor{red}{S_5}\sin 2\theta_K \sin \theta_L \cos \phi &&+ \tfrac{3}{4}\textcolor{red}{A_{FB}} \sin^2\theta_K \cos\theta_L \\
+& \textcolor{red}{S_7} \sin 2\theta_K \sin\theta_L \sin\phi &&+ \textcolor{red}{S_8} \sin 2\theta_K \sin 2\theta_L \sin\phi\\
+& \textcolor{red}{S_9}\sin^2\theta_K \sin^2\theta_L \sin 2\phi
\Bigr]\\
+\frac{3}{16\pi}\Bigl[&\textcolor{red}{F_S}\sin^2\theta_L &&+\textcolor{red}{S_{S1}}\sin^2\theta_L\cos\theta_K \\
+& \textcolor{red}{S_{S2}}\sin2\theta_L \sin \theta_K \cos\phi &&+ \textcolor{red}{S_{S3}}\sin\theta_L \sin \theta_K \cos\phi \\
+& \textcolor{red}{S_{S4}}\sin\theta_L \sin \theta_K \sin\phi &&+ \textcolor{red}{S_{S5}}\sin2\theta_L \sin \theta_K \sin\phi
\Bigr].
\end{aligned}\end{equation}
\subsection{Folding of angles}\label{sec:ANA_folding}
In total, there are 14 angular observables for each \qsq bin. The full description also requires the parametrization of the mass distributions of the \Bu and \Kstar mesons, adding more free parameters to the decay description. Moreover, the background contribution needs to be modeled in all dimensions. The expected signal yield in this rare decay is in the order of less than a hundred events in each \qsq bin. This can result in a great instability in the data fit.
In order to improve the stability, a dedicated procedure is implemented. The \emph{folding} of angles is an angular transformation exploiting the symmetry of the angular functions in \refEq{decay_rate_final}. An example is a transformation of $\phi\to\phi+\pi$ for signal candidates with $\phi < 0$. This results in canceling out the terms dependent on $\cos\phi$ and $\sin\phi$ and leaves \refEq{decay_rate_final} with only the observables $F_L$, $S_3$, \AFB, $S_9$ (and $F_S$ and $S_{S1}$). This method has been successfully applied in previous measurements, see \eg~Ref.\,\cite{ANA-LHCb_P5_AnaNote}.
Using a total of five foldings, all observables of interest are accessible. They are listed in \refEq{foldings}. This procedure greatly increases the stability of the data fit as most of the observables are canceled out. The price to pay is the loss of information about correlations between the observables.
\clearpage
%
\setstretch{1.0}
\begin{equation}\label{eq:foldings}
\begin{aligned}
{\rm \textbf{folding 0:}}\\
\phi &\;\to \;\phi+\pi &&\;{\rm for}\; \phi < 0\\[10pt]
{\rm \textbf{folding 1:}}\\
\phi &\;\to \;-\phi &&\;{\rm for}\; \phi < 0\\
\phi &\;\to \pi-\phi &&\;{\rm for}\; \ctl < 0\\
\ctl &\;\to \;-\ctl &&\;{\rm for}\; \ctl < 0\\[10pt]
{\rm \textbf{folding 2:}}\\
\phi &\;\to \;-\phi &&\;{\rm for}\; \phi < 0\\
\ctl &\;\to \;-\ctl &&\;{\rm for}\; \ctl < 0\\[10pt]
{\rm \textbf{folding 3:}}\\
\ctl &\;\to \;-\ctl &&\;{\rm for}\; \ctl < 0\\
\phi &\;\to \;\pi-\phi &&\;{\rm for}\; \phi > \sfrac{\pi}{2}\\
\phi &\;\to \;-\pi-\phi &&\;{\rm for}\; \phi < -\sfrac{\pi}{2}\\[10pt]
{\rm \textbf{folding 4:}}\\
\ctk &\;\to \;-\ctk &&\;{\rm for}\; \ctl < 0\\
\ctl &\;\to \;-\ctl &&\;{\rm for}\; \ctl < 0\\
\phi &\;\to \;\pi-\phi &&\;{\rm for}\; \phi > \sfrac{\pi}{2}\\
\phi &\;\to \;-\pi-\phi &&\;{\rm for}\; \phi < -\sfrac{\pi}{2}\\
\end{aligned}\end{equation}\setstretch{1.25}
A tabular overview of the sensitivity of the angular folding to \pwave angular moments is presented in \refTab{folding}. Using all the five angular foldings gives access to all eight \pwave angular moments. The \swave angular moments sensitivity is shown in \refTab{folding-s}.
\begin{table}[hbt!] \centering
\begin{tabular}{crccccc}\hline
observable &moment &0 &1 &2 &3 &4\\ \hline\hline
$S_{1s}$ &$\cos^2\theta_K$ &\checkmark &\checkmark &\checkmark &\checkmark &\checkmark \\
$S_{3}$ &$\sin^2\theta_K \sin^2\theta_L \cos 2\phi$ &\checkmark &\checkmark &\checkmark &\checkmark &\checkmark \\
$S_{4}$ &$\sin 2\theta_K \sin 2\theta_L \cos\phi$ & - &\checkmark & - & - & - \\
$S_{5}$ &$\sin 2\theta_K \sin \theta_L \cos \phi$ & - & - &\checkmark & - & - \\
$S_{6s}$ &$\sin^2\theta_K \cos\theta_L$ &\checkmark & - & - & - & - \\
$S_{7}$ &$\sin 2\theta_K \sin\theta_L \sin\phi$ & - & - & - &\checkmark & - \\
$S_{8}$ &$\sin 2\theta_K \sin 2\theta_L \sin\phi$ & - & - & - & - &\checkmark \\
$S_{9}$ &$\sin^2\theta_K \sin^2\theta_L \sin 2\phi$ &\checkmark & - & - & - & - \\
\hline
\end{tabular}
\captionof{table}[Angular folding sensitivity to \pwave angular moments.]{
Angular folding sensitivity to \pwave angular moments. \label{tab:folding}
}
\end{table}
\begin{table}[hbt!] \centering
\begin{tabular}{crccccc}\hline
observable &moment &0 &1 &2 &3 &4\\ \hline\hline
$F_{S}$ &$\sin^2\theta_L$ &\checkmark &\checkmark &\checkmark &\checkmark &\checkmark \\
$S_{S1}$ &$\ctk\sin^2\theta_L$ &\checkmark &\checkmark &\checkmark &\checkmark & - \\
$S_{S2}$ &$\sin\theta_K \sin2\theta_L \cos\phi$ & - &\checkmark & - & - & - \\
$S_{S3}$ &$\sin\theta_K \sin\theta_L \cos\phi$ & - & - &\checkmark & - & - \\
$S_{S4}$ &$\sin\theta_K \cos\theta_L \sin\phi$ & - & - & - & \checkmark & \checkmark \\
$S_{S5}$ &$\sin\theta_K \sin2\theta_L \sin\phi$ & - & - & - & - & - \\
\hline
\end{tabular}
\captionof{table}[Angular folding sensitivity to \swave angular moments.]{
Angular folding sensitivity to \swave angular moments. \label{tab:folding-s}
}
\end{table}
\subsection{Previous measurements}\label{sec:ANA_previous}
Experimentally, there are two main ways of studying the \bsll transitions: measurements of branching ratios, and of angular observables\footnote{\bsll transitions are also an important tool in studying lepton flavor universality. However, this is beyond the scope of this work.}. The first measurements of branching fractions agreed with the SM predictions~\cite{ANA-BR-Belle,ANA-BR-CDF,ANA-BR-LHCb}, as the statistical power of the measurements did not allow for precision tests. However, with more available data, the first discrepancies started to appear, such as in the differential branching fractions measurement of $\Bu\to K^{(*)}\mup\mun$ decays~\cite{SM-LHCb_BR} or in the $\Bs\to\phi\mup\mun$ branching fraction measurement~\cite{ANA-BR-BPHI-LHCb}.
% The results from this study are shown in \refFig{BR_LHCb}. A clear discrepancy between data and SM calculation appears in different decay channels. Similarly a discrepancy of over 3~standard deviations $\sigma$ appeared . All these measurements suggest branching-ratios below the SM predictions. It is interesting to note here that with increasing statistics in the experiments the discrepancy would remain around 3\stdev due to large (hadronic) theory uncertainties.
%
%\begin{figure}[htb!] \centering
% \includegraphics[width=0.32\textwidth]{./AnalysisTheory/kmumu_BF.pdf}
% \includegraphics[width=0.32\textwidth]{./AnalysisTheory/ksmumu_BF.pdf}
% \includegraphics[width=0.32\textwidth]{./AnalysisTheory/bukst_BF.pdf}
% \captionof{figure}[ Differnetal branching fraction results for the \Bu\to\Kp\mup\mun, \Bd\to\Kz\mup\mun and \BdToKstmm decays.]
% {
% Differential branching fraction measurement of the \Bu\to\Kp\mup\mun, \Bd\to\Kz\mup\mun and \BuToKstmm decays. The uncertainties shown on the data points are the quadratic sum
% of the statistical and systematic uncertainties. The shaded regions illustrate the theoretical
% predictions and their uncertainties from LCSR and lattice QCD calculations. In \qsq below \jpsi, measured BR are consistently below SM prediction value. Taken from~Ref.\,\cite{SM-LHCb_BR}. \label{fig:BR_LHCb}
% }
%\end{figure}
A discrepancy between a measurement and the SM predictions of angular observables appeared already in 2013, when \lhcb analyzed 1\invfb of data in the decay of \BdToKstmm~\cite{ANA-LHCb-angular1}. One out of 24 measurements (four \Pprime{} parameters in six bins of \qsq) is 3.7\stdev away from the SM prediction. The parameter is \Pprime{5}. If there is a New Physics contribution in the Wilson coefficients \C9 and \C10, it would show first in \Pprime{5}: this discrepancy sparked a lot of interest.
Since then, many similar measurements have been performed~\cite{ANA-LHCb-angular3, ANA-LHCb-angular2,ANA-Belle_P5,ANA-CMS_P5,ANA-ATLAS_P5}. These measurements are summarized in \refFig{P5prime_All}. The latest \lhcb result using the \BdToKstmm decay~\cite{ANA-LHCb-angular3} is not yet present in the figure. In this last measurement, the \Pprime{5} discrepancy in low \qsq increased from 2.4\stdev in~Ref.\,\cite{ANA-LHCb-angular2} to 2.8\stdev. %On the other hand, the angular analysis of \Bs\to$\phi$\mumu measured the angular observables in agreement with the Standard Model predictions~\cite{ANA-BR-BPHI-LHCb2}.
\begin{figure}[htb!] \centering
\includegraphics[width=0.65\textwidth]{./AnalysisTheory/P5prime_all.pdf}
\captionof{figure}[Measurements of the parameter \Pprime{5} compared to theory predictions.]
{Measurements of the optimized angular observable \Pprime{5} in bins of \qsq. The shaded areas represent charmonium resonances that are dominated by tree-level diagrams. Experimental results are taken from~Ref.\,\cite{ANA-LHCb-angular2,ANA-Belle_P5,ANA-CMS_P5,ANA-ATLAS_P5}, theory predictions are taken from~Ref.\,\cite{ANA-P5-theo1,ANA-P5-theo2,ANA-P5-theo3,ANA-P5-theo4}. \label{fig:P5prime_All}
}
\end{figure}
Moreover, the first angular study of \BuToKstKspimm at \lhcb~\cite{ANA-LHCb-angular4} has been recently published. The \Pprime{5} measured in eight \qsq bins is shown in \refFig{P5prime_David}. A global evaluation of the result in terms of the real part of the Wilson coefficient \C9 prefers a shift of Re(\C9 )=-1.9 from the Standard Model value with a significance of 3.1 standard deviations.
\begin{figure}[htb!] \centering
\includegraphics[width=0.65\textwidth]{./AnalysisTheory/P5_David.pdf}
\captionof{figure}[Measurement of the parameter \Pprime{5} in the \BuToKstKspimm decay compared to theory predictions.]
{Measurements of the optimized angular observable \Pprime{5} in bins of \qsq from \lhcb in decay of \BuToKstKspimm. The shaded areas represent charmonium resonances that are dominated by tree-level diagrams and $\phi$ pollution in the region around 1\gevgev. Experimental results are taken from~Ref.\,\cite{ANA-LHCb-angular4}, theory predictions are obtained from~Ref.\,\cite{ANA-P5-theo3,ANA-P5-theo5} using the \flavio package~\cite{ANA-flavio}. \label{fig:P5prime_David}
}
\end{figure}
\clearpage
It is clear there are numerous independent measurements in tension with the Standard Model of the order of 2--3\stdev. Similar tensions are also observed in the lepton flavor universality tests~\cite{ANA-LFU-1,ANA-LFU-2,ANA-LFU-3}. Moreover, there are similar tensions outside of the \bsll scope, such as the $R_{D}$ and $R_{D^*}$ measurement~\cite{ANA-LHCb-RD} or the $R_\jpsi$ measurement~\cite{ANA-LHCb-Rjpsi}. They are all consistent with each other, painting a picture of a possible New Physics contribution. Specifically, a New Physics contribution to the Wilson coefficients \C9 and \C10 may explain the anomalies~\cite{ANA-wilsonNP}. An example of a global fit to all these measurements is shown in \refFig{ANA-Moriond}. Further measurements, such as the work presented here, and improved theory calculations will cast light on these tensions in the near future. \vspace{\baselineskip}
\begin{figure}[htb!] \centering
\includegraphics[width=0.65\textwidth]{AnalysisIntro/C9-C10_allExperiments.pdf}
\captionof{figure}[Constraints to the NP contribution to Wilson coefficients $C_9$ and $C_{10}$.]
{Constraints to the New Physics contribution to Wilson coefficients $C_9$ and $C_{10}$ taken from~\cite{ANA-Moriond2017}. All other Wilson coefficients are assumed to have Standard Model values. The bands represent the constraints from \B\to\Kstar\mup\mun and \Bs\to$\phi$\mup\mun measurements performed by listed collaborations, the contours represent one standard deviation $\sigma$. Branching-ratio-only measurements are shown as the yellow band. The global fit of these results is represented in red with the one, two and three $\sigma$ contours. In the case of no New Physics contribution, the $C_9^{NP}$ and $C_{10}^{NP}$ are equal to zero. Note that the global fit is however dominated by the \lhcb results and \cms measurements are compatible with the Standard Model. For the details about the global fit procedure, see~Ref.\,\cite{ANA-Moriond2017}. \label{fig:ANA-Moriond}
}
\end{figure}
\clearpage

25
Chapters/Conclusions/conclusions.tex

@ -0,0 +1,25 @@
\section*{Conclusions}
\addcontentsline{toc}{section}{\protect\numberline{}Conclusions}
In this thesis, the first study of the rare \BuToKstmmFull is presented. This work is the first attempt to perform the angular analysis with a neutral particle in the final state with the \lhcb dataset. The full dataset of 9\invfb collected by the \lhcb experiment is utilized in this measurement.
Due to the challenging reconstruction of this decay channel, the selection criteria are carefully chosen and tested in order to maximally suppress the background contributions while preserving high signal efficiency and an even angular acceptance.
On top of applying a set of simple requirements in the selection, a multi-variate analysis is used to suppress the background pollution even more.
In total, 271$\pm$28 signal candidates are selected.
The fit model and the shape of the angular background present in the selected data sample is thoroughly investigated using simulation and data samples with large number of signal candidates. The angular shape of the background is modeled to maximize the fit stability and to avoid introducing biases in the angular parameters.
The fit model is validated using a fit to the reference resonant \BuToKstJpsi decay. The results of the fit to the \BuToKstJpsi decay agree with previous measurements at other experiments.
A detailed study of the maximum likelihood fit to the rare signal channel is done using pseudoexperiments. Multiple angular foldings are employed to maximize the stability of the fit. However, it is shown that the complicated background structure together with the low statistical power of the current data sample results in large uncertainties. The precision of the angular observables to be measured is estimated in five intervals of the dimuon invariant mass squared.
Using the \flavio package, a likelihood scan as a function of the real part of the Wilson coefficient Re(\C9 ) is performed, assuming a New Physics scenario with Re(\C9 )=-2, as observed in the previous studies. The best possible sensitivity to the deviation of the Wilson coefficient from the Standard Model value is estimated to be $\approx2.4$ standard deviations.
\clearpage

50
Chapters/EventSelection/Backgrounds.tex

@ -0,0 +1,50 @@
\subsection{Checks for possible backgrounds}
The main background source in this decay is combinatorial background. However, other background contributions could significantly shift the angular distributions and therefore need to be removed. It is shown already in \refSec{sel-KplusVeto} that the combinatorial background above the mass of the \Bu meson is dominated by \BuToKpmm decays, which are vetoed accordingly. Another check is performed to show that $\Bd\to\piz\mu\mu$ decays do not contribute to the signal. Contrary to the \BuToKpmm contribution, the reconstructed \piz\mumu mass does not peak in the \Bu mass (see \refFig{BuTopizmm} and compare to \refFig{BuToKpmm}).
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.54\textwidth]{./Data/Background/Pizmumu_2018Data.eps}
\captionof{figure}[Reconstructed \piz\mumu mass in the 2018 data sample after cut-based selection.]{Reconstructed \piz\mumu mass in the 2018 data sample after cut-based selection. The red band represents region $\pm$100\mev around the \Bu mass. These events are vetoed. There is no peak suggesting a significant contribution of the $\Bd\to\piz\mu\mu$ sample to the selected data. Compare with \refFig{BuToKpmm}.} \label{fig:BuTopizmm}
\end{figure}
\subsubsection[Study of the \texorpdfstring{${\BuToXJpsi}$}{B to XJpsi} sample]{Study of the \texorpdfstring{\BuToXJpsiBF}{B to XJpsi} sample}
A validation of the selection is performed using a simulated sample of inclusive \BuToXJpsi decays. This simulation is available for the years 2011, 2012 and 2016. The full selection identical to the signal selection is performed. After the selection, the true ID of the particles is checked in order to identify possible background contributions. A small contribution from $\Bu^{\ast}$ is found. However, as this very quickly decays into \Bu meson and cannot be distinguished from the \emph{true} signal, these events are considered as signal. In \refFig{BuToXmm}, the \Bu mass distribution from the sample of inclusive \BuToXJpsi decay is shown, comparing the mass distribution from \emph{true} \BuToKstJpsi events to background events. No peaking background is observed. A similar check is successfully performed in the \Kstar mass distribution (\refFig{BuToXmm_Kstar}).
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.44\textwidth]{./Data/Background/Inclusive_MC_Run_1.eps} \hspace{10pt}
\includegraphics[width=0.44\textwidth]{./Data/Background/Inclusive_MC_Run_2.eps}
\captionof{figure}[Reconstructed \Bu meson mass in the \BuToXJpsi decay simulation.]{Reconstructed \Bu meson mass in the simulation sample of inclusive \BuToXJpsi decay. The black line represents all candidates passing the whole selection, including the MLP response cut. The red line represents the events that are identified as background in the ID based truth-matching.} \label{fig:BuToXmm}
\end{figure}
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.44\textwidth]{./Data/Background/Inclusive_MC_Run_1_Kstar.eps} \hspace{10pt}
\includegraphics[width=0.44\textwidth]{./Data/Background/Inclusive_MC_Run_2_Kstar.eps}
\captionof{figure}[Reconstructed \Kstar mass in the simulation sample of \BuToXJpsi.]{Reconstructed \Kstar mass in the simulation sample of inclusive \BuToXJpsi decay. The black line represents all candidates passing the whole selection, including the MLP response cut. The red line represents the events that are identified as background in the ID based truth-matching.} \label{fig:BuToXmm_Kstar}
\end{figure}
Despite no clear contribution of peaking backgrounds, thorough checks revealed the presence of two background components: $\Bu\to K^{**}\jpsi$ and $\Bu\to\Kp (Y_{\ccbar}\to\jpsi X)$. Both of these decays pass the selection if a \piz meson or a photon from the decay of the excited $K^{**}$ or charmonium $Y_{\ccbar}$ meson is missed. Note that both of these decays have $\jpsi$ in their final state: they do not contribute to the signal channel, but to the reference channel only!
The shape of the \piz\mumu mass distribution typical for these decays is shown in \refFig{BuToXmm_peaking}. While the $\Bu\to K^{**}\jpsi$ mass distribution shape is similar to the $\BuToKstJpsi$ mass distribution shape, there is a clear peak at $\sim3650\mev$ in the $\Bu\to\Kp (Y_{\ccbar}\to\jpsi X)$ case. However, the overall contribution of this background is several percent, as can be seen in \refFig{BuToXmm_peakingCut}. In order to virtually eliminate this background and push it to a 2\% level only, a cut on the \piz\mumu mass is proposed. This cut, corresponding to removing any event with $m_{\piz\mumu}<3700\mev$ would also remove $\sim12\%$ of the actual reference channel signal. Furthermore, this cut also removes virtually all events with soft \piz mesons and therefore all events with $\ctk\sim1$. This cut would then skew the angular distribution of the reference channel. This background accounts for $\sim$15\% of all candidates with $\ctk>0.25$. Given the small number of such events and the already low selection efficiency in $\ctk\sim1$, this peaking background is modeled as a part of the combinatorial background.
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.45\textwidth]{./Data/Background/JpsiX_mpi0mumu.pdf}
\includegraphics[width=0.45\textwidth]{./Data/Background/JpsiX_cosThetaK_norm.pdf}
\captionof{figure}
[Background contributions in reconstructed \piz\mumu mass and \ctk.]
{Normalized reconstructed \piz\mumu mass (left) and \ctk (right) with different background contributions. The blue line represents the event candidates identified as signal, the orange line represents events coming from \decay{\Bu}{K^{**}\jpsi} and the green line represents the contribution from higher \cquark\cquarkbar resonances $Y_{\cquark\cquarkbar}$ decaying further into \jpsi and a meson. The \decay{\Bu}{K^{**}\jpsi} contamination accounts for 15\% of all signal candidates with $\ctk>0.25$. } \label{fig:BuToXmm_peaking}
\end{figure}
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.54\textwidth]{./Data/Background/nice_pollutioncurve.png} \hspace{10pt}
\captionof{figure}[The $\Bu\to\Kp (Y_{\ccbar}\to\jpsi X)$ pollution vs. the \BuToKstJpsi efficiency.]{The $\Bu\to\Kp (Y_{\ccbar}\to\jpsi X)$ pollution against the \BuToKstJpsi efficiency. In order to reduce the background contribution to 2\%, the signal efficiency would drop to $\sim$87\%. This figure is produced using the inclusive ${\BuToXJpsi}$ simulation sample. } \label{fig:BuToXmm_peakingCut}
\end{figure}

102
Chapters/EventSelection/Cut.tex

@ -0,0 +1,102 @@
\subsection{Preselection}\label{sec:sel-Preselection}
The events that pass the stripping are only roughly selected and therefore further cut-based selection needs to be applied. The cuts are listed in \refTab{presel_cuts}. Visual illustration of the effect of application of the cuts is shown in \refFig{ANA-nonPrese}.
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.49\textwidth]{Data/non_Presel/2017_Stripped.pdf}
\includegraphics[width=0.49\textwidth]{Data/non_DTF/2017_KplusPi0Resolved_BplusMassModel_OneCB_SingleExponential_DTF_constrained_fixShape_final.pdf}
\captionof{figure}[Mass distribution of \Bu before and after applying preselection cuts.]{Comparison of \Bu mass distribution before and after applying preselection cuts. On the left, \Bu mass distribution after applying \emph{stripping} cuts is shown, on the right reconstructed \Bu mass after applying preselection cuts. The mean of the signal distribution is denoted $\mu(m_B)$, $\sigma(m_B)$ is the width of the peak. The signal shape is constrained to the signal shape of simulation samples. The signal (blue) is fitted by a two-sided Crystal Ball function (see \refApp{CrystalBall}). The background (red) is described by an exponential function.} \label{fig:ANA-nonPrese}
\end{figure}
In the case of only charged particles in the decay chain, the decay is reconstructed starting from the most downstream vertex and then built upstream (in this case it would mean starting from the \Kstar vertex, continuing to the \Bu meson).
This means there is no propagation of information from the mother vertices to the daughter particles. In the case of neutral particles, this relation between the mother vertex and the daughter particles can contain a lot of important constraints and improve the event selection. The method exploiting these constraints, Decay Tree Fitter (DTF), was used for the first time by the \babar collaboration to reconstruct \KS\to\piz\piz decays~\cite{ANA-DTF}.
DTF constrains the mass of the neutral daughter (in our case \piz) and adds this information to the vertex of the mother particle. The decay chain is then parameterized in vertex position, decay length and particle momenta. A simultaneous fit of the decay is performed, taking into account all physical constraints (such as four-momentum conservation). In the case of the decay presented here, the \Bu mass resolution is significantly improved by using DTF. Therefore, momenta and mass variables used for the cut-based preselection are obtained using DTF. Moreover, to remove events where the DTF fit did not converge, only events with DTF status zero (meaning the DTF fit converged) and events with $\chisq_{DTF}<200$ are kept.\vspace{0.5\baselineskip}
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.49\textwidth]{Data/non_DTF/2017_KplusPi0Resolved_BplusMassModel_OneCB_ExpGauss_constrained_fixShape_final.eps}
\includegraphics[width=0.49\textwidth]{Data/non_DTF/2017_KplusPi0Resolved_BplusMassModel_OneCB_SingleExponential_DTF_constrained_fixShape_final.eps}
\captionof{figure}[Fit to \Bu mass data collected during the data-taking year 2017.]
{An example of \Bu mass fit to data collected during the 2017 data-taking year. The mean of the signal distribution is denoted $\mu(m_B)$, the width of the peak is denoted $\sigma(m_B)$. The signal shape is fixed to the signal shape of simulated samples.
On the left, mass calculated without the DTF is shown, signal (blue) is fitted by a two-sided Crystal Ball function, background (red) consists of an exponential combinatorial background and a function called ExpGaus\protect\footnotemark. The fit does not describe the data well, the signal peak is rather wide.
The sharp drop at 5000\mev is caused by cutting on mass obtained by the DTF.
On the right, mass obtained using the DTF is shown. Signal (blue) is fitted by a two-sided Crystal Ball function, background (red) consists only of an exponential combinatorial background. The mass resolution improved significantly.
} \label{fig:ANA-nonDTF}
\end{figure}
\footnotetext{This function is used for partly reconstructed background in B decays, for the definition see \refApp{ExpGaus}. }
Moreover, a DTF-like correction to the \Kstarp mass is applied. This is done by fixing the reconstructed \Bu meson mass to its known mass 5279.34\mev~\cite{PDG}. Then, the \piz momentum is adjusted according to the fixed \Bu mass. The adjusted \piz momentum is then used to estimate the \Kstarp mass. This has to be performed in order to remove the effects of the \piz momentum resolution on the reconstructed \Kstarp mass. Without this adjustment, the description of the \Kstarp mass peak by the Breit-Wigner formula fails.\vspace{0.5\baselineskip}
In order to isolate the reconstructed candidates from nearby tracks, a \emph{cone \pt asymmetry} is defined by \refEq{ANA-conePT}. The variable $\pt^\Bu$ denotes the transverse momentum of the reconstructed \Bu, while $\pt^{cone}$ is the sum of the transverse momenta of all charged tracks \emph{near} the reconstructed \Bu. A \emph{near} track is a track in a cone $\sqrt{ (\Delta\phi)^2+(\Delta\eta)^2} \leq 1.0$, where $\Delta\phi$ is the difference between the track's momentum and the \Bu meson momentum in azimuthal angle and $\Delta\eta$ is the difference in pseudorapidity. The cone \pt asymmetry is then calculated as
%
\begin{equation}\label{eq:ANA-conePT}
A_{\pt} = \frac{\pt^\Bu-\pt^{cone}}{\pt^\Bu+\pt^{cone}}\,.
\end{equation}
In \refSec{det_RICH}, the PID variable DLL is defined. The likelihood information from each PID subsystem (\rich, CALO, MUON) is added linearly, forming a set of combined likelihoods. The final DLL is the likelihood of a given mass hypothesis relative to the pion mass hypothesis. This does not take into account correlations between the subsystems and it does not fully exploit the non-PID information from the subdetectors. Therefore, another variable, \emph{ProbNN}, is used. ProbNN combines the PID information from the detectors and the non-PID information in a multi-variate analysis. Therefore, in the cut-based selection, ProbNN variables are used, contrary to the DLL variables in the stripping selection. The ProbNN is calculated for each type of particle, the notation is \eg ProbNNmu for the muon ProbNN.
In the case of photon PID, one relies only on the information from the \ecal. The variable \emph{confidence level} is constructed from the DLL values to indicate the confidence that the chosen assignment of particle ID is correct. It is calculated as the ratio of the likelihood of the chosen hypothesis and the sum of all hypotheses $X$. In the case of photon it becomes
\begin{equation}\label{eq:ANA-CL}
CL_\gamma = \frac{DLL_{\gamma\pi}}{\sum_{X} DLL_{X\pi}}\,.
\end{equation}
\begin{table}[hbt!]
\centering
\begin{tabular}{c|c}
candidate & Selection criterion\\
\hline\hline
\Bu & 5.0\gev $< m_{\Bu} <$ 5.8\gev \\
& $\pt^\Bu>2000$\mev \\
& Cone-$\pt$ asymmetry $>$ -0.5 \\
& DIRA $>9\mrad$\\% 0.99996 \\
& $\chisqip<12$ \\
& $\chisq_{FD}>121$ \\
\hline
\Kstarp & $792\mev< m_{K^*} <992\mev$ \\
& $\pt> 1350$\mev \\
& $\chisq_{FD}>9$ \\
\hline
\mumu & Angle between muons $>$ 0.001 \\
& ProbNNmu $>$ 0.25 \\
& $\chisqip>9$ \\
& $0.1\gevgev < \qsq < 21.0 \gevgev$, $\qsq$ binned \\ %check
\hline
\Kp & ProbNNk $>0.25$ \\
& Angle between $K^+$ and $\pi^0 >$ 0.001\\ %check
\hline
\piz & $p_T > 800$\,MeV \\ \hline
\g & CL$_\gamma>0.15$ \\
\hline
tracks & $\eta > 1.6$ \\
\end{tabular}
\captionof{table}[Preselection cuts.]{Preselection cuts. \label{tab:presel_cuts}}
\end{table}
\subsubsection{Charmonium vetoes}\label{sec:sel-Charmonium}
The decay rate of a \bsll transition as a function of the dimuon invariant mass squared \qsq shows two large excesses, as shown in \refFig{q2_theory}. They are caused by the resonant decays of \JpsiTomm and \PsiTomm coming from \BuToKstJpsi and \BuToKstPsi decays. Moreover, a small contribution of \PhiTomm from the rare decay \BuToKstPhi is expected. The contributions from these resonances are depicted in \refFig{ANA-q2_veto}. The \jpsi and \psitwos resonances clearly dominate the event population. As discussed in \refSec{SM_bsll}, these resonances come from tree-level processes and therefore are removed from the selection. The process \BuToKstPhi is strongly influenced by QCD and therefore could potentially pollute the angular distribution and is removed.
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.5\textwidth]{./Data/q2_dist.eps}
\includegraphics[width=0.49\textwidth]{./Data/q2_veto_2.eps}
\captionof{figure}[Distribution of \qsq and \qsq vs \Kp\piz\mup\mun invariant mass.]{Dimuon invariant mass squared \qsq distribution (left) and \qsq versus \Kp\piz\mup\mun invariant mass (right) from the full \runI and \runII dataset. The shaded bands represent the regions surrounding $\phi$, $\jpsi$ and $\psitwos$ resonances (from the bottom to the top) that are vetoed in the signal selection. The region surrounding $\jpsi$ is further used as a control channel for validation of the fit.} \label{fig:ANA-q2_veto}
\end{figure}
\subsubsection[\texorpdfstring{${\BuToKpmm}$}{BuToKpmm} veto]{\texorpdfstring{\BuToKpmmBF}{BuToKpmm} veto}\label{sec:sel-KplusVeto}
The decay of \BuToKpmm wrongly associated with an independent \piz meson mimics the signal. The invariant \Kp\piz\mup\mun mass of these candidates is well above the \Bu meson mass. Therefore, this background does not contribute to the signal. This is shown in \refFig{BuToKpmm}. \vspace{\baselineskip}
However, the vetoed events account for a big part of the combinatorial background above the \Bu mass. After applying the full selection, the \BuToKpmm contribution even dominates this region. In order to suppress this background, a dedicated veto rejecting candidates with $\Kp\mup\mun$ mass close to the \Bu mass, $|m_{\Bu}-m_{\Kp\mup\mun}| < 100 \mev$, is applied.
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.44\textwidth]{./Data/Background/Kmumu_2018.eps}
\includegraphics[width=0.44\textwidth]{./Data/Background/Kmumu_Bmass_2018_2.eps}
\captionof{figure}[Invariant mass of \Kp\mup\mun in the 2018 data sample.]{Invariant mass of \Kp\mup\mun in the 2018 data sample after cut-based selection. On the left, the \BuToKpmm mass is shown. There is a clear peak suggesting a contribution of \BuToKpmm sample to selected data. The red band represents the region $\pm$100\mev around the \Bu mass. These events are vetoed. On the right, the mass of the vetoed \Kp\mumu candidates with the associated random \piz meson is shown. The magenta band shows the region of \Bu meson mass $\pm100\mev$. The \BuToKpmm decay does not contribute to the signal of \BuToKstmm.} \label{fig:BuToKpmm}
\end{figure}

77
Chapters/EventSelection/Efficiency.tex

@ -0,0 +1,77 @@
\subsubsection{Efficiency estimation}\label{sec:sel-Efficiency}
The efficiency for this analysis is estimated purely using the simulation sample. There are many limitations arising from this: availability of simulation samples or mismodeling of kinematic variables in the simulation.
%Due to a discrepancy between simulation and data, \lone trigger efficiency is studied.
In order to obtain a signal yield estimation, the acceptance efficiency from simulation is obtained. At this level, the acceptance efficiency is approximated by the generator-level efficiency: the fraction of generated events being in the \lhcb acceptance. As the resolution of the angles \ctk, \ctl and $\phi$ is small, this is a good approximation.
\paragraph*{Generator-level efficiency}
Generator-level efficiencies are provided with the Monte Carlo simulation sample. Available values are summarized in \refTab{eff_gen}. As the efficiency is studied per Run, final values are obtained by simply averaging over the magnet polarities and years. As the point of interest is the \emph{ratio} of the efficiency of signal and reference channels, this approximation holds well enough.
\begin{table}[hbt!]
\centering
\begin{tabular}{cl|cccccc}
\multicolumn{1}{l}{} & & 2011 & 2012 & 2015 & 2016 & 2017 & 2018 \\ \hline
\multirow{2}{*}{\BuToKstmm} & Down & --- & --- & 16.15 & 16.10 & 16.09 & 16.05 \\
& Up & --- & --- & 16.08 & 16.11 & 15.95 & 16.09 \\ \hline
\multirow{2}{*}{\BuToKstJpsi} & Down & 14.39 & 14.77 & 15.81 & 15.85 & --- & --- \\
& Up & 14.42 & 14.79 & 15.74 & 15.90 & --- & ---
\end{tabular}
\captionof{table}[Generator-level efficiencies.]{Available generator-level efficiencies for signal and reference channels. The numbers represent the ratio of accepted signal events over generated signal events in [\%]. Missing values for signal channel simulation (when these samples were produced, the generator-level efficiency was not automatically saved) samples are taken from reference channel simulation, missing values for reference simulation are taken from 2016 reference channel simulation. \vspace{\baselineskip} \label{tab:eff_gen} }
% The value for signal channel simulation for year 2015 is taken from a production of 2015 signal simulation channel that can not be used further. However, the generator-level efficiencies are correct for this year.
\end{table}
\paragraph*{Full selection efficiency}
The next step is the full selection efficiency. This efficiency is the ratio of weighted truth-matched events passing the cut-based selection relative to the number of all generated events in the \lhcb acceptance. The values of this efficiency for each year used to calculate the full efficiency according to \refEq{EffEfficiency} are shown in \refFig{eff_sel}. \vspace{\baselineskip}
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.6\textwidth]{./Data/Efficiencies/Selection/All_Run12/KplusPi0Resolved_IDTM_rndGamma_weighted_SelectionEfficiency_All_Run12.eps}
\captionof{figure}[Selection efficiency.]{Selection efficiency from available simulation samples per year. Red points show the efficiency for the reference \BuToKstJpsi channel, black points represent the signal \BuToKstmm channel. Higher efficiency for the reference channel is caused by generally higher selection efficiency at \qsq$\sim9\gevgev$ (see \refFig{eff_sel_q2}). } \label{fig:eff_sel}
\end{figure}
A simple cross-validation of the selection process is done using a small sample of ten thousand signal events that passed only the generator-level requirements.
%The genereted events in the given \qsq interval and in the Bmass window are scaled to replect the number of actually generated MC events. So the efficiency is full mc simmulation passing the full selection / scaled gen events in the \qsq interval.
It is shown in \refFig{eff_sel_q2} that there is no significant kink in the efficiency in the resonance regions and hence no bias in the selection of \BuToKstmm with respect to the \BuToKstJpsi decay is introduced. The \refFig{eff_sel_q2} also explains why the reference channel \BuToKstJpsi efficiency is larger than the signal channel \BuToKstmm efficiency: the \qsq selection efficiency is large around \qsq$\sim9\gevgev$ and therefore the selection efficiency is larger in the reference channel.
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.47\textwidth]{./Data/Efficiencies/Selection/Run1/KplusPi0Resolved_IDTM_rndGamma_weighted_SelectionEfficiency_Run1_q2_binned.eps} \hspace{10pt}
\includegraphics[width=0.47\textwidth]{./Data/Efficiencies/Selection/Run2/KplusPi0Resolved_IDTM_rndGamma_weighted_SelectionEfficiency_Run2_q2_binned.eps}
\captionof{figure}[Selection efficiency in \qsq dependence.]{Selection efficiency in \qsq dependence. The efficiency is estimated using ten thousand simulation events passing only generator-level requirements. The trend follows the \qsq acceptance of \lhcb with no significant kink in the resonance regions ($[8.0\gevgev,11.0\gevgev]$ and $[12.5\gevgev,15.0\gevgev]$). } \label{fig:eff_sel_q2}
\end{figure}
\paragraph*{Multilayer perceptron efficiency}
While the MLP is designed to separate between signal and background, it cannot be 100\% \emph{effective} and therefore a fraction of signal events is removed together with the background. The MLP efficiency is obtained from truth-matched simulation as the ratio of events passing MLP response cut that are purged of multiple candidates (for details of this procedure see \refSec{sel-MultipleCandidates}) over all truth-matched simulation candidates. The efficiency in dependence on MLP response is presented in \refFig{eff_MLP}. \vspace{2\baselineskip}%The removal of multiple candidates is reflected in the efficiency in order to get good estimation of signal yield in data, where the multiple candidates are also removed.
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.44\textwidth]{./Data/Efficiencies/TMVA/KplusPi0Resolved_mumu_IDTM_rndGamma_weighted_BDT_AloneOnly_Efficiency_Run1.eps}
\includegraphics[width=0.44\textwidth]{./Data/Efficiencies/TMVA/KplusPi0Resolved_mumu_IDTM_rndGamma_weighted_BDT_AloneOnly_Efficiency_Run2.eps}
\includegraphics[width=0.44\textwidth]{./Data/Efficiencies/TMVA/KplusPi0Resolved_JpsiOnly_IDTM_rndGamma_weighted_BDT_AloneOnly_Efficiency_Run1.eps}
\includegraphics[width=0.44\textwidth]{./Data/Efficiencies/TMVA/KplusPi0Resolved_JpsiOnly_IDTM_rndGamma_weighted_BDT_AloneOnly_Efficiency_Run2.eps}
\captionof{figure}[MLP and multiple-candidates removal efficiency per Run.]{MLP and multiple-candidates removal efficiency per Run obtained from signal and reference channels simulation sample. The offset from $\varepsilon_{MLP}=1$ at MLP response equal to zero is caused by the removal of multiple candidates. } \label{fig:eff_MLP}
\end{figure}
\clearpage
%Putting together the efficiencies, the final recipe to estimate total selection efficiency becomes:
%\begin{equation}\label{eq:EffEfficiencyReal}
%\varepsilon_{tot} =\varepsilon_{acc} \times \varepsilon_{sel+TM} \times \varepsilon_{MLP}^{Removed~multiple}\,.
%%\varepsilon_{tot} =\varepsilon_{acc} \times \varepsilon_{sel+TM}^{weighted} \times \varepsilon_{MLP}^{Removed~multiple}\,.
%\end{equation}
%\paragraph*{\lone trigger efficiency}
%
%\todo[inline]{Definitely move elsewhere later and add some more info}
%
%Unfortunately, a small discrepancy in \lone trigger thresholds between the simulation and data is introduced. Therefore, a study on the \lone trigger efficiency is performed.
%
%All the efficiency plots can be also found in \refApp{L0Eff}.

21
Chapters/EventSelection/FitsInQ2.tex

@ -0,0 +1,21 @@
\vspace{-\baselineskip}
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.440\textwidth]{./Data/FinalSelection/Q2_scan/Run1_KplusPi0Resolved_mumu_BplusMassModel_OneCB_SingleExponential_DTF_removedMultiple_TMVA0.998500_constrained_fixShape_fixedMassWindow_q2_binned_fit_0.eps}
\includegraphics[width=0.440\textwidth]{./Data/FinalSelection/Q2_scan/Run1_KplusPi0Resolved_mumu_BplusMassModel_OneCB_SingleExponential_DTF_removedMultiple_TMVA0.998500_constrained_fixShape_fixedMassWindow_q2_binned_fit_1.eps}\\
\includegraphics[width=0.440\textwidth]{./Data/FinalSelection/Q2_scan/Run1_KplusPi0Resolved_mumu_BplusMassModel_OneCB_SingleExponential_DTF_removedMultiple_TMVA0.998500_constrained_fixShape_fixedMassWindow_q2_binned_fit_3.eps}
\includegraphics[width=0.440\textwidth]{./Data/FinalSelection/Q2_scan/Run1_KplusPi0Resolved_mumu_BplusMassModel_OneCB_SingleExponential_DTF_removedMultiple_TMVA0.998500_constrained_fixShape_fixedMassWindow_q2_binned_fit_5.eps} \\
\captionof{figure}[Signal channel yield in \qsq bins in \runI.]{Signal channel yield in \qsq bins in \runI. } \label{fig:app-yieldInQ2_Run1}
\end{figure}
\vspace{-\baselineskip}
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.440\textwidth]{./Data/FinalSelection/Q2_scan/Run2_KplusPi0Resolved_mumu_BplusMassModel_OneCB_SingleExponential_DTF_removedMultiple_TMVA0.996000_constrained_fixShape_fixedMassWindow_q2_binned_fit_0.eps}
\includegraphics[width=0.440\textwidth]{./Data/FinalSelection/Q2_scan/Run2_KplusPi0Resolved_mumu_BplusMassModel_OneCB_SingleExponential_DTF_removedMultiple_TMVA0.996000_constrained_fixShape_fixedMassWindow_q2_binned_fit_1.eps}\\
\includegraphics[width=0.440\textwidth]{./Data/FinalSelection/Q2_scan/Run2_KplusPi0Resolved_mumu_BplusMassModel_OneCB_SingleExponential_DTF_removedMultiple_TMVA0.996000_constrained_fixShape_fixedMassWindow_q2_binned_fit_3.eps}
\includegraphics[width=0.440\textwidth]{./Data/FinalSelection/Q2_scan/Run2_KplusPi0Resolved_mumu_BplusMassModel_OneCB_SingleExponential_DTF_removedMultiple_TMVA0.996000_constrained_fixShape_fixedMassWindow_q2_binned_fit_5.eps}\\
\captionof{figure}[Signal channel yield in \qsq bins in \runII.]{Signal channel yield in \qsq bins in \runII.} \label{fig:app-yieldInQ2_Run2}
\end{figure}

56
Chapters/EventSelection/L0Efficiency.tex

@ -0,0 +1,56 @@
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.45\textwidth]{./Data/Efficiencies/L0Muon/2016/KplusPi0Resolved_IDTM_rndGamma_L0MuonEfficiency_2016_DiMuon_MC.eps}
\includegraphics[width=0.45\textwidth]{./Data/Efficiencies/L0Muon/2017/KplusPi0Resolved_IDTM_rndGamma_L0MuonEfficiency_2017_DiMuon_MC.eps}\\
\includegraphics[width=0.45\textwidth]{./Data/Efficiencies/L0Muon/2018/KplusPi0Resolved_IDTM_rndGamma_L0MuonEfficiency_2018_DiMuon_MC.eps}
\captionof{figure}[\lone trigger efficiency.]{\lone trigger efficiency, signal MC. The passing events are triggered either by L0DiMuonDecision (TOS) or L0MuonDecision (TOS).}
\end{figure}
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.45\textwidth]{./Data/Efficiencies/L0Muon/2015/KplusPi0Resolved_IDTM_rndGamma_L0MuonEfficiency_2015_DiMuon_RefMC.eps}
\includegraphics[width=0.45\textwidth]{./Data/Efficiencies/L0Muon/2016/KplusPi0Resolved_IDTM_rndGamma_L0MuonEfficiency_2016_DiMuon_RefMC.eps}
\captionof{figure}[\lone trigger efficiency.]{\lone trigger efficiency, reference MC, dimuon. The passing events are triggered either by L0DiMuonDecision (TOS) or L0MuonDecision (TOS).}
\end{figure}
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.45\textwidth]{./Data/Efficiencies/L0Muon/2015/KplusPi0Resolved_IDTM_rndGamma_L0MuonEfficiency_2015_DiMuon_PHSP.eps}
\includegraphics[width=0.45\textwidth]{./Data/Efficiencies/L0Muon/2016/KplusPi0Resolved_IDTM_rndGamma_L0MuonEfficiency_2016_DiMuon_PHSP.eps}\\
\includegraphics[width=0.45\textwidth]{./Data/Efficiencies/L0Muon/2017/KplusPi0Resolved_IDTM_rndGamma_L0MuonEfficiency_2017_DiMuon_PHSP.eps}
\includegraphics[width=0.45\textwidth]{./Data/Efficiencies/L0Muon/2018/KplusPi0Resolved_IDTM_rndGamma_L0MuonEfficiency_2018_DiMuon_PHSP.eps}
\captionof{figure}[\lone trigger efficiency.]{\lone trigger efficiency, PHSP MC. The passing events are triggered either by L0DiMuonDecision (TOS) or L0MuonDecision (TOS).}
\end{figure}
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.45\textwidth]{./Data/Efficiencies/L0Muon/2011/KplusPi0Resolved_IDTM_rndGamma_L0MuonEfficiency_2011_MC.eps}
\includegraphics[width=0.45\textwidth]{./Data/Efficiencies/L0Muon/2012/KplusPi0Resolved_IDTM_rndGamma_L0MuonEfficiency_2012_MC.eps}\\
\includegraphics[width=0.45\textwidth]{./Data/Efficiencies/L0Muon/2016/KplusPi0Resolved_IDTM_rndGamma_L0MuonEfficiency_2016_MC.eps}
\includegraphics[width=0.45\textwidth]{./Data/Efficiencies/L0Muon/2017/KplusPi0Resolved_IDTM_rndGamma_L0MuonEfficiency_2017_MC.eps}\\
\includegraphics[width=0.45\textwidth]{./Data/Efficiencies/L0Muon/2018/KplusPi0Resolved_IDTM_rndGamma_L0MuonEfficiency_2018_MC.eps}
\captionof{figure}[\lone trigger efficiency.]{\lone trigger efficiency, signal MC. The passing events are triggered by L0MuonDecision (TOS).}
\end{figure}
\begin{figure}[hbt!]
\includegraphics[width=0.45\textwidth]{./Data/Efficiencies/L0Muon/2011/KplusPi0Resolved_IDTM_rndGamma_L0MuonEfficiency_2011_RefMC.eps}
\includegraphics[width=0.45\textwidth]{./Data/Efficiencies/L0Muon/2012/KplusPi0Resolved_IDTM_rndGamma_L0MuonEfficiency_2012_RefMC.eps}\\
\includegraphics[width=0.45\textwidth]{./Data/Efficiencies/L0Muon/2015/KplusPi0Resolved_IDTM_rndGamma_L0MuonEfficiency_2015_RefMC.eps}
\includegraphics[width=0.45\textwidth]{./Data/Efficiencies/L0Muon/2016/KplusPi0Resolved_IDTM_rndGamma_L0MuonEfficiency_2016_RefMC.eps}
\captionof{figure}[\lone trigger efficiency.]{\lone trigger efficiency, reference MC. The passing events are triggered by L0MuonDecision (TOS).}
\end{figure}
\begin{figure}[hbt!]
\includegraphics[width=0.45\textwidth]{./Data/Efficiencies/L0Muon/2011/KplusPi0Resolved_IDTM_rndGamma_L0MuonEfficiency_2011_PHSP.eps}
\includegraphics[width=0.45\textwidth]{./Data/Efficiencies/L0Muon/2012/KplusPi0Resolved_IDTM_rndGamma_L0MuonEfficiency_2012_PHSP.eps}\\
\includegraphics[width=0.45\textwidth]{./Data/Efficiencies/L0Muon/2015/KplusPi0Resolved_IDTM_rndGamma_L0MuonEfficiency_2015_PHSP.eps}
\includegraphics[width=0.45\textwidth]{./Data/Efficiencies/L0Muon/2016/KplusPi0Resolved_IDTM_rndGamma_L0MuonEfficiency_2016_PHSP.eps}\\
\includegraphics[width=0.45\textwidth]{./Data/Efficiencies/L0Muon/2017/KplusPi0Resolved_IDTM_rndGamma_L0MuonEfficiency_2017_PHSP.eps}
\includegraphics[width=0.45\textwidth]{./Data/Efficiencies/L0Muon/2018/KplusPi0Resolved_IDTM_rndGamma_L0MuonEfficiency_2018_PHSP.eps}
\captionof{figure}[\lone trigger efficiency.]{\lone trigger efficiency, PHSP. The passing events are triggered by L0MuonDecision (TOS).}
\end{figure}

205
Chapters/EventSelection/MVA.tex

@ -0,0 +1,205 @@
\subsection{Multi-variate analysis selection}\label{sec:sel-TMVA}
After the cut-based preselection, a rather large amount of combinatorial background is still present (see \refFig{ANA-nonPrese}). To reduce the amount of background while maintaining high signal selection efficiency, a multi-variate analysis (MVA) is performed~\cite{ANA-MVA}. Generally, an MVA is a set of statistical methods that examine patterns in multidimensional data.\vspace{\baselineskip}
\begin{figure}[hbt!]\centering
\includegraphics[width=0.58\textwidth]{./AnalysisSelection/NeuralNetwork.png}
\captionof{figure}[Multilayer feedforward backpropagation neural network principles.]{Sketch of multilayer feedforward backpropagation neural network principles. The input layer distributes the input data by weighting them and sending them to the hidden neurons (nodes). The hidden neurons sum the signal from the input neurons and project this sum on an \emph{activation function} $f_h$. The activation function is typically a binary step (threshold) or rectified linear unit function $f(x) = \max(0,x)$. The projected numbers are weighted and sent to the output layer, where they are summed again. There can be an arbitrary number of neurons and hidden layers. } \label{fig:ANA-NeuralNetwork}
\end{figure}
There is a vast list of methods that can be considered a multi-variate analysis, the most commonly used ones are \emph{decision trees} or \emph{multiple regression} methods. In this analysis, the multilayer perceptron analysis is used.
A multilayer perceptron (MLP) is an artificial neural network. Neural networks were proposed as early as 1943~\cite{ANA-MLP}. A simple sketch of its principle is presented in \refFig{ANA-NeuralNetwork}. It consists of three layers: input layer, hidden layer and output layer. Each layer consists of several (or many) nodes that are interconnected. A node receives a data item (a number) from each of its connections, multiplies it by an associated weight and returns the sum of these products.
This sum is then transformed by an \emph{activation function}. During the training process, the associated weights are random: by examining examples with known input and/or output layer, the weights are associated in a way that the training data with same labels consistently yield similar output.
An MLP is a special kind of neural network: it is a supervised-learning network that uses backpropagation for training. It is used to distinguish data categories that are not linearly separable: in this case signal and background. Supervised learning means the neural network is trained with a set of input-output pairs (while unsupervised is trained only using the input data). Backpropagation means the gradient of the loss function with respect to the weights of the network is computed. The loss function represents the discrepancy between the desired output and the output calculated by the neural network. This \emph{error} is then sent through the network backwards, updating the weights according to the \emph{error}, leading to a quick reduction of the difference between the expected and calculated outputs.
The MLP tool provided by the Toolkit for Multivariate Data Analysis (TMVA)~\cite{ANA-TMVA} is used. The samples used for training have to be clearly labeled as signal or background and be as close to the real signal and background as possible. Hence, the MLP is trained using \BuToKstmm decay candidates in the simulation sample for signal with the requirement of the reconstructed \Bu meson mass to be close to the \Bu rest mass ($|m_{\Bu}^{\rm reco} - \mBu| <100\mev$). The background training sample is taken from the recorded data: the \Bu meson upper-mass sideband, with the requirement of the reconstructed \Bu meson mass to be larger than 5700\mev. The requirement of $m_{\Bu}^{\rm reco} > 5700\mev$ enforces no (partially) reconstructed events in the background sample. The numbers of available signal and background events are listed in \refTab{TMVAevents}. The MLP is trained separately for \runI and \runII, as the Run conditions differed.
\begin{table}[hbt!]
\centering
\begin{tabular}{l|cc}
& \runI & \runII \\ \hline
Signal events & 4531 & 19152\\
Background events & 511 & 1748
\end{tabular}
\captionof{table}[Number of events used for the MLP training.]{Number of events used for the MLP training. \label{tab:TMVAevents}}
\end{table}
The list of variables that serve as an input to the MLP is presented in \refTab{TMVA}. These variables were identified as the variables with the largest discrimination power. The agreement between the simulated and recorded data in the listed variables becomes extremely important, as the MLP could pick up on differences between the data and simulation instead of separating background from the signal. As mentioned in \refSec{sel-SimulationCorrection}, the \sWeight ed data and weighted simulation distributions of variables listed in \refTab{TMVA} are carefully checked to be in agreement. The distributions agree very well. Small discrepancies are acceptable as they are only minor and present in regions where the MLP does not differentiate between signal and background.
\begin{table}[hbt!]
\centering
\begin{tabular}{c}
$\ln{\pt^{\Bu}} $\\
\Bu Cone-$\pt$ asymmetry \\
\Bu \chisqip\\
$\ln(1-\Bu\text{DIRA})$\\
$\ln{\pt^{\Kp}} $\\
$|\eta(\piz)-\eta(\Kp)|$ \\
$CL_\piz$ \\
max$\left[\ln(\pt^{\g_1}),\ln(\pt^{\g_2})\right]$ \\
min$\left[\ln{ \mun \chisqip },\ln{ \mup \chisqip }\right]$\\
\end{tabular}\\ \vspace{5pt}
\captionof{table}[List of variables used for the MLP training.]{List of variables used for the MLP training. The confidence level of the neutral pion is a product of photon confidence levels, $CL_\piz = CL_{\gamma_1} CL_{\gamma_2}$. The list is identical in \runI and \runII. \label{tab:TMVA}}
\end{table}
In order for the MLP to select signal over background as efficiently as possible, the input variables should not be correlated among each other both in the signal and the background samples, as they lower the separation power of the MLP. The correlations between the input variables for the training signal and background samples are depicted in \refFig{ANA-MLP_corr}.
The TMVA toolkit returns MLP response value between 0 and 1, where the number represents the probability of an event being a signal event. The optimal cut value is discussed later in \refSec{sel-SignalEstimation}.
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.48\textwidth]{./Data/MVA/Run1/CorrelationS_new.eps}
\includegraphics[width=0.48\textwidth]{./Data/MVA/Run1/CorrelationB_new.eps}
\captionof{figure}[The correlations between the input variables for the MVA training.]{The correlations between the input variables for the MVA training signal (left) and background samples (right). It is clear there is no significant correlation between the input variables both in the signal and the background samples. } \label{fig:ANA-MLP_corr}
\end{figure}
%%%%% Run I
%--- DataSetFactory : Signal -- training events : 4531 (sum of weights: 4531) - requested were 0 events
%--- DataSetFactory : Signal -- testing events : 4531 (sum of weights: 4586.78) - requested were 0 events
%--- DataSetFactory : Signal -- training and testing events: 9062 (sum of weights: 9117.78)
%--- DataSetFactory : Signal -- due to the preselection a scaling factor has been applied to the numbers of requested events: 0.330549
%--- DataSetFactory : Background -- training events : 511 (sum of weights: 511) - requested were 0 events
%--- DataSetFactory : Background -- testing events : 511 (sum of weights: 511) - requested were 0 events
%--- DataSetFactory : Background -- training and testing events: 1022 (sum of weights: 1022)
%--- DataSetFactory : Background -- due to the preselection a scaling factor has been applied to the numbers of requested events: 0.0103976
%
%--- MLP : Ranking result (top variable is best ranked)
%--- MLP : ----------------------------------------------
%--- MLP : Rank : Variable : Importance
%--- MLP : ----------------------------------------------
%--- MLP : 1 : gamma_max_log_PT_DTF : 1.282e+01
%--- MLP : 2 : K_plus_PI0_ETA_DTF : 1.066e+01
%--- MLP : 3 : B_plus_log_DIRA : 8.585e+00
%--- MLP : 4 : pi_zero_resolved_CL : 5.243e+00
%--- MLP : 5 : B_plus_NEW_ConePTasym : 4.506e+00
%--- MLP : 6 : B_plus_log_PT_DTF : 4.417e+00
%--- MLP : 7 : min_mumu_IPCHI2_OWNPV : 3.755e+00
%--- MLP : 8 : B_plus_IPCHI2_OWNPV : 3.632e+00
%--- MLP : 9 : K_plus_log_PT_DTF : 2.342e+00
%--- MLP : ----------------------------------------------
%
%--- Factory : Inter-MVA correlation matrix (signal):
%--- Factory : --------------------------------
%--- Factory : BDT BDTG MLP
%--- Factory : BDT: +1.000 +0.773 +0.608
%--- Factory : BDTG: +0.773 +1.000 +0.833
%--- Factory : MLP: +0.608 +0.833 +1.000
%--- Factory : --------------------------------
%--- Factory :
%--- Factory : Inter-MVA correlation matrix (background):
%--- Factory : --------------------------------
%--- Factory : BDT BDTG MLP
%--- Factory : BDT: +1.000 +0.853 +0.852
%--- Factory : BDTG: +0.853 +1.000 +0.814
%--- Factory : MLP: +0.852 +0.814 +1.000
%--- Factory : --------------------------------
%--- Factory :
%--- Factory : Correlations between input variables and MVA response (signal):
%--- Factory : --------------------------------
%--- Factory : BDT BDTG MLP
%--- Factory : gamma_max_log_PT_DTF: +0.483 +0.385 +0.294
%--- Factory : K_plus_PI0_ETA_DTF: -0.446 -0.432 -0.382
%--- Factory : B_plus_NEW_ConePTasym: +0.408 +0.319 +0.265
%--- Factory : B_plus_log_PT_DTF: +0.363 +0.319 +0.272
%--- Factory : B_plus_IPCHI2_OWNPV: -0.281 -0.267 -0.243
%--- Factory : K_plus_log_PT_DTF: +0.346 +0.279 +0.217
%--- Factory : B_plus_log_DIRA: -0.428 -0.346 -0.274
%--- Factory : pi_zero_resolved_CL: +0.246 +0.214 +0.167
%--- Factory : min_mumu_IPCHI2_OWNPV: +0.552 +0.383 +0.295
%--- Factory : --------------------------------
%--- Factory :
%--- Factory : Correlations between input variables and MVA response (background):
%--- Factory : --------------------------------
%--- Factory : BDT BDTG MLP
%--- Factory : gamma_max_log_PT_DTF: +0.388 +0.374 +0.335
%--- Factory : K_plus_PI0_ETA_DTF: -0.548 -0.501 -0.705
%--- Factory : B_plus_NEW_ConePTasym: +0.319 +0.306 +0.280
%--- Factory : B_plus_log_PT_DTF: +0.155 +0.171 +0.205
%--- Factory : B_plus_IPCHI2_OWNPV: -0.367 -0.183 -0.309
%--- Factory : K_plus_log_PT_DTF: +0.296 +0.281 +0.323
%--- Factory : B_plus_log_DIRA: -0.204 -0.150 -0.168
%--- Factory : pi_zero_resolved_CL: +0.286 +0.220 +0.199
%--- Factory : min_mumu_IPCHI2_OWNPV: +0.342 +0.346 +0.248
%%%%% Run II
%--- DataSetFactory : Signal -- training events : 19152 (sum of weights: 19152) - requested were 0 events
%--- DataSetFactory : Signal -- testing events : 19152 (sum of weights: 18831.2) - requested were 0 events
%--- DataSetFactory : Signal -- training and testing events: 38304 (sum of weights: 37983.2)
%--- DataSetFactory : Signal -- due to the preselection a scaling factor has been applied to the numbers of requested events: 0.336993
%--- DataSetFactory : Background -- training events : 1748 (sum of weights: 1748) - requested were 0 events
%--- DataSetFactory : Background -- testing events : 1748 (sum of weights: 1748) - requested were 0 events
%--- DataSetFactory : Background -- training and testing events: 3496 (sum of weights: 3496)
%--- DataSetFactory : Background -- due to the preselection a scaling factor has been applied to the numbers of requested events: 0.0123012
%
%--- MLP : Ranking result (top variable is best ranked)
%--- MLP : ----------------------------------------------
%--- MLP : Rank : Variable : Importance
%--- MLP : ----------------------------------------------
%--- MLP : 1 : K_plus_PI0_ETA_DTF : 2.466e+01
%--- MLP : 2 : B_plus_log_DIRA : 2.394e+01
%--- MLP : 3 : gamma_max_log_PT_DTF : 1.465e+01
%--- MLP : 4 : pi_zero_resolved_CL : 6.758e+00
%--- MLP : 5 : B_plus_log_PT_DTF : 5.412e+00
%--- MLP : 6 : K_plus_log_PT_DTF : 4.794e+00
%--- MLP : 7 : B_plus_IPCHI2_OWNPV : 4.181e+00
%--- MLP : 8 : min_mumu_IPCHI2_OWNPV : 3.781e+00
%--- MLP : 9 : B_plus_NEW_ConePTasym : 2.970e+00
%--- MLP : ----------------------------------------------
%
%--- Factory : Inter-MVA correlation matrix (signal):
%--- Factory : --------------------------------
%--- Factory : BDT BDTG MLP
%--- Factory : BDT: +1.000 +0.834 +0.606
%--- Factory : BDTG: +0.834 +1.000 +0.786
%--- Factory : MLP: +0.606 +0.786 +1.000
%--- Factory : --------------------------------
%--- Factory :
%--- Factory : Inter-MVA correlation matrix (background):
%--- Factory : --------------------------------
%--- Factory : BDT BDTG MLP
%--- Factory : BDT: +1.000 +0.862 +0.848
%--- Factory : BDTG: +0.862 +1.000 +0.787
%--- Factory : MLP: +0.848 +0.787 +1.000
%--- Factory : --------------------------------
%--- Factory :
%--- Factory : Correlations between input variables and MVA response (signal):
%--- Factory : --------------------------------
%--- Factory : BDT BDTG MLP
%--- Factory : gamma_max_log_PT_DTF: +0.373 +0.366 +0.257
%--- Factory : K_plus_PI0_ETA_DTF: -0.409 -0.441 -0.330
%--- Factory : B_plus_NEW_ConePTasym: +0.367 +0.313 +0.222
%--- Factory : B_plus_log_PT_DTF: +0.263 +0.282 +0.191
%--- Factory : B_plus_IPCHI2_OWNPV: -0.344 -0.304 -0.254
%--- Factory : K_plus_log_PT_DTF: +0.280 +0.312 +0.227
%--- Factory : B_plus_log_DIRA: -0.515 -0.406 -0.269
%--- Factory : pi_zero_resolved_CL: +0.348 +0.248 +0.184
%--- Factory : min_mumu_IPCHI2_OWNPV: +0.557 +0.428 +0.294
%--- Factory : --------------------------------
%--- Factory :
%--- Factory : Correlations between input variables and MVA response (background):
%--- Factory : --------------------------------
%--- Factory : BDT BDTG MLP
%--- Factory : gamma_max_log_PT_DTF: +0.315 +0.296 +0.199
%--- Factory : K_plus_PI0_ETA_DTF: -0.562 -0.474 -0.646
%--- Factory : B_plus_NEW_ConePTasym: +0.261 +0.228 +0.197
%--- Factory : B_plus_log_PT_DTF: +0.217 +0.202 +0.189
%--- Factory : B_plus_IPCHI2_OWNPV: -0.472 -0.354 -0.410
%--- Factory : K_plus_log_PT_DTF: +0.363 +0.335 +0.387
%--- Factory : B_plus_log_DIRA: -0.287 -0.278 -0.198
%--- Factory : pi_zero_resolved_CL: +0.254 +0.238 +0.229
%--- Factory : min_mumu_IPCHI2_OWNPV: +0.291 +0.292 +0.222
%--- Factory : --------------------------------

44
Chapters/EventSelection/MultCand.tex

@ -0,0 +1,44 @@
\subsection{Treatment of multiple candidates}\label{sec:sel-MultipleCandidates}
In most \lhcb analyses, multiple candidates are not considered as a pollution due to the relatively precise charged track selection. In the case of \piz reconstruction, especially resolved \piz, multiple candidates are abundant. The term multiple candidates refers to an event that is reconstructed with several signal candidates. This can happen \eg when a \piz meson is reconstructed using a random photon, especially in the case of very soft pions.
It is presented in \refFig{sel-Multiple} that the fraction of events containing multiple candidates decreases with increasing value of MLP response. This reflects the fact that MLP removes background events.
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.47\textwidth]{./Data/MultipleCandidates/Run_1_tagged.png} \hspace{10pt}
\includegraphics[width=0.47\textwidth]{./Data/MultipleCandidates/Run_2_tagged.png}
\captionof{figure}[Fraction of multiple candidates in data and simulation.]{Fraction of multiple candidates in data and simulation depending on the cut on MLP response. The fraction is defined as the number of all multiple candidates divided by the number of all events. This means that if \eg there is one event with two multiple candidates in a sample of ten events, the ratio would be 0.2. This represents the number of events we actually exclude as the \emph{fake} candidates are indistinguishable from \emph{true} candidates. The blue points represent data, the orange points represent the simulation sample and the green points represent the truth-matched simulation sample. } \label{fig:sel-Multiple}
\end{figure}
Removing all multiple candidates no matter if they correspond to signal or not could negatively affect the significance $\mathcal{S}$, defined in \refEq{significance}, where $S$ is the number of signal candidates and $B$ is the number of background candidates:
\begin{equation}\label{eq:significance}
\mathcal{S} = \frac{S}{\sqrt{S+B}}\,.
\end{equation}
However, as shown in \refFig{sel-Multiple}, the final fraction of multiple candidates in the sample is about 10\%. This means that in the worst possible case, 5\% of true candidates have exactly one fake partner. In this case, the significance is worsened by a factor of 0.97. The possible gain in significance if only the fake events were removed is negligible. As a small fraction of candidates (about 1\%) have more than one fake partner, the removal of all events with \emph{at least} one fake partner does not worsen the significance. As the disentanglement of the \emph{true} candidate from the \emph{fake} candidate is almost impossible and the possible loss of significance negligible, all multiple candidates are removed.
%As most events have two multiple candidates (about 8.6\% of all events is fake\footnote{Which makes up in total 17.2\% of all events.}), the effective significance is then
%\begin{equation}
%\mathcal{S'} = \frac{S-F}{\sqrt{S-F+B-F}}\,,
%\end{equation}
%where $F$ denotes the number of fake candidates. The ratio of ideal significance $\mathcal{S}$ over achievable significance $\mathcal{S'}$ can be rewritten as
%\begin{equation}
% \frac{S \sqrt{1 - \frac{2 F}{B + S}}}{S -F}% \sim \frac{1}{\sqrt{(1-F/S)}}
%\end{equation}
%and assuming $F/(S+B) = 8.6\%$ and $S\sim B$, the significance is worsened by a factor of 0.98. On the other hand, as another 5.8\% have three multiple candidates\footnote{Including both fake and real events.} or more\footnote{Up to 8!}, therefore removal of all fake candidates together with the real candidates does not worsen the overall significance.
Moreover, the multiple candidates do not only affect the shape of the background. As shown in \refFig{sel-MultipleResolution}, multiple candidates typically worsen momentum resolution as they are background. As the \piz momentum is tied to \thetak (see \refFig{anglesB+}, \thetak is proportional to the asymmetry between \Kp and \piz momenta), it is important to keep the \piz resolution as good as possible. Removing multiple candidates is therefore a crucial step in this analysis, even though it is not possible to distinguish a true candidate from a fake candidate.
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.325\textwidth]{Data/Resolution/MC/2016/new/measure_vs_true_MC_Run2_2016_TM_IDTM_rndGamma.eps}
\includegraphics[width=0.325\textwidth]{Data/Resolution/MC/2016/new/measure_vs_true_MC_Run2_2016_TM_IDTM_rndGamma_TMVA0.990000.eps}
\includegraphics[width=0.325\textwidth]{Data/Resolution/MC/2016/new/measure_vs_true_MC_Run2_2016_TM_IDTM_rndGamma_TMVA0.990000_removedMultiple.eps} \\
\raggedright
\captionof{figure}[Neutral pion momentum resolution, 2016 simulation sample.]{\piz momentum resolution in 2016 truth-matched simulation sample. The x-axis represents the \emph{true} \piz momentum, y-axis \emph{measured} \piz momentum. On the left, all events are shown. In the middle, events passing a cut on MLP response of 0.99 are shown\protect\footnotemark. On the right, on top of the cut on MLP response at 0.99, multiple candidates are removed. It is clear that removal of multiple candidates removes candidates with worse momentum resolution, especially for soft pions.} \label{fig:sel-MultipleResolution}
\end{figure}
\footnotetext{This number is arbitrarily chosen, as it is clear from the MLP training the optimal MLP cut will be very close to one.}

55
Chapters/EventSelection/Signal.tex

@ -0,0 +1,55 @@
\subsection{Signal estimation}\label{sec:sel-SignalEstimation}
In order to select as many signal events as possible with the least background, the cut value on the MLP response is optimized. As the figure of merit, the significance $\mathcal{S}$ defined in \refEq{significance} is chosen. Therefore, it is crucial to know the number of expected signal and background candidates in the data sample. Optimizing the MLP response cut using the \BuToKstmm data sample could bias our result. Therefore, the reference decay
\mbox{\BuToKstJpsi} is used to extrapolate the expected number of signal candidates.
Let the number of \Bu mesons decaying into $\Kstarp \mumu$ be $N_\mumu$. The number of all \Bu meson decays is denoted $N_{all}$. Then, the branching ratio $BR_\mumu$ can be defined as
%
\begin{equation}\label{eq:BR_definition}
BR_\mumu = N_\mumu / N_{all}\,.
\end{equation}
It is not possible to directly measure $N_\mumu$. Due to limited efficiency of the detector, $\varepsilon_{\mumu}$, the measured yield is $Y_\mumu = N_\mumu \times \varepsilon_{\mumu}$ events.
The branching ratio definition holds also for $\Bu$ decaying into $\Kstarp \jpsi$. The branching ratio, $BR_\jpsi$, is the ratio of number of \BuToKstJpsi decay events, $N_\jpsi$, relative to all decays of the \Bu meson, $N_{all}$. Let the measured yield of \BuToKstJpsi events be $Y_\jpsi$ and the efficiency of detecting the decay events $\varepsilon_{\jpsi}$. Putting this together, the following formula gives the relation between the yield of \BuToKstmm and \BuToKstJpsi:
%
\begin{equation}\label{eq:YieldEstimation1}
N_{all}
= \frac{N_{\mumu}}{BR_\mumu}
=\frac{Y_\mumu}{BR_\mumu \times \varepsilon_\mumu} = \frac{N_{\jpsi}}{BR_\jpsi}
= \frac{Y_\jpsi}{BR_\jpsi \times \varepsilon_\jpsi} \,.
\end{equation}
However, in this measurement only \BuToKstJpsi decays, where \jpsi\to\mumu, are considered. Therefore, the branching ratio of \JpsiTomm, denoted $ BR_{\jpsi\to\mumu}$, has to be added to \refEq{YieldEstimation1}. The efficiency of detecting \BuToKstJpsi, \JpsiTomm is denoted $\varepsilon_{\jpsi,\jpsi\to\mumu}$. The formula used for estimation of the signal yield $Y_\mumu$ then becomes:
%
\begin{equation}\label{eq:YieldEstimation2}
Y_\mumu = \frac{\varepsilon_\mumu}{\varepsilon_{\jpsi,\jpsi\to\mumu}}\times\frac{BR_\mumu}{BR_\jpsi \times BR_{\jpsi\to\mumu}}\times {Y_{\jpsi,\jpsi\to\mumu}}\,.
\end{equation}
%
The used values of branching fractions, taken from the \flavio package~\cite{ANA-flavio}, are presented in \refTab{BR}.
\begin{table}[hbt!]
\centering
\begin{tabular}{l|c}
$BR_{\BuToKstmm}$ & 8.668$\times10^{-7}$ \\ \hline
$BR_{\BuToKstJpsi}$ & 1.43$\times10^{-3}$ \\ \hline
$BR_{\jpsi\to\mumu}$ & 5.961$\times10^{-2}$
\end{tabular}
\captionof{table}[Branching ratios of \BuToKstmm, \BuToKstJpsi and $\jpsi\to\mumu$.]{Branching ratios of \BuToKstmm, \BuToKstJpsi and $\jpsi\to\mumu$ decays used for estimating the signal yields. The values are taken from the \flavio package~\cite{ANA-flavio}. \label{tab:BR}}
\end{table}
In order to estimate the yield of \BuToKstmm, the total efficiency of both the \BuToKstmm and the \BuToKstJpsi decay selection is needed. The total efficiency depends on many factors: the detector acceptance (acc), reconstruction (rec), tracking (tr), selection (sel) and MLP efficiencies, as expressed in~\refEq{TotEfficiency}\vspace{-0.25\baselineskip}
%
\begin{equation}\label{eq:TotEfficiency}
\varepsilon_{tot} =\varepsilon_{acc} \times \varepsilon_{rec} \times \varepsilon_{tr} \times\varepsilon_{sel} \times \varepsilon_{MLP}\,.
\end{equation}
%
However, for estimating the signal yield $Y_\mumu$, only the ratio of the signal and the reference efficiency $\varepsilon_\mumu/\varepsilon_{\jpsi}$ is needed. A lot of effects cancel out in this ratio. For practical reasons, the total efficiency is then evaluated in three steps: the acceptance efficiency $\varepsilon_{acc}$, the reconstruction, tracking, and selection efficiency $\varepsilon_{rec+tr+sel}$ and the efficiency of the multi-variate selection $\varepsilon_{MLP}$. The total efficiency then becomes \vspace{-0.25\baselineskip}
%
\begin{equation}\label{eq:EffEfficiency}
\varepsilon_{tot} =\varepsilon_{acc} \times \varepsilon_{rec+tr+sel} \times \varepsilon_{MLP}\,.
\end{equation}
The evaluation of this efficiency is described in the following section.
\input{Chapters/EventSelection/Efficiency}

73
Chapters/EventSelection/Simulation.tex

@ -0,0 +1,73 @@
\subsection{Correction to the simulation}\label{sec:sel-SimulationCorrection}
The Monte Carlo simulation sample is used to estimate the background contribution in the data and to account for detector acceptance effects. Therefore, the distributions of variables (and the correlations between them) in data and simulation have to agree. Even though there have been many recent improvements in the Monte Carlo simulation methods, the agreement is not perfect. The main difference between the simulation and the data is multiplicity: in simulation, the underlying event is under-represented.
The correction of the simulation is done by applying weights on the simulation to match the data. In order to obtain the weights, the simulated events have to pass the same selection as the data sample. On top of this selection, only \emph{true} signal candidates have to be selected: the reconstruction algorithms can reconstruct a track that does not correspond to any simulated particle. Such signal candidates have to be removed by the so-called \emph{truth-matching}.
\subsubsection[Matching of reconstructed candidates to simulated candidates]{Matching of reconstructed signal candidates to simulated candidates}\label{sec:sel-TruthMatching}
%All simulated events undergo the full detector reconstruction. That means the \emph{true} event properties are distorted by the detector effects. To obtain the true variables, dedicated tools are used. For any standard \lhcb analysis, the \texttt{BackgroundCategory} tool is typically used. This tool looks for true properties of the particles in the decay chain and categorizes the event into groups, such as combinatorial background or events with a ghost tracks. However, in this work, this tool cannot be used as the typical background categories do not cover the possible realizations of the \pizTogg decay.
As there is a neutral pion in the decay chain, it is important to make sure our \emph{true} candidates match the signal candidates we select in data.
Events where, for instance, a photon is converted into an electron or one of the photons in the \pizTogg decay is randomly assigned are still considered signal, as there is no way to assess the origin of the photon in the data sample. It is clearly visible in \refFig{RndGamma_angles} that there are no structures in the angular distribution of the events with a random photon included. Hence, these events can be considered as signal candidates, as they do not distort the angular distributions.
In order to select the \emph{true} signal candidates, an ID-based selection is applied. Each particle type has its own unique ID following the Monte Carlo Particle Numbering Scheme~\cite{PDG}. Each generated particle has its \emph{true} ID and the reconstructed ID, based on the PID response of the \lhcb detector. The ID-based selection is achieved by comparing the \emph{true} ID of the particles, their mother and their grandmother ID to the reconstructed ID. This check is applied on the whole decay chain \BuToKstmmFull except for the photons.
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.47\textwidth]{./Data/TM/RndGammas/Run_2_KplusPi0Resolved_TMed_costhetak_AllGammaContributions_normalized_fancy.eps}
\includegraphics[width=0.47\textwidth]{./Data/TM/RndGammas/Run_2_KplusPi0Resolved_TMed_costhetal_AllGammaContributions_normalized_fancy.eps}
\includegraphics[width=0.47\textwidth]{./Data/TM/RndGammas/Run_2_KplusPi0Resolved_TMed_phi_AllGammaContributions_normalized_fancy.eps}\\
\captionof{figure}[Distributions of \ctk, \ctl and $\phi$ for events with random photons.]{Normalized \ctk (left), \ctl (middle) and $\phi$ (right) distributions for simulated events where both photons are either coming from \BuToKstmm, \KstToKpPi, \pizTogg or one photon is a random hit in \ecal reconstructed as a photon. Black squares denote all events passing the \emph{true} ID requirements, excluding the photons' parents' ID. Red stars are events where both photons originate from \BuToKstmm, blue circles are events where one photon is \emph{true} and one is random. At the bottom of the figures, a ratio of the number of normalized events with only \emph{true} photons over the number of normalized events with one \emph{true} and one random photon is shown. The ratio is consistent with one.} \label{fig:RndGamma_angles}
\end{figure}
\subsubsection{Reweighting and the \sPlot technique}\label{sec:sel-sWeight} %sWeights
To account for the simulation imperfections listed above, a correction has to be applied. Very good agreement between the data and the simulation is achieved when the Monte Carlo simulation is weighted in $\pt^{\Bu}$ and \texttt{nLongTracks}, which represents the number of tracks traversing \velo, \ttracker and the T-stations. This number is strongly correlated with the overall event multiplicity. The weighting is performed as two independent weightings, as there is no correlation between $\pt^{\Bu}$ and \texttt{nLongTracks}, as can be seen in \refFig{ANA-pt_long_corr}.
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.62\textwidth]{./Data/weightPlots/2018/2018_KplusPi0Resolved_nLongTracks_B_plus_PT_DTF_Correlation_.eps}
\captionof{figure}[Correlation between $\pt^{\Bu}$ and the number of long tracks.]{Correlation between $\pt^{\Bu}$ and the number of long tracks in the 2018 data sample. The correlation coefficient is $\simeq0$, proving the variables are not correlated. \vspace{0.5\baselineskip}} \label{fig:ANA-pt_long_corr}
\end{figure}
The weights cannot be calculated directly using the data sample: the simulation sample consists only of signal candidates while in the data sample the background is also present. Therefore, the data sample has to be weighted to mimic the signal as much as possible. This is done using the \sPlot technique~\cite{sPLOT1,sPLOT2}. This technique is used to unfold the signal decay from the background by exploiting likelihood fits. The \sPlot technique is a more general case of \emph{sideband subtraction}: it provides a weight for every data point in such a way that the weighted distribution reproduces the background-subtracted distribution.
% A \emph{discriminating} variable, typically invariant mass, is chosen. This variable needs to be uncorrelated with the \emph{control} variable: variable which behavior \sPlot infers from the \emph{discriminating} variable. In the \emph{discriminating} variable a sideband region is selected, where there is no or negligible amount of signal events. \emph{Control} variable is then determined from the background and extrapolated into the signal region of the \emph{discriminating} variable. Scaled distribution of the \emph{control} variable in the sideband is then subtracted from the distribution in the signal region, resulting in pure-signal distribution of the \emph{control} variable.
Mathematically, it can be expressed using the number of signal $N_s$ and background $N_b$ events with probability density functions $s(d,c)$ and $b(d,c)$ respectively:\vspace{-0.25\baselineskip}
%
\begin{equation}\label{eq:sPlot_1}
N_s s(d,c) + N_b b(d,c) = (N_s+N_b) f(d,c)\,,
\end{equation}
%
where $d$ is the \emph{discriminating} variable and $c$ is the \emph{control} variable. $f(d,c)$ is the Probability Density Function (PDF) of combined distribution of signal and background. As the \emph{control} and \emph{discriminating} variables are uncorrelated, one can rewrite their PDFs as\vspace{-0.25\baselineskip}
%
\begin{align}\begin{split}\label{eq:sPlot_2}
s(d,c) &= s(d)\,s(c)\,,\\
b(d,c) &= b(d)\,b(c)\,.
\end{split}\end{align}
%
The goal is to obtain an arbitrary weight function $w(d)$ fulfilling\vspace{-0.25\baselineskip}
\begin{equation}\label{eq:sPlot_3}
N_s\,s(c) = (N_s+N_b) \int{f(d,c)w(d)\,\deriv{d}} = N_s s(c) \int{s(d)w(d)\,\deriv{d}} + N_b\, b(c) \int{b(d)w(d)\,\deriv{d}} \,.
\end{equation}
%
Therefore, the $w(d)$ function is chosen in a way that:\vspace{-0.25\baselineskip}
%
\begin{align}\begin{split}\label{eq:sPlot_4}
\int{s(d)w(d)\,\deriv{d}} &= 1\,,\\
\int{b(d)w(d)\,\deriv{d}} &= 0\,.
\end{split}\end{align}
%
To have the smallest statistical uncertainty on the weights, the variation of the weights given by \refEq{sPlot_5} has to be minimized \vspace{-0.25\baselineskip}
\begin{equation}\label{eq:sPlot_5}
\int{ f(d,c) w(d)^2\,\deriv{c}\,\deriv{d}} \,.
\end{equation}
The three conditions assure a unique determination of the function $w(d)$. That allows for calculating the weights for any event with property $d$, resulting in a signal-only distribution of the \emph{control} variable. These weights are then called \sWeight s.
\sWeight ed \emph{data} events in the resonant \qsq region are then used to obtain weights to correct the simulated sample. The data sample is dominated by the resonances (\refSec{sel-Charmonium}). Hence, the \BuToKstJpsi simulation sample is used to obtain the weights needed for correcting the simulation distributions. The agreement between the \sWeight ed data and weighted simulation is crucial for the next step, the multi-variate analysis. The distributions used for the multi-variate analysis are carefully validated, see \refApp{CompareVariables}, where the comparison of the \sWeight ed \emph{data} and weighted simulation for each data taking year are given. The distributions of the \sWeight ed \emph{data} and weighted simulation agree very well.

125
Chapters/EventSelection/TrigStrip.tex

@ -0,0 +1,125 @@
\subsection{Trigger selection}\label{sec:sel-TriggerSelection}
The selection of events begins at the hardware level, as described in \refSec{det_trig}. %The \BuToKstmm candidate events have to pass both stages of the trigger. %While the trigger selection is very similar in \runI and \runII, there are small differences at the high-level trigger level.
As a first step, the \BuToKstmm candidate event has to be triggered by the L0 trigger by identifying a single muon. In \hltone, the event has to be triggered either by a single detached high \pt track~\cite{ANA-SingleTrackTrig} or a muon~\cite{ANA-MuonTrig}. In \hlttwo, the events have to pass several topological criteria~\cite{ANA-HLT2Topo} or pass a tighter muon track cut. In \runII, the requirement on a single detached high \pt muon in \hltone is replaced by a more efficient kinematic cut applied on all tracks. Moreover, topological trigger selection algorithms, or lines, using two muons as input are exploited. The full list of \lhcb trigger lines used for this analysis is presented in \refTab{presel_trigList}. For a detailed description of the \runI trigger lines see~Ref.\,\cite{ANA-TrigAll}.
\begin{table}[hbt!]
\centering
\begin{tabular}{l|l}
\lone & L0Muon \\ \hline
\hltone & Hlt1TrackAllL0 (\runI) \\
& Hlt1TrackMVA (\runII)\\
& Hlt1TrackMuon\\ \hline
\hlttwo & Hlt2TopoMu2BodyBBDT\\
& Hlt2TopoMu3BodyBBDT\\
& Hlt2Topo2BodyBBDT\\
& Hlt2Topo3BodyBBDT\\
& Hlt2DiMuonDetached\\
& Hlt2TopoMuMu3BodyBBDT (\runII)\\
& Hlt2TopoMuMu2BodyBBDT (\runII)
\end{tabular}
\captionof{table}[List of applied trigger selection requirements.]{List of applied trigger requirements. For detailed definitions of the applied trigger selection algorithms see~Ref.\,\cite{ANA-TrigAll}. \label{tab:presel_trigList}}
\end{table}
The trigger decision can be either \emph{triggered on signal} (TOS) or \emph{triggered independent of signal} (TIS). That means that if the event is TOS, the signal candidate directly affected the trigger decision, while TIS means that the trigger decision is driven by a different element of the event. The simulation of TIS events is rather complicated. As the contribution of the \Bu meson TIS decisions to the signal candidates is negligible, only the \Bu meson TOS decisions are used in this analysis.
\subsection{Central selection (stripping)}\label{sec:sel-StrippingSelection}
As the trigger requires events to pass only basic topological and kinematical constraints, it is necessary to filter the events selected by the trigger lines further. Due to the size of the dataset and due to computational constraints, an additional central selection is applied. This process is called \emph{stripping} and one set of selection algorithms within stripping is called a \emph{line}. Typically, a stripping line is used by several analyses, hence the selection is still rather loose at this step.
%\url{http://lhcbdoc.web.cern.ch/lhcbdoc/stripping/config/stripping34r0p1/leptonic/strippingb2xmumu_line.html}
The cuts applied in the stripping line used in this analysis are summarized in \refTab{stripping_cuts}. Most of the requirements are kinematical, however several more specific properties of the candidates are exploited:
\begin{itemize}
\item \texttt{IsMuon} requires a track to penetrate through the detector up to the muon stations. This reduces the probability of misidentifying a hadron as a muon to 1\% while maintaining high efficiency of muon reconstruction~\cite{ANA-IsMuon}. Depending on the momentum of the track, hits in different muon stations are required. The summary of the required hits based on the track momentum is in \refTab{IsMuon}.
%
\stepcounter{table}
\begin{table}[hbt!]
\centering
\begin{tabular}{r|c}%In order to reference it after the stripping cuts table
track momentum & muon stations hit requirement\\\hline
$3\gev < p_\mu < 6\gev$ & M2 and M3 \\
$6\gev < p_\mu < 10\gev$ & M2 and M3 and (M4 or M5)\\
$ p_\mu > 10\gev$ & M2 and M3 and M4 and M5 \\
\end{tabular}
\captionof{table}[Muon stations required to trigger the IsMuon decision.]{Muon stations required to trigger the IsMuon decision as a function of momentum range. Taken from~Ref.\,\cite{ANA-IsMuon}. \label{tab:IsMuon}}
\end{table} \vspace{15pt}
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.7\textwidth]{./AnalysisSelection/IP_definition3.png}
\captionof{figure}[The impact parameter definition.]{The impact parameter definition in the specific case of \BuToKstmm. For the reader's convenience, the \emph{DIRA} angle of the \Bu meson is also shown.} \label{fig:ANA-IP-def}
\end{figure}
\item Using the established \lhcb convention in notation, impact parameter (IP) is the transverse distance of closest approach between a particle trajectory and a vertex. A naive sketch of this quantity is shown in \refFig{ANA-IP-def}.
\item \emph{DIRA} angle (direction angle) is the angle between the reconstructed momentum of the particle and the line joining the primary vertex and the \Bu decay vertex. The \Bu meson DIRA angle is shown in \refFig{ANA-IP-def}.
\item Despite \lhcb convention in notation, $\chisq_{FD}$ is not exactly the $\chisq$ of the flight distance, but the \chisq of separation of two vertices. It is calculated as the $\chisq$ of the common vertex of all tracks minus the sum of $\chisq$ for two distinct vertices.
\item At \lhcb, the particles fly through the dipole magnet. Tracks are reconstructed from hits downstream and upstream of the magnet. Due to this `gap' in the detector, the algorithm matching the tracks from subdetectors downstream and upstream of the magnet might reconstruct a track which is not induced by a real particle flying through \lhcb. Such reconstructed tracks typically have a low track-fit quality and are called \emph{ghosts}. A dedicated variable related to the track fit quality with values between 0 and 1, the \emph{ghost probability}, is assigned to each track and represents the probability of the track being a \emph{ghost} track.
\end{itemize}
\addtocounter{table}{-2} %In order to reference it before the muon stations table
\begin{table}[hbt!] \centering
\begin{tabular}{c|c}
candidate &selection\\
\hline\hline
\Bpm &4700\mev $<$ m(\Bpm) $<$ 7000\mev\\
& $\sum_{i\in daughters}\text{daughter charge} < 3$\\ %Sum of charge of particles <3
&\chisqvtxndf $<$ 8 \\
&\chisqip $<$ 16 (best PV) \\
&DIRA angle $<$ 14\mrad\\
&$\chisq_{FD}>$ 64\footnotemark[1]\\
&min(\chisqip)$>$ 9.0\\
\hline
\mupm &\pt$>$250\mev\\
&track \textit{ghost prob} $<$ 0.5\\
&min(\chisqip)$>$ 6.0\footnotemark[2]\\
&\dllmupi $>$-3\footnotemark[3]\\
\hline
\mup\mun & $m(\mup\mun)<$ 7100\mev\footnotemark[4]\\
& \chisqvtxndf $<$ 12\\
& DIRA angle $\in (2.69\rad,3.59\rad)$ \\
&min(\chisqip)$>$ 6.0\footnotemark[5]\\
& flight distance $\chisq> 9.0$\\
&\texttt{isMuon} \\
\hline
\Kstarpm & 592\mev $<$ m(\Kstarpm) $<$ 1192\mev \\
\hline
$K^+$ & track \textit{ghost prob} $<$ 0.5\\ %check
& min(\chisqip)$>$ 6.0\\
&\texttt{hasRich}\footnotemark[6]\\
\hline
$\pi^0$ &105\mev $<$ m(\piz) $<$ 165\mev \\
&$\pt(\piz)>800\mev$\footnotemark[7]\\
\hline
$\gamma$ & $\pt(\g)>$200\mev\\
% & CL(\g)$>$-99.0\\
\hline
GEC & nSPDHits$<$600 \\
& at least one PV \\
\end{tabular}
\begin{multicols}{2}
\begin{itemize} \footnotesize\setlength{\parskip}{-2pt}
\item [$^1$] In S21r0p1 and S29r2 \chisq $>$ 121.
\item [$^2$] In S21r0p1 and S29r2 min(\chisqip)$>$ 9.0.
\item [$^3$] For definition see \refSec{det_RICH}.
\item [$^4$] Only in S24r2, S28r2 and S34r0p1.
\item [$^5$] In S21r0p1 and S29r2 min(\chisqip)$>$ 9.0.
\item [$^6$] RICH subsystem registered a track in the candidate event.
\item [$^7$] In S21r0p1, $\pt(\piz)>700\mev$.
\end{itemize}
\end{multicols}
\captionof{table}[Central selection (stripping) cuts.]{Central selection (stripping) cuts for the \texttt{B2XMuMu} line. For each data-taking year, there is a dedicated version of the software. The stripping cuts slightly differ between different version of the software: S21r1p2\,(2011), S21r0p1\,(2012), S24r2\,(2015), S28r2\,(2016), S29r2\,(2017) and S34r0p1\,(2018). \label{tab:stripping_cuts}}
\end{table}

104
Chapters/EventSelection/eventSelection.tex

@ -0,0 +1,104 @@
\section{Event Selection}\label{sec:sel-EventSelection}
LHC collisions occur with a frequency of 40\mhz. Storing all the data coming into \lhcb would mean recording a data rate of 1\tbyps, which corresponds to 3.6 petabytes for every hour of collisions\footnote{The whole current \lhcb dataset would then contain more than 30 exabytes of data. To put this into perspective, it is estimated that Google, Microsoft, Amazon and Facebook combined together store 1.2 exabytes of data~\cite{ANA-data}.}. However, only in about one of 400 collisions a \bbbar quark pair is produced and the chance of a \B meson decaying into \Kstar\mumu is circa one in a million. Therefore, it is necessary to process the data quickly and to select only the required events, while maintaining very high purity and efficiency of this selection.
The selection of \BuToKstmm candidates is realized in several steps. First, the events have to pass the online (\emph{trigger}) selection. Then, tighter selection criteria are applied in a centralized (\emph{stripping}) selection. The criteria applied in these two steps are common for many \lhcb analyses. The next step is an even tighter preselection that is specific for this work. These events are further filtered by utilizing a multi-variate analysis. In order to utilize the simulation sample in the multi-variate analysis, the simulated sample is validated and corrected to match the data. The final selection is validated using available simulation samples and exploiting the reference channel \BuToKstJpsi.
\input{Chapters/EventSelection/TrigStrip}
\input{Chapters/EventSelection/Cut}
\input{Chapters/EventSelection/Simulation}
\input{Chapters/EventSelection/MVA}
\input{Chapters/EventSelection/MultCand}
\input{Chapters/EventSelection/Signal}
\input{Chapters/EventSelection/Backgrounds}
\subsection{Final selection}\label{sec:sel-FinalSelection}
Using \refEq{significance} and \refEq{YieldEstimation2}, the expected significance for each Run is estimated for many values of the MLP response. This is shown in \refFig{sel-MLPscan}. The maximum expected significance corresponds to a cut on the MLP response at a value of 0.9985 for \runI and of 0.996 for \runII. \vspace{-0.5\baselineskip}
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.67\textwidth]{./Data/FinalSelection/ExpectedSignificance.eps}
\captionof{figure}[Expected significance of the \BuToKstKppizmm selection.]{Expected significance of the \BuToKstKppizmm decay yield. The significance is obtained using \refEq{significance} and \refEq{YieldEstimation2}. The maximum significance is reached for an MLP response cut value of 0.9985 for \runI and 0.996 for \runII.} \label{fig:sel-MLPscan}
\end{figure}
The resulting mass distribution after applying the optimal MLP response cut on the full dataset obtained during \runI and \runII data taking in the \jpsi resonance region is shown in \refFig{sel-JpsiYield}. The expected signal and background yields obtained from the reference channel in the \BuToKstmm channel are depicted in \refTab{sel-yields}. The measured signal yield in the table is obtained from the fits to the signal channel presented in \refFig{sel-SignalYield}. It is worth noting here that the optimization was performed on the \BuToKstJpsi decay. As shown in \refFig{sel-MLPscan}, the MVA response value corresponding to the highest significance is not distinct and susceptible to fluctuations. Therefore, the expected \BuToKstmm decay yields are higher than the actual fitted yields.
\begin{figure}[hbt!]
\centering
% \includegraphics[width=0.49\textwidth]{./Data/FinalSelection/Run1_KplusPi0Resolved_JpsiOnly_BplusMassModel_OneCB_SingleExponential_DTF_removedMultiple_TMVA0.999133_constrained_fixShape_fixedMassWindow.eps} \hspace{-5pt} \includegraphics[width=0.49\textwidth]{./Data/FinalSelection/Run2_KplusPi0Resolved_JpsiOnly_BplusMassModel_OneCB_SingleExponential_DTF_removedMultiple_TMVA0.997074_constrained_fixShape_fixedMassWindow.eps}
\includegraphics[width=0.49\textwidth]{./Data/FinalSelection/Jpsi_Run1.eps} \hspace{-5pt} \includegraphics[width=0.49\textwidth]{./Data/FinalSelection/Jpsi_Run2.eps}
\captionof{figure}[Signal yield of the \BuToKstJpsi decay.]{Signal yield of the \BuToKstJpsi decay. The symbol $\mu(m_B)$ stands for the mean of the signal distribution, $\sigma(m_B)$ is the width of the peak. All parameters of the fit are left floating. The signal (blue) is fitted by a two-sided Crystal Ball function (for the definition, see \refApp{CrystalBall}), background (red) is described by an exponential function. The fitted signal and background yields are considered in $\pm$100\mev around the \Bu meson mass.} \label{fig:sel-JpsiYield}
\end{figure}
\begin{table}[hbt!]
\centering
\begin{tabular}{l|l|l} %ALREADY UPDATED TO THE NEW MLP
& Expected & Fitted \\
\runI & & \\ \hline
Signal & 67 & 37$\pm$10\\
Background & 14 & 49$\pm$7\\
$S/\sqrt{S+B}$ & 7.47 & 4.03 \\
& & \\
\runII & & \\ \hline
Signal &298 & 233$\pm$26\\
Background &203 & 262$\pm$17\\
$S/\sqrt{S+B}$ &13.30 & 10.49
\end{tabular}
\captionof{table}[Expected and measured signal yields in the \BuToKstmm decay.]{Expected and measured signal yields in the \BuToKstmm decay. The expected \BuToKstmm decay yields are higher than the actual fitted yields due to the optimization of the MVA response cut using only the reference \BuToKstJpsi decay. \label{tab:sel-yields}}
\end{table}
%
\begin{figure}[hbt!]
\centering
% \includegraphics[width=0.48\textwidth]{./Data/FinalSelection/Run1_KplusPi0Resolved_mumu_BplusMassModel_OneCB_SingleExponential_DTF_removedMultiple_TMVA0.9985000_constrained_fixShape_fixedMassWindow.eps} \hspace{10pt}
% \includegraphics[width=0.48\textwidth]{./Data/FinalSelection/Run2_KplusPi0Resolved_mumu_BplusMassModel_OneCB_SingleExponential_DTF_removedMultiple_TMVA0.996000_constrained_fixShape_fixedMassWindow.eps}
\includegraphics[width=0.49\textwidth]{./Data/FinalSelection/mumu_Run1.eps} \hspace{-10pt}
\includegraphics[width=0.49\textwidth]{./Data/FinalSelection/mumu_Run2.eps}
\captionof{figure}[Signal yield of the \BuToKstKppizmm decay.]{Signal yield of the \BuToKstKppizmm decay. $\mu(m_B)$ stands for the mean of the signal distribution, $\sigma(m_B)$ is the width of the peak. The signal shape is constrained to the shape of the signal yield in the resonance region shown in \refFig{sel-JpsiYield}. The signal (blue) is fitted by two-sided Crystal Ball (for the definition, see \refApp{CrystalBall}), background (red) is described by exponential function.} \label{fig:sel-SignalYield}
\end{figure}
The selected \BuToKstmm candidates are divided into \emph{four} \qsq bins: [0.1-4.0]\gevgev (excluding 0.98-1.1\gevgev in order to remove the $\phi\to\mumu$ contribution), [4.0-8.0]\gevgev, [11.0-12.5]\gevgev and [15.0-18.0]\gevgev. The measured mass distributions in these bins are presented in \refApp{yield_q2}. The measured signal and background yields together with their significance are shown in \refFig{sel-q2Yield}. The significance is also compared to a study by the \cms collaboration of \runI data exploiting \BuToKstKspimm~\cite{ANA-CMS-angular}\footnote{This comparison is chosen as the significance in the analysis by the \cms collaboration is comparable to the significance presented here. The other previous measurements discussed in \refSec{ANA_previous} were performed either in an experimentally cleaner environment or with only charged particles in the final state, reaching higher significance values.}.
The \KstToKsPi, \KS\to\pip\pim channel is detected more efficiently with better resolution, as the final state consists of charged particles only. The measured significance is higher than the one in the study done by the \cms collaboration, proving the potential of this analysis to measure all angular observables mentioned in \refEq{Si_definition}. It is worth noting here that the \cms collaboration successfully measured only the \FL and \AFB angular parameters.
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.75\textwidth]{./Data/FinalSelection/KplusPi0Resolved_Q2_Run12.eps}
\captionof{figure}[Yields and significance compared to a CMS measurement.]{\textcolor{red}{Signal} (red) and \textcolor{black}{background} (black) yields and measured \textbf{\textcolor{ao}{significance}} (blue) in the combined \runI and \runII dataset. In the first bin $\qsq\in[0,4]$, the $\phi\to\mu\mu$ contribution ($\qsq\in[0.98,1.1]$) is removed. The \textbf{\textcolor{ao(english)}{green}} data are taken from a study done by \cms. The study uses the \BuToKstKspimm decay data from \runI~\cite{ANA-CMS-angular}.} \label{fig:sel-q2Yield}
\end{figure}
The numbers of signal candidates per data-taking year are given in \refTab{sel-selection_yields} for the reference \BuToKstJpsi and in \refTab{sel-selection_yields_rare} for the signal \BuToKstmm channel. To put the final number of candidates in perspective, the number of candidates after each selection step is included as well.
\begin{table}[hbt!]
\centering
\begin{tabular}{p{3.4cm}|cccccc}
selection $\setminus$ year & 2011 & 2012 & 2015 & 2016 & 2017 & 2018 \\ \hline
Trigger and online & 23\,718\,772 & 58\,047\,021 & 9\,822\,137 & 57\,955\,614 & 32\,702\,706 & 54\,868\,587\\
Preselection & 31197 & 67191 & 13769 & 89310 & 90460 & 90660\\
MLP selection & 4409 & 8637 & 2483 & 16475 & 17885 & 17659\\
\end{tabular}
\captionof{table}[Number of reference channel candidates for each selection stage.]{Number of the \textbf{reference channel} event candidates after the trigger and the stripping selection, preselection and MLP selection. An event candidate is any event passing the selection step; therefore, this includes background candidates. In the last step, the background is mostly combinatorial. Note the discrepancy between the events passing the trigger and online selection in the 2017 data-taking year compared to 2016 and 2018: this is a result of stricter stripping selection. For details see \refTab{stripping_cuts}. \label{tab:sel-selection_yields}}
\end{table}
\begin{table}[hbt!]
\centering
\begin{tabular}{p{3.4cm}|cccccc}
selection $\setminus$ year & 2011 & 2012 & 2015 & 2016 & 2017 & 2018 \\ \hline
Trigger and online & 10\,972\,833 & 28\,455\,565 & 5\,322\,454 & 31\,999\,312 & 16\,969\,963 & 30\,897\,345\\
Preselection & 3134 & 6881 & 1288 & 10017 & 10016 & 10090\\
MLP selection & 42 & 101 & 39 & 216 & 242 & 241\\
\end{tabular}
\captionof{table}[Number of signal channel candidates for each selection stage.]{Number of the \textbf{signal channel} event candidates after the trigger and the stripping selection, preselection and MLP selection. An event candidate is any event passing the selection step; therefore, this includes background candidates. In the last step, the background is mostly combinatorial. Note the discrepancy between the events passing the trigger and online selection in the 2017 data-taking year compared to 2016 and 2018: this is a result of stricter stripping selection. For details see \refTab{stripping_cuts}. \label{tab:sel-selection_yields_rare}}
\end{table}
\clearpage

21
Chapters/Introduction/intro.tex

@ -0,0 +1,21 @@
\raggedbottom
\section*{Introduction}
\addcontentsline{toc}{section}{\protect\numberline{}Introduction}
The Standard Model of particles is currently the best theory describing the very basic building blocks of the universe. Except for gravity, it describes all fundamental interactions between the elementary particles. In the last decades, it has been improved, probed and many of its predictions have been confirmed. Despite the tremendous success of the Standard Model, there are several unexplained phenomena: the non-zero mass of neutrinos, the excess of matter over antimatter in the universe or the presence of dark matter in the universe.
Today, in the high-energy experiment era, the focus is not on confirming the Standard Model, but on finding inconsistencies and processes where the theory breaks down. The Large Hadron Collider at CERN, the most powerful particle accelerator to date, is designed to test the Standard Model parameters and its boundaries.
There are four large experiments at the Large Hadron Collider, \alice , \atlas, \cms and the \lhcb experiment. \alice main design goal is to investigate the origins of the universe, \atlas and \cms are focused on measuring the Standard Model properties, especially the ones of the Higgs and electroweak bosons, and \lhcb focuses on precise measurements of the predicted Standard Model parameters, searching directly for deviations from the predictions. Possible extensions of the Standard Model, New Physics, can contribute to the quantum loops of the Standard Model. As the particles contributing to the quantum loops are not limited by the available collision energy, the energy scale probed is beyond the energy scale probed by direct searches. This approach requires Standard Model predictions or constraints with similar or better accuracy than experimental measurements.
One of the smoking guns of New Physics contribution to the Standard Model are the decays of the \bquark quark to an \squark quark and a pair of leptons. These decays can be measured through the decays of a \B meson into \Kstar\mumu. Many previous measurements of this decay show tensions with the Standard Model predictions: either the branching fraction measurements~\cite{SM-LHCb_BR, ANA-BR-BPHI-LHCb} or the angular analyses~\cite{ANA-LHCb-angular3, ANA-LHCb-angular4, ANA-LHCb-angular1, ANA-LHCb-angular2, ANA-Belle_P5, ANA-CMS_P5, ANA-ATLAS_P5}. This work represents a significant step towards the angular analysis of the \BuToKstmm decay, where the \Kstarp decays into \Kp\piz using the \lhcb dataset. The aim is to validate the observed anomalies, adding another jigsaw puzzle piece into the physics beyond the Standard Model picture.
In this thesis, the Standard Model is introduced with an emphasis on $\decay{B}{\Kstar\mumu}$ decays. Then, the \lhcb experimental setup is explained. In the third section, the \BuToKstmm decay topology and observables are described. Then, the analysis method is briefly explained with an emphasis on the difficulties of this analysis. Section five describes the methods used for the signal candidates selection.
The angular acceptance resolution and correction to the signal candidates is discussed in section six. The seventh section introduces the angular modeling that is validated in section eight by fitting the simulation sample and the \BuToKstJpsi channel in data. The angular model is further tested by pseudoexperiments as explained in section nine. The expected statistical uncertainty of the fit to the rare \BuToKstKppizmm decay is estimated. Lastly, the tenth section is dedicated to the author's work on the tracking efficiency measurement at the \lhcb experiment. Throughout the thesis, natural units are assumed.
\clearpage
%\newpage\null\thispagestyle{empty}\newpage

156
Chapters/LHCbExperiment/lhcb.tex

@ -0,0 +1,156 @@
\section{Experimental setup}\label{sec:detector}
In this section, the experiment used for the analysis presented here is described. The detector is placed in an accelerator facility, which is briefly introduced. The \lhcb subdetectors used in the analysis are presented, as well as the data-acquisition procedure.
\subsection{The Large Hadron Collider}\label{sec:det_LHC}
The story of the Large Hadron Collider (\lhc)~\cite{LHCdesign} reaches all the way back to 1976, when the particle physics community started to think about building the \lep (Large Electron Positron) collider. \lep was a 27 kilometer circular collider, placed in a tunnel at \cern (Conseil europ\'{e}en pour la recherche nucl\'{e}aire) near Geneva, Switzerland. CERN is the largest physics laboratory in the world. Its main purpose is to provide \emph{a unique range of particle accelerator facilities that enable research at the forefront of human knowledge}, \emph{perform world-class research in fundamental physics} and \emph{unite people from all over the world to push the frontiers of science and technology, for the benefit of all}~\cite{CERNmission}.
\lep started its full operation in 1989 and was functioning until 2001, when the dismantling started, making room for the LHC~\cite{LEPstory} to be placed in the same tunnel.
The first discussions about replacing the \en-\ep collider by a hadron collider started as early as 1984. The construction of \lhc was approved ten years later, in December 1994~\cite{LHChistory}. The construction began in 1998 and the first collisions were delivered in 2008. However, during the initial testing, one of the superconducting magnets quenched\footnote{Quenching is when a part of the superconducting coil returns to its normal-conducting state.}. As a result, 53 magnets were damaged, postponing the data taking to 2009.
%\todoblue[inline]{They found Gallo-Roman ruins at CMS dig site in 1998, leading to a half-year delay. First beam happened on September 10 2008 }
The \lhc with its circumference of 27\km is up to this day the largest and most powerful particle collider in the world. The \lhc sits on the top of the \cern's accelerator complex, a succession of machines that accelerate the beam of particles to higher and higher energies, as illustrated in \refFig{CERNaccelerator}. In the \lhc's main operation mode, the accelerated particles are protons; however, there are periods where heavier ions are accelerated. The \lhc itself consists of two circular storage rings where protons are injected with an energy of 450\gev and they are accelerated to energies up to 7\tev. The accelerated protons are collided at four main interaction points, each surrounded by a large detector: \alice, \atlas, \cms and \lhcb.
So far, the \lhc has operated in two longer periods, called \runI (2010-2012) and \runII (2015-2018). In \runI, the maximal nominal energy was 3.5\tev with increase to 4\tev in 2012. \runI was followed by a maintenance period, long shut-down one (LS1), where the dipoles of the \lhc were improved, resulting in a maximal nominal energy of 6.5\tev in \runII. At the moment, the \lhc is in the second long shut-down period (LS2), where \alice and \lhcb are undergoing major upgrades. This will be followed by \runIII with a maximal nominal energy of 7\tev~\cite{LHCschedule}. The duration of \runIII is foreseen to be three years and the expected performance can be found in~Ref.\,\cite{RunIIIperformance}.
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.99\textwidth]{./Detector/CERNacceleratorComplex.jpg}
\captionof{figure}[\cern's accelerator complex.]{\cern's accelerator complex. The protons are initially accelerated by \textcolor{col_LINAC2}{\textbf{LINAC\,2}} and brought to \textcolor{col_BOOSTER}{\textbf{BOOSTER}}. They continue to \textcolor{col_PS}{\textbf{PS}}, \textcolor{col_SPS}{\textbf{SPS}} and from there they are finally steered into \textcolor{col_LHC}{\textbf{\lhc}}. Heavier ions follow a similar path, they are initally accelerated in \textcolor{col_LINAC3}{\textbf{LINAC\,3}}, they continue to \textcolor{col_LEIR}{\textbf{LEIR}}. From there they are step by step brought to \textcolor{col_PS}{\textbf{PS}}, \textcolor{col_SPS}{\textbf{SPS}} and \textcolor{col_LHC}{\textbf{\lhc}}. Taken from~Ref.\,\cite{CERNaccelerator}.} \label{fig:CERNaccelerator}
\end{figure}
\clearpage
\subsection{The LHCb experiment}\label{sec:det_LHCb}
The \lhcb detector~\cite{LHCb-TDR-OG,LHCb-TDR-NEW,LHCb-LHC} is a single arm forward spectrometer located at Point~8 (alongside the Geneva airport runway) at the \lhc ring. The detector was mainly designed for precision measurements of CP violation and to study rare decays in the B and D meson systems~\cite{LHCb-TDR-OG}.%\footnote{The technical proposal also mentions studies of rare \tauon decay. However, with the current LHCb design, the measurement is not possible due to \todo[inline]{Ask why we can't measure taons}.}.
\begin{wrapfigure}[19]{r}{0.5\textwidth} \vspace{-20pt}
\centering
\includegraphics[width=0.48\textwidth]{./Detector/bb_acc_scheme_14TeV.pdf}
\captionof{figure}[The production angle of the \bbbar pair at $\sqs=14\tev$.]{The production angle of the \bbbar pair at the center-of-mass energy of $14\tev$. Red color represents the \lhcb acceptance. 27\% of all produced \bquark or \bquarkbar quarks are produced in the LHCb acceptance. In a standard general-purpose detector (assuming the acceptance along the beam as $-180 - 180$\mrad% $|\eta|<2.4$
), 49\% of \bquark or \bquarkbar quarks are produced in its acceptance. Taken from\,\cite{bbangles}.} \label{fig:lhc_bb_acc}
\end{wrapfigure}
The full \lhcb design is described in \refFig{LHCb-layout}. The LHCb coordinate system originates at the nominal interaction point. The $z$-axis is defined along the center of the beam, its positive part pointing from the interaction point into the detector and negative part pointing from the interaction point away from the detector. The $y$-axis is defined upwards in vertical direction from the interaction point, the $x$-axis similarly in horizontal direction. In order for the coordinate system to be right-handed, the positive $x$-axis is defined pointing to the left side, viewing in the positive direction of the $z$-axis. This allows for the definition of azimuthal angle $\phi$, spherical angle $\theta$ and pseudorapidity\footnote{ Pseudorapidity $\eta$ is defined as $\eta=-\ln\left(\tan\sfrac{\theta}{2}\right)$, where the spherical angle $\theta$ is the angle between the beam-pipe and particle's trajectory.}. For the reader's convenience, terms \emph{downstream} (in the direction of beam into the \lhcb acceptance, \ie beam direction towards the \atlas experiment) and \emph{upstream} (beam direction towards the \cms experiment) are defined~\cite{LHCb-beamMon}.
Since heavy quarks are mainly produced in the forward direction~\cite{QQproduction}, the \lhcb is designed to cover the very forward region as illustrated in \refFig{lhc_bb_acc}. The \lhcb geometrical acceptance is $10-300$\mrad in the $x-z$ plane and $10-250$\mrad in the $y-z$ plane.% The acceptance along the beam is $15-327$\mrad.
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.99\textwidth]{./Detector/LHCbDetector2.png}
\captionof{figure}[The \lhcb detector.]{The \lhcb detector. Taken from\,\cite{LHCb-layout}.} \label{fig:LHCb-layout}
\end{figure}
In order to fulfill the design goals, the \lhcb detector has to have very high track reconstruction efficiency, good \pion-\kaon separation over a large energy range and excellent decay-time resolution. This is realized by several subsystems described in the following sections. Thanks to this universal detector design \lhcb does not only excel in precision measurements of B and D mesons, but also studies many new exotic states and particles~\cite{Xi2_states, Pentaquark, Pentaquark2, Tetraquark}, and performs precision measurements of gauge boson properties~\cite{Wev, WandZ, Ztobb}. Recent developments also allow for studies of heavy-ion collisions, for example excited \bbbar resonance states $\Upsilonres(nS)$ are observed to be suppressed in proton-lead collisions compared to proton-proton collision suppression, more so with larger $n$ (corresponding to higher excited states)~\cite{IFT-bottomium}. Moreover, \lhcb is the only experiment at the \lhc that is able to operate also in a fixed target mode. In the fixed target mode the proton beam collides with a gas target in the beam pipe. This was initially intended as a luminosity measurement~\cite{SMOG}. Exploiting this program, \eg a measurement of the antiproton production cross-section in proton-helium collisions was carried out, impacting the interpretation of results on antiproton cosmic rays from space-borne experiments~\cite{IFT-Antiproton}.
\subsubsection{Tracking system and vertex reconstruction}\label{sec:det_tracking_vertexing}
%Vertex and track reconstruction are the core of \lhcb measurements.
Precise vertex reconstruction is crucial for precision measurements of \bquark hadron decays as displaced secondary vertices are typical for them.
\lhcb also has to have high event reconstruction efficiencies while maintaining high-speed online (trigger) selection (for more details see \refSec{det_trig}) in order to fully exploit its physics potential. Furthermore, since the main limitation for the momentum resolution is multiple scattering, the amount of material in the detector has to be minimal.
\clearpage
The vertex reconstruction is realized by the \velo (VErtex LOcator) detector surrounding the interaction point~\cite{LHCb-TDR-VELO}. The \velo consists of two retractable halves placed along the beam direction, each consisting of 21 silicon micro-strip stations. The strips are arranged in the $r-\phi$ plane~\cite{LHCb-Performance}. An illustration of the strips arrangement is shown in \refFig{LHCb-VELO}. This arrangement has the natural advantage of having the smallest segments closest to the beam. The retractable halves are open during beam setup. Once the beam in the \lhc is stable, the detector halves close around the beam, placing the closest sensors only 8\mm away from the beam itself.
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.418\textwidth]{./Detector/VELO_right.png}% \hspace{1cm}
\includegraphics[width=0.418\textwidth]{./Detector/VELO_left.png}
\captionof{figure}[The \velo silicon sensor sketch.]{The \velo silicon sensor sketch
with $R$ sensors in blue and $\phi$ sensors in red. Taken
from \,\cite{LHCb-LHC}.} \label{fig:LHCb-VELO}
\end{figure}
For the physics program of \lhcb it is important to reconstruct the vertex position, displaced tracks and vertices, impact parameter and decay time with high resolution and precision. The impact parameter (for the definition see \refSec{sel-StrippingSelection}) resolution of the \velo in the $x$ and $y$ direction is (16+24/$\pt[\text{GeV}]$)\mum, the vertex resolution goes down to 10\mum in the $x$ and $y$ direction and 50\mum in the $z$-direction~\cite{LHCb-Run2Performance}. The decay time resolution for \B meson decays is around 40\fs~\cite{LHCb-Run2Performance}. Despite operating in an environment with very high radiation, the \velo detector's performance is stable throughout the years.
The momentum information for charged tracks is obtained by combining information from the \velo and three subdetectors downstream of the \velo: \ttracker (trigger tracker), \intr (Inner Tracker) and \ot (Outer Tracker). The \velo can measure particle tracks and decay vertices, however, there is no momentum information. For this, a large magnet downstream of the \velo is used. The magnet has bending power of 4\,Tm. This field is strong enough to allow the tracking system to perform momentum measurements with a good precision of tracks with momenta up to 200\gev~\cite{LHCb-TDR-MAG}. The magnetic field has two configurations, \emph{down}, when the dipole field is along the positive $y$-axis, and \emph{up}, when the dipole field is along the negative $y$-axis. The $x-z$ plane is then referred to as \emph{bending plane} and $y-z$ as the \emph{non-bending plane}. The polarity of the magnetic field is periodically changed in order to control the detection asymmetries. The detection asymmetries need to be as small as possible for CP violation studies~\cite{Polarity}.
The \ttracker detector is placed upstream of the magnet and consists of four silicon microstrip planes~\cite{LHCb-TDR-TT}. The \ttracker is especially important for fast trigger selection, as explained later in \refSec{det_trig}.
The Inner Tracker~\cite{LHCb-TDR-IT}, and the Outer Tracker~\cite{LHCb-TDR-OT} are located downstream of the magnet. The Inner Tracker is made of three silicon microstrip stations surrounding the beam pipe. The hit resolution of the \intr is 50\mum. The \ot surrounds the \intr. It consists of straw tubes and has a hit resolution of 170\mum. As mentioned previously, the limiting factor in the momentum resolution is multiple scattering and not the spatial resolution of the tracking detectors. The hits in these stations are matched to the hits in the \velo and the \ttracker, allowing for momentum measurement. The overall relative momentum resolution ranges from 0.4\% (tracks with momentum $\sim$ 5\gev) to 0.6\% (tracks with momentum $\sim$ 100\gev).
\subsubsection{RICH detectors}\label{sec:det_RICH}
As mentioned in the beginning of this section, for precision measurement of rare \bquark and \cquark decays as well as CP violation measurements, it is crucial to distinguish charged pions from charged kaons. The particle identification (PID) is achieved using two ring-imaging Cherenkov (\rich) detectors~\cite{LHCb-TDR-RICH}. One is placed upstream of the magnet, one is placed downstream of the magnet, as shown in \refFig{LHCb-layout}. This placement allows for PID of tracks with momentum ranging from 1\,\gev to 150\gev while covering the whole \lhcb geometrical acceptance. The efficiency and fake rate of the \rich discrimination between pions and kaons is displayed in \refFig{LHCb-PID_Kpi}. In the figure, two configurations are shown, $\Delta LL(\kaon-\pion) > 0$ and $\Delta LL(\kaon-\pion) > 5$\footnote{In the analysis, $\Delta LL(\kaon-\pion)$ is typically denoted as \dllkpi.}, where $\Delta LL(\kaon-\pion)$ is the difference in logarithmic likelihood obtained by combining information from all PID detectors between the kaon and pion hypotheses: $\log\mathcal{L}_{\kaon}-\log\mathcal{L}_{\pion}$. The reader can imagine $\Delta LL(\kaon-\pion)$ as a measure of the probability that a kaon candidate is not a pion.
The \rich detector does not only discriminate between pions and kaons, but also deuterons and protons. The PID of electrons, muons, and photons is obtained using the muon system and the calorimeters.
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.48\textwidth]{./Detector/RICHPerf2012MagDown.pdf}
\includegraphics[width=0.48\textwidth]{./Detector/RICHPerf2016MagDown.pdf}
\captionof{figure}[Efficiency and fake rate of the \rich identification.]{Efficiency (open points) and fake rate (full points) of the \rich identification for the 2012 (left) and the 2016 (right) data as a function of momentum. Two settings are shown, $\Delta LL(\kaon-\pion) > 0$ and $\Delta LL(\kaon-\pion) > 5$ (see the full text for the definition). There is a small improvement in \runII for particles below 15\gev. Modified from\,\cite{LHCb-Performance}.} \label{fig:LHCb-PID_Kpi}
\end{figure}
\subsubsection{Calorimeter system}\label{sec:det_CALO}
The \lhcb calorimeter system consists of four calorimeters: \spd (Scintillating Pad Detector), \presh (Preshower), \ecal (electromagnetic calorimeter) and \hcal (hadronic calorimeter). The main goal of the system is fast identification and energy measurement of electrons, photons and hadrons~\cite{LHCb-TDR-CALO}. A sketch of the calorimeter system is in \refFig{LHCb-CALO-sys}.
\begin{wrapfigure}[17]{r}{0.5\textwidth}
\centering
\includegraphics[width=0.45\textwidth]{./Detector/LHCbCALO2.png}
\captionof{figure}[\lhcb calorimeter system.]{\lhcb calorimeter system. Electrons and hadrons are registered in \spd. \spd readout is limited to 0 (no hit) and 1 (hit). Electrons and photons are stopped in a lead wall ($X_0$ denotes the radiation length), creating a shower registered by \presh and stopped in \ecal. Hadrons leave signal in all the detectors in the calorimeter system.} \label{fig:LHCb-CALO-sys}
\end{wrapfigure}
The Scintillating pad detector is located upstream of a lead wall that creates electron and photon showers, while the \presh is located downstream of the wall. They allow for clear separation between electron and photon showers, as photons do not leave a signal in the \spd~\cite{LHCb-TDR-CALO}.
The electromagnetic calorimeter is made of 66 layers of 4\mm thick scintillator between 2\mm thick lead plates, corresponding to 25 radiation lengths. The \emph{shashlik} design is budget-wise, reliable and allows for fast response time (25\ns corresponding to a 40\mhz read-out), as the \ecal is crucial for the trigger selection. This design also has good radiation resistance~\cite{LHCb-TDR-CALO}.
In order to separate signal from background in \bquark decays with \piz mesons and photons or to study lepton-flavor-universality, the photon and electron reconstruction has to be accurate: spatial and energy resolution has to be very good. The \ecal energy resolution is $\sfrac{\sigma}{E} = \sfrac{0.1}{\sqrt{E[\text{GeV}]}} \oplus 0.01$, where $\oplus$ denotes the sum in quadrature, which satisfies this requirement. The \ecal transverse granularity varies as particle flux increases towards the beam in the $x-y$ plane in order to minimize pile-up of hits in the detector, ensuring good signal-background separation~\cite{LHCb-TDR-CALO}.
The main purpose of the \hcal is to provide very fast response while having minimal detector dead-time. It measures the energy deposited by hadrons, contributing significantly to the first stage of the trigger selection of events, where the selection of high-energy events is performed.
\subsubsection{Muon system}\label{sec:det_MUON}
The most downstream \lhcb subdetector is the muon system. It consists of five rectangular stations M1-M5: M1 is equipped with triple gas-electron-multipliers, M2-M5 are equipped with multi-wire-proportional chambers and interleaved with iron absorbers to stop very-high-energy hadrons that reach the muon station and to select penetrating muons~\cite{LHCb-TDR-MUON}.
The main purpose of this system is the trigger selection and the PID of muons. Muons detected in the muon system have a minimum momentum of 3\gev, as they have to pass the other \lhcb subdetectors. Muons are reconstructed with an efficiency of 97\%, while the pion misidentification probability varies with momentum between one and three percent~\cite{LHCb-TDR-MUON}.
\subsubsection{Trigger system and data flow}\label{sec:det_trig}
At \lhcb, the proton bunches collide at a rate of 40\mhz. With every bunch crossing, one or two inelastic proton-proton collisions occur. In 2.5\textperthousand\xspace of the collisions a \bbbar pair is produced. In about 15\% of such events at least one B meson is produced with all its decay products in the \lhcb acceptance~\cite{LHCb-LHC}. Moreover, the typical branching ratios of B mesons used in CP violation studies are less than $10^{-3}$ and in the case of rare \bquark decays the branching ratios are less than $10^{-6}$. %Totalling at 3.75\times 10^{-10}
Therefore, a fast and efficient online selection of events is essential to fully exploit the data while keeping the data flow level manageable~\cite{LHCb-TDR-TRIG}. The rate of events saved for physics analysis is 2-5\khz in \runI and 12.5\khz in \runII~\cite{LHCb-Performance,LHCb-Run2Performance}.
The \lhcb online selection, commonly called \emph{trigger}, is composed of a set of algorithms that classify (a part of) events as interesting for further analysis called \emph{lines}~\cite{LHCb-Run2Performance}. The lines are applied in two stages: Hardware level-0 trigger and software high-level trigger.
The Level-0 (\lone) trigger's purpose is to achieve a readout rate of 1.1\mhz with a fixed latency of 4\mus~\cite{LHCb-TDR-DAQ}. \lone trigger lines use information about the deposited energy from the calorimeters and muon stations, selecting events with high \pt or \et signatures~\cite{LHCb-TDR-ONLINE}.
The High-level trigger (\hlt) was significantly improved during LS1 as the computing resources doubled~\cite{TriggerResources}. In \runI, the \hlt was divided into two levels: \hltone and \hlttwo. In \hltone, partial reconstruction of the event was performed, reducing the event rate to about 80\khz. In \hlttwo the full event reconstruction was executed. Where possible, \hlt used offline-like algorithms with some simplifications due to time constraints~\cite{LHCb-Performance}. \hltone and \hlttwo were processed independently in \runI. In \runII however, the events passing \hltone were buffered on disk, online alignment and calibration of the detector were performed and \hlttwo then performed a \emph{full offline-like event reconstruction}. This allowed for better exploitation of \emph{exclusive} lines (lines selecting a specific final state)\footnote{\emph{Inclusive} lines select events with topologies typical for a given decay, looking for signatures such as displaced vertex or dimuons.}. As the reconstruction is performed online in a timely manner, large quantities of data can be processed fast, leading to more efficient data taking and faster publication of early measurement results, \eg~Ref.\,\cite{EM-jpsi,EM-charm}. The \runI and \runII trigger schemes are shown in \refFig{LHCb-trig}.
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.4\textwidth]{./Detector/LHCb_Trigger_RunI_May2015.pdf} \hspace{0.05\textwidth}
\includegraphics[width=0.4\textwidth]{./Detector/LHCb_Trigger_RunII_May2015.pdf}
\captionof{figure}[\lhcb trigger scheme.]{\lhcb trigger scheme in 2012 (left) and 2015 (right). The data acquisition starts at 40\mhz bunch crossing rate. This is reduced by a \lone hardware trigger to 1.1\mhz by selecting events with high \pt or \et signatures. These events are further selected by a software trigger: in 2012, this was done as a selection of inclusive and exclusive trigger lines, while in 2015, full offline-like event selection is performed thanks to full online detector calibration and alignment. In 2012, final output of 5\khz was written to storage in three streams, in 2015 the final output was 12.5\khz of fully-reconstructed events. Taken from~Ref.\,\cite{LHCb-trigger-diagram}.} \label{fig:LHCb-trig}
\end{figure}
\subsubsection{Simulation}\label{sec:det_Sim}
In any high energy experiment, Monte Carlo~\cite{Monte-Carlo} simulation samples are needed to understand experimental conditions and the detector performance. Today, a simulation is a complicated project requiring vast computing power~\cite{LHCb-TDR-SOFT}. The generation of events used by the \lhcb collaboration~\cite{LHCb-TDR-COMP} is realized by the \gauss\footnote{Named after C. F. Gauss, German mathematician, making significant contributions to number theory, geometry, probability theory and other fields.} simulation framework~\cite{LHCb-Gauss}. The events are initially generated using \pythia\footnote{Named after Pythia, Oracle of Delphi. Pythia was channeling prophecies from the Greek god Apollo.}~\cite{Pythia1,Pythia2}. \pythia simulates the proton-proton collision according to the Standard Model (although it is also possible to simulate New Physics processes) and the hadronization of the produced quarks and gluons. The decays of B mesons are generated via \evtgen~\cite{EvtGen}. For this work, generating full dataset containing all kinds of final-state particles is not feasible. Therefore, events not containing a \Bu meson are immediately disregarded. Once a \Bu meson is found, it is forced to decay into \Kstarp\mumu. The generated events then interact with the detector, which is simulated by \geant\footnote{\geant stands for GEometry ANd Tracking.}\cite{Geant1,Geant2}. \vspace{\baselineskip}
The digitization of the detector response is simulated using \boole\footnote{Named after G. Boole, the founder of boolean algebra.}~\cite{Boole}. At this step, noise is added to the detector response. The Level-0 trigger is also simulated in \boole, as it is purely a hardware trigger. From there on, the simulation is steered into the same flow as the real data: the high-level trigger response is emulated by \moore\footnote{G. E. Moore is the author of Moore's law, the observation that the number of transistors on a microchip doubles every two years.}\cite{Moore}, and the events are reconstructed using \brunel\footnote{I. K. Brunel was a British engineer, playing an important role in the industrial revolution.}~\cite{Brunel}. The simulation then mimics the real data and its reconstruction. \vspace{\baselineskip} \vspace{\baselineskip}
\clearpage

9
Chapters/ParameterMeasurement/MainFit_Ref_FinalToys_Run12.tex

@ -0,0 +1,9 @@
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/ctk_JpsiFit_FinalToys_1BIN_bin0_SimultaneousFit_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/ctl_JpsiFit_FinalToys_1BIN_bin0_SimultaneousFit_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/phi_JpsiFit_FinalToys_1BIN_bin0_SimultaneousFit_Run12_AllPDFs.eps}\\
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/m_JpsiFit_FinalToys_1BIN_bin0_SimultaneousFit_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/mkpi_JpsiFit_FinalToys_1BIN_bin0_SimultaneousFit_Run12_AllPDFs.eps}
\captionof{figure}[FILL ME]{FILL ME}\label{fig:MainFit-Ref-toy}
\end{figure}

9
Chapters/ParameterMeasurement/MainFit_Ref_Run12.tex

@ -0,0 +1,9 @@
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/ctk_JpsiFit_1BIN_bin0_SimultaneousFit_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/ctl_JpsiFit_1BIN_bin0_SimultaneousFit_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/phi_JpsiFit_1BIN_bin0_SimultaneousFit_Run12_AllPDFs.eps}\\
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/m_JpsiFit_1BIN_bin0_SimultaneousFit_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/mkpi_JpsiFit_1BIN_bin0_SimultaneousFit_Run12_AllPDFs.eps}
\captionof{figure}[FILL ME]{FILL ME}\label{fig:MainFit-Ref}
\end{figure}

9
Chapters/ParameterMeasurement/MainFit_Ref_folding0_Run12.tex

@ -0,0 +1,9 @@
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/ctk_JpsiFit_1BIN_bin0_SimultaneousFit_folding0_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/ctl_JpsiFit_1BIN_bin0_SimultaneousFit_folding0_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/phi_JpsiFit_1BIN_bin0_SimultaneousFit_folding0_Run12_AllPDFs.eps}\\
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/m_JpsiFit_1BIN_bin0_SimultaneousFit_folding0_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/mkpi_JpsiFit_1BIN_bin0_SimultaneousFit_folding0_Run12_AllPDFs.eps}
\captionof{figure}[FILL ME]{FILL ME}\label{fig:MainFit-Ref-fld0}
\end{figure}

9
Chapters/ParameterMeasurement/MainFit_Ref_folding1_Run12.tex

@ -0,0 +1,9 @@
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/ctk_JpsiFit_1BIN_bin0_SimultaneousFit_folding1_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/ctl_JpsiFit_1BIN_bin0_SimultaneousFit_folding1_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/phi_JpsiFit_1BIN_bin0_SimultaneousFit_folding1_Run12_AllPDFs.eps}\\
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/m_JpsiFit_1BIN_bin0_SimultaneousFit_folding1_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/mkpi_JpsiFit_1BIN_bin0_SimultaneousFit_folding1_Run12_AllPDFs.eps}
\captionof{figure}[FILL ME]{FILL ME}\label{fig:MainFit-Ref-fld1}
\end{figure}

9
Chapters/ParameterMeasurement/MainFit_Ref_folding2_Run12.tex

@ -0,0 +1,9 @@
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/ctk_JpsiFit_1BIN_bin0_SimultaneousFit_folding2_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/ctl_JpsiFit_1BIN_bin0_SimultaneousFit_folding2_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/phi_JpsiFit_1BIN_bin0_SimultaneousFit_folding2_Run12_AllPDFs.eps}\\
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/m_JpsiFit_1BIN_bin0_SimultaneousFit_folding2_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/mkpi_JpsiFit_1BIN_bin0_SimultaneousFit_folding2_Run12_AllPDFs.eps}
\captionof{figure}[FILL ME]{FILL ME}\label{fig:MainFit-Ref-fld2}
\end{figure}

9
Chapters/ParameterMeasurement/MainFit_Ref_folding3_Run12.tex

@ -0,0 +1,9 @@
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/ctk_JpsiFit_1BIN_bin0_SimultaneousFit_folding3_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/ctl_JpsiFit_1BIN_bin0_SimultaneousFit_folding3_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/phi_JpsiFit_1BIN_bin0_SimultaneousFit_folding3_Run12_AllPDFs.eps}\\
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/m_JpsiFit_1BIN_bin0_SimultaneousFit_folding3_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/mkpi_JpsiFit_1BIN_bin0_SimultaneousFit_folding3_Run12_AllPDFs.eps}
\captionof{figure}[FILL ME]{FILL ME}\label{fig:MainFit-Ref-fld3}
\end{figure}

9
Chapters/ParameterMeasurement/MainFit_Ref_folding4_Run12.tex

@ -0,0 +1,9 @@
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/ctk_JpsiFit_1BIN_bin0_SimultaneousFit_folding4_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/ctl_JpsiFit_1BIN_bin0_SimultaneousFit_folding4_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/phi_JpsiFit_1BIN_bin0_SimultaneousFit_folding4_Run12_AllPDFs.eps}\\
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/m_JpsiFit_1BIN_bin0_SimultaneousFit_folding4_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/mkpi_JpsiFit_1BIN_bin0_SimultaneousFit_folding4_Run12_AllPDFs.eps}
\captionof{figure}[FILL ME]{FILL ME}\label{fig:MainFit-Ref-fld4}
\end{figure}

270
Chapters/ParameterMeasurement/parameterMeasurement.tex

@ -0,0 +1,270 @@
\section{Parameter measurement}\label{sec:parMeas}
The angular moments \allAng described in \refEq{decay_rate_final} are extracted from the selected events using a \cpp based fitter framework \fcncfitter. It performs maximum-likelihood fits using the \tminuit~\cite{FIT-TMINUIT} minimization class of the \root framework~\cite{ROOT}\footnote{The discrepancy between maximizing a likelihood and performing a minimization will be explained in the following section.}. This framework has been developed in the \lhcb collaboration and successfully used in previous analyses described in~Ref.\,\cite{ANA-LHCb-angular3,ANA-LHCb-angular4,ANA-LHCb-angular2}. The framework is further developed to accommodate the needs of this particular analysis and to improve the user experience.
\subsection{Maximum likelihood}\label{sec:parMeas-maximumLikelihood}
Maximum likelihood is a method of estimating the parameters of a probability distribution. As the name suggests, the method finds the parameter values for which, under the assumed statistical model, the observed data are most probable.
In order to fully understand the concept of maximum-likelihood in the multi-dimensional fit, it is useful to start with a simple example of a one parameter fit and expand it to the multi-dimensional space.
%Let $\{x_1,\dots,x_N\}$ be a set of random phenomenon and $x$ a random variable $x\in\{x_1,\dots,x_N\}$
Let $h$ represent a hypothesis and $\{D\}=\{x_1,x_2,\dots,x_n\}$ the measured data. Then, using Bayes' theorem~\cite{FIT-bayes}, the probability density function (PDF) expressing that the hypothesis is valid given the data $\{D\}$ can be rewritten as
%
\begin{equation}\label{eq:likelihood-simple}
\text{PDF}(h\,|\,\{D\}) = \frac
{\text{PDF}( \{D\}\,|\,h)\,\text{PDF}(h)}
{\text{PDF}(\{D\})}\,.
\end{equation}
The first part of the numerator, $\text{PDF}( \{D\}\,|\,h)$, is referred to as \emph{likelihood}, the second part, $\text{PDF}(h)$, as \emph{prior}. The prior is assumed to be a uniform distribution. The denominator, $\text{PDF}(\{D\})$, represents the probability of the data averaged over all hypotheses. As it does not depend on $h$, it is a constant that is not relevant for maximizing the probability that the hypothesis is true.
As the goal is to maximize the probability $\text{PDF}(h\,|\,\{D\})$, the first order derivative at the maximum-likelihood estimate $\mu_0$ has to be zero and the second order derivative negative:
%
\begin{equation}\label{eq:max-deriv}
\left.\frac{\partial \text{PDF}}{\partial h}\right|_{\mu_0} = 0\,, \qquad
\left.\frac{\partial^2 \text{PDF}}{\partial h^2}\right|_{\mu_0} < 0\,.
\end{equation}
%To illustrate this principle, in the case of a Gaussian distribution, the maximum likelihood estimators for the mean $\mu$ and the variance $\sigma$ then are $\hat{\mu}$ and $\hat{\sigma}$
%
%\begin{displaymath}
% \hat{\mu} = \frac{1}{n}\sum_{i=1}^n x_i \,, \qquad
% \hat{\sigma} = \frac{1}{n}\sum_{i=1}^n (x_i-\hat{\mu})^2\,,
%\end{displaymath}
%
%as the likelihood maximization requires the first order likelihood derivatives to be zero:
%
%\begin{displaymath}
% \frac{\partial \mathcal{L} }{\partial \mu} = 0 = \frac{1}{\sigma^2}\sum_{i=1}^n (x_i-\hat{\mu})\,,\\
%\end{displaymath}
%%
%\begin{displaymath}
% \frac{\partial \mathcal{L} }{\partial \sigma^2} = 0
% = \frac{1}{2\sigma^2}
% \left[
% \frac{1}{\sigma^2}\sum_{i=1}^n (x_i-\hat{\mu})^2-n
%\right]\,.
%\end{displaymath}
In this work, an unbinned maximum likelihood fit is performed. Extending the previous example to a multidimensional space, let $N$ be the number of events, each assigned a weight $w_n$. Let $\vec{\mu}$ be the parameter vector and $\{\vec{D}\} = \{\vec{x_1},\vec{x_2},\dots,\vec{x_N}\}$ the measured data. The likelihood takes the form of
%
\begin{equation}\label{eq:likelihood}
\mathcal{L}(\{\vec{D}\}|\vec{\mu}) = \text{PDF}(\{\vec{D}\}|\vec{\mu}) = \prod_{n=1}^{N} \text{PDF}(\vec{D_n}|\vec{\mu})^{w_n}\,,
\end{equation}
%
where $\text{PDF}(\vec{D_n}|\vec{\mu})$ is the normalized probability density function according to which the data is distributed. %In the unbinned fit, the weights for each event $w_n$ are often not normalized. A factor of $\sfrac{N}{\sum_n w_n}$ then has to be added to the weights in order to extract an unbiased overall uncertainty.
The maximization problem is often reduced to a much simpler problem. Instead of maximizing the likelihood (for simplicity denoted $\mathcal{L}$) itself, it is possible to \emph{minimize} the negative logarithm of the likelihood, $-\ln\mathcal{L}$. Looking at \refEq{likelihood}, the minimization problem becomes:
%
\begin{equation}
-\ln(\mathcal{L}(\{D\}|\vec{\mu})) \propto -\sum_{n=1}^N w_n \ln \left(\text{PDF}(\vec{D_n}|\vec{\mu})\right)\,.
\end{equation}
%
In the unbinned fit, the weights for each event $w_n$ are often not normalized. A factor of $\sfrac{N}{\sum_n w_n}$ then has to be added to the logarithm of the likelihood in order to extract an unbiased uncertainty.
Some of the parameters can be constrained to a previously known value $v_i$ with an uncertainty $u_i$. For every such parameter $p_i$, an additional Gaussian term is added to the logarithm of the likelihood
%
\begin{equation}
 \ln \mathcal{L}_{\text{constr}} = -\frac{1}{2}\sum_i \left( \frac{p_i - v_i}{u_i} \right)^2\,.
\end{equation}
%
%In this particular analysis, the data $\vec{D_n}$ corresponds to the \Bu meson mass and the deay angles, $\vec{X} = (\mass{\Bu},$\angles$)$. The parameter vector is then $\vec{\mu} = \vec{\mu_S},\vec{\mu_P},\vec{\mu_{n}}$, $\mu_n$ being a nuisance parameter
The biggest advantage of minimizing the negative logarithm of the likelihood instead of maximizing the likelihood directly is that the logarithm can be expanded using the Taylor expansion~\cite{FIT-taylor} around the maximum likelihood estimator $\vec{\mu_0}$:
%
\begin{displaymath}
\ln(\mathcal{L}(\vec{\mu})) = \ln\mathcal{L}(\vec{\mu_0})
+ \left.\frac{\partial \ln\mathcal{L} }{\partial \vec{\mu}}\right|_{\vec{\mu_0}} (\vec{\mu}-\vec{\mu_0})
+ \left.\frac{\partial^2 \ln\mathcal{L} }{\partial \vec{\mu}^2}\right|_{\vec{\mu_0}}
\frac{(\vec{\mu}-\vec{\mu_0})^2}{2} + \omega_3\,,
\end{displaymath}
%
where $\omega_3$ denotes the higher order contributions, which are \emph{typically} negligible.
The first element in the expansion is a constant, therefore not interesting for the minimization. The second element is equal to zero from \refEq{max-deriv}. Therefore, the function that has to be minimized, denoted for simplicity $\mathcal{P}$, becomes
%
\begin{equation}
 \mathcal{P} = -\left.\frac{\partial^2 \ln\mathcal{L} }{\partial \vec{\mu}^2}\right|_{\vec{\mu_0}}
 \frac{(\vec{\mu}-\vec{\mu_0})^2}{2}\,.
\end{equation}
\subsection{Fit model}\label{sec:parMeas-fitModel}
The \fcncfitter framework offers a wide variety of fit models. Despite its versatility, further improvements are made in order to adapt it to this analysis, especially due to the limited \ctk availability and the presence of a complicated background component. The fit model used to extract the parameters \allAng consists of two main components: the signal and background probability density functions $P_{sig}$ and $P_{bkg}$. The total PDF can then be generally written as\vspace{-0.25\baselineskip}
%
\begin{equation}
\text{PDF} = f_{sig} \prod_{i=1}^{D} \text{P}_{sig}^i + (1- f_{sig}) \prod_{i=1}^{D} \text{P}_{bkg}^i\,,
\end{equation}
where $D$ represents the dimension of the fit and $f_{sig}$ is the fraction of signal candidates relative to all candidates in the dataset.
The fit is performed in four dimensions of the \Bu meson mass, \ctl, \ctk and $\phi$. In addition, in the case of the \BuToKstJpsi decay, in order to extract the $F_S$ parameter (see \refEq{decay_rate_final}), another fit is performed in the two dimensions of the \Bu meson mass and the \Kstar mass.
%In the case of the \BuToKstmm decay, the $F_S$ fraction is fixed to zero as the fit to the \Bu meson mass and \Kstar mass prefers a zero value. It is shown that due to the low amount of signal candidates in the sample, this assumption does not significantly bias the \pwave angular parameters. In both of these fits, the \Bu meson mass is used to determine the fraction of signal events $f_{sig}$.
As both the collision and the detector conditions differ between \runI and \runII, the datasets are treated separately. However, all angular observables noted in \refEq{decay_rate_final} are independent of those conditions. Hence these parameters are shared between the two datasets in the fit. Moreover, to further stabilize the fit, the angular background parameters are also shared between the two datasets.
\subsubsection{Signal component}\label{sec:parMeas-sig}
The reconstructed \Bu meson mass is described by a double-sided Crystal Ball function defined in \refApp{CrystalBall}. The parameters of the Crystal Ball function $\alpha_{1,2}$ and $n_{1,2}$ are fixed to the \Bu meson mass shape in the simulation of the reference channel decay \BuToKstJpsi. This is due to the shape of the tails of the Crystal Ball function: even when fitting the simulated \Bu meson mass sample of the rare channel decay \BuToKstmm, the parameters $\alpha_{1,2}$ and $n_{1,2}$ show large uncertainties and the fit becomes unstable. On top of this, the mean of the \Bu meson mass peak in the rare \BuToKstmm decay is fixed to the one obtained by fitting the reference decay channel data.
Due to low statistics of the signal sample, the width of the Crystal Ball function $\sigma_{\text{rare}}^{Data}$ is fixed to the width in the reference decay channel data fit $\sigma_{\text{ref}}^{Data}$ multiplied by a scaling factor obtained by fitting the simulated \Bu meson mass in the rare and reference decay channels, $\sigma_{\text{rare}}^{MC}$ and $\sigma_{\text{ref}}^{MC}$
%
\begin{equation}
\sigma_{\text{rare}}^{Data} = \sigma_{\text{ref}}^{Data} \frac{\sigma_{\text{rare}}^{MC}}{\sigma_{\text{ref}}^{MC}}\,.
\end{equation}
For the fit of the \Kstar mass, the \pwave amplitude is described by Breit-Wigner model~\cite{FIT-BreitWigner}:
%
\begin{equation}\label{eq:BW-Pwave-basic}
\begin{aligned}
\mathcal{A}_{\rm P}\left(\mKpPiz \right) =
&\sqrt{kp}\times B^{\prime}_{L_\PB}(k, k_0, d)
\left(\frac{k}{\mBu}\right)^{L_\PB}\times B^{\prime}_{L_\Kstarp}(p, p_0,d)
\left(\frac{p}{\mKstarp}\right)^{L_\Kstarp}\\
&\times\frac{1}{\mKstarp^2 - \left(\mKpPiz \right)^2 - \textit{i}\mKstarp\Gamma\left(\mKpPiz\right)}\,.
\end{aligned}
\end{equation}
%
The momentum of the $\Kstarp$ in the rest frame of the \Bu meson is denoted $k$ with a mean peak value of $k_0$, the momentum of the $\Kp$ in the rest frame of the \Kstarp is denoted $p$ with a mean peak value of $p_0$. $L$ denotes the angular momentum of the corresponding meson. The factors ${B_L}^\prime$ are so-called Blatt-Weisskopf form-factors~\cite{FIT-BlattWeisskopf}
%
\begin{equation}
\begin{aligned}
B_{0}^{\prime} \left(p, p_{0}, d\right) =
&1\,,\\
B_{1}^{\prime} \left(p, p_{0}, d\right) =
&\sqrt{\frac{1 + \left(p_{0}d \right)^{2}} { 1 + \left(p~d \right)^{2}} }\,,
\label{eq:blatt-weisskopf}
\end{aligned}
\end{equation}
where $d$ represents the size of the decaying particle. This parameter is reported in~Ref.\,\cite{FIT-dSize} to be $d = 1.6 \gev^{-1}$ (or 0.3\fm). This is also consistent with a $\Bu\to\jpsi\rhop$ branching fraction measurement, where the fit favored $d=1.64\gev^{-1}$~\cite{FIT-JpsiRho}. However, recent \lhcb study of $Z(4430)$ favored $d\sim0$~\cite{FIT-Z}. As the determination of the $d$ parameter is not possible in this analysis, the value is fixed to $d = 1.6 \gev^{-1}$. This is also consistent with previous \Bd decay analyses~\cite{ANA-LHCb-angular3,ANA-LHCb-angular2} and the \BuToKstKspimm decay analysis~\cite{ANA-LHCb-angular4}.
As the angular momenta for the \pwave are $L_\PB=0$ and $L_\Kstarp=1$, \refEq{BW-Pwave-basic} becomes
%
\begin{equation}\label{eq:BW-Pwave}
\mathcal{A}_{\rm P}(\mKpPiz) = \sqrt{kp}\times \sqrt{\frac{1 + \left(p_{0}d \right)^{2}} { 1 + \left(pd \right)^{2}} } \times\frac{p}{\mKstarp}\times\frac{1}{\mKstarp^2 - \mKpPiz^2 - \textit{i}\mKstarp\Gamma(\mKpPiz)}\,.
\end{equation}
For the description of the \swave in the \mKpPiz, the \lassc parametrization~\cite{FIT-LASS} is used \vspace{-0.5\baselineskip}
%
\begin{equation}\label{eq:BW-Swave-basic}
\begin{aligned}
\mathcal{A}_{\rm S}(\mKpPiz) = &\sqrt{kp}\times B^{\prime}_{L_\PB}(k, k_0, d)\left(\frac{k}{\mBu}\right)^{L_\PB}\times B^{\prime}_{L_\Kstarp}(p, p_0, d)\left(\frac{p}{\mKstarp}\right)^{L_\Kstarp}\\
&\qquad\times\left(\frac{1}{\cot \delta_\PB - \textit{i}}+\textit{e}^{2\textit{i}\delta_\PB}\frac{1}{\cot \delta_R - \textit{i}}\right)\,,\\
\cot \delta_\PB = &\frac{1}{ap}+\frac{1}{2}rp\,,\\
\cot \delta_R = &\frac{\mKstarp^2-\mKpPiz^2}{\mKstarp\Gamma(\mKpPiz)}\,.
\end{aligned}
\end{equation}
%
The parameter $a$ represents the scattering length and $r$ is the effective range parameter. Their values $a = 1.95$ and $r = 1.78$ are taken from~Ref.\,\cite{FIT-Dunwoodie}. A study about the influence of these parameters is done in~Ref.\,\cite{ANA-LHCb-angular3}. The impact of varying these two parameters on the angular observables is negligible.
For the \swave, the angular momenta are $L_\PB = 0$ and $L_\Kstarp = 0$. This simplifies \refEq{BW-Swave-basic} to
%
\begin{equation}\label{eq:BW-Swave}
\begin{aligned}
% With L_B = 0 and L_{K*} = 0, both Blatt-Weisskopf factors reduce to
% B'_0 = 1 and the momentum power terms to unity, cf. Eq. (blatt-weisskopf).
\mathcal{A}_{\rm S}(\mKpPiz) = &\sqrt{kp}\times
\left(\frac{1}{\cot \delta_\PB - \textit{i}} +
\textit{e}^{2\textit{i}\delta_\PB}\frac{1}{\cot \delta_R - \textit{i}}\right)\,.
\end{aligned}
\end{equation}
%
The final amplitude in the \mKpPiz dimension is then a combination of the squared normalized P- and \swave amplitudes, using the \swave fraction $F_S$
%
\begin{equation}
\left. \frac{{\rm d}\Gamma}{{\rm d}\mKpPiz} \right |_{\rm S+P} =
(1-{F_S})\left\vert \mathcal{A}^{\prime}_{\rm P}(\mKpPiz)\right\vert^2+F_S \left\vert \mathcal{A}^{\prime}_{\rm S}(\mKpPiz)\right\vert^2\,.
\end{equation}
\subsubsection{Background component}\label{sec:parMeas-bkg}
As the background contribution is high especially at large \ctk, as discussed in \refSec{Accept-parametrizaiton}, a dedicated study on a predominantly background data sample is done. This sample consists of all events passing the selection described in \refSec{sel-EventSelection} in the resonant \jpsi dimuon invariant mass squared interval with the reconstructed \Bu meson mass higher than $5629\mev$, corresponding to the mass of a \Bu meson $+350\mev$. This rather strict cut is applied in order to make sure the signal tail does not significantly contribute to the background sample.
In the \Bu meson mass dimension, the background mostly consists of random accidental track combinations and is described by an exponential with one free parameter. Similarly, in the case of \mKpPiz, a linear model describes this combinatorial background well. The fit projections of the \Bu and \Kstar reconstructed mass distributions are depicted in \refFig{FIT-bkgMass}.
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.47\textwidth]{./FCNC/BkgFit/m_Bckgnd_JpsiFit_1BIN_bin0_Run12_HighBmass.eps}
\includegraphics[width=0.47\textwidth]{./FCNC/BkgFit/mkpi_Bckgnd_JpsiFit_1BIN_bin0_Run12_HighBmass.eps}
\captionof{figure}[Fit to the background sample in the \Bu meson and \Kstar mass dimensions.]{Fit to the background sample in the \Bu meson and \Kstar mass dimensions. The black markers represent the data, the red area represents the background fit model. The reconstructed \Bu meson mass distribution (left) is fitted with an exponential function, the \Kstar meson mass distribution (right) with a linear function. } \label{fig:FIT-bkgMass}
\end{figure}
In the dimension of the decay angles \angleDist, the background is parameterized using Chebyshev polynomials $T_i$~\cite{FIT-Chebyshev}. The background in each dimension is described by a dedicated Chebyshev polynomial. This factorization is possible, as a study of the background sample shows no correlation between the angles, as shown in \refFig{FIT-bkgCorr}.
The \ctl angular background is modeled with a polynomial of order two, \ctk is modeled with a polynomial of order five in the reference channel and order of two in the signal channel (this is explained in the next paragraph), and $\phi$ angular background is flat. The angular background is then described by \refEq{bkg-angular}:
\begin{equation}\label{eq:bkg-angular}
\left.\frac{\deriv(\Gamma+\bar{\Gamma})}{\deriv\ctk\,\deriv\ctl\,\deriv\phi}\right\vert_{\mathrm{BKG}}=
\left( \sum_{i = 0}^{5(2)} c^{\thetak}_iT_i(\ctk) \right) \times
\left( \sum_{j = 0}^{2} c^{\thetal}_jT_j(\ctl) \right) \times
\left( c^{\phi}_0T_0(\phi) \right)\,.
\end{equation}
\begin{figure}[hbt!]\vspace{0.5\baselineskip}
\centering
\includegraphics[width=0.33\textwidth]{./FCNC/BkgFit/Background_Correlation_ctl_ctk_Bckgnd_JpsiFit_1BIN_bin0_Run12_HighBmass_Kpi.eps} \hspace{-5pt}
\includegraphics[width=0.33\textwidth]{./FCNC/BkgFit/Background_Correlation_ctk_phi_Bckgnd_JpsiFit_1BIN_bin0_Run12_HighBmass_Kpi.eps} \hspace{-5pt}
\includegraphics[width=0.33\textwidth]{./FCNC/BkgFit/Background_Correlation_ctl_phi_Bckgnd_JpsiFit_1BIN_bin0_Run12_HighBmass_Kpi.eps} \hspace{-5pt}
\captionof{figure}[Background factorization.]{Correlation between the decay angles in the predominantly background sample. The correlations between the angles are of the order of a few percent, showing that there is no significant correlation. Hence, three independent Chebyshev polynomials can be used for the description of the angular background.} \label{fig:FIT-bkgCorr}
\end{figure}
The fit projections to the decay angles are shown in \refFig{FIT-bkgAngles}. The crucial part of the description is the peaking structure at high \ctk values. Correct modeling of this peak is essential for extracting the angular moments. The lowest order of the Chebyshev polynomial describing the \ctk background well is five. However, even when considering the large statistical sample of the \BuToKstJpsi decay, the five free parameters tend to overfit the data. The \chisq of the fit to the background sample in \ctk is equal to only 0.346. This can be avoided either by cutting even harder on the high \ctk region or by describing the background with a lower order Chebyshev polynomial. Cutting away more events with high \ctk leads to a lower sensitivity to the angular parameters, especially the parameter \FL. A lower order polynomial does not describe the shape of the background well, especially in the regions at $\ctk\approx-1$ and $\ctk\approx0.8$. This problem disappears when considering the low statistical power of the rare channel: a Chebyshev polynomial of order two is sufficient to describe the background contribution. The overfitting is present only in the reference \BuToKstJpsi decay and manifests itself by the third order polynomial coefficient running into its boundary\footnote{Of course this can be avoided by enlarging the range of this free parameter. However, this led to other parameters running into their boundaries.}. This parameter controls the shape of the plateau at $\ctk\approx-0.4$. As this is a nuisance parameter and a wide range of values describes the background well, this parameter is left floating in the fit to the reference \BuToKstJpsi decay.
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.32\textwidth]{./FCNC/BkgFit/ctk_Bckgnd_JpsiFit_1BIN_bin0_Run12_HighBmass.eps} \hspace{-5pt}
\includegraphics[width=0.32\textwidth]{./FCNC/BkgFit/ctl_Bckgnd_JpsiFit_1BIN_bin0_Run12_HighBmass.eps} \hspace{-5pt}
\includegraphics[width=0.32\textwidth]{./FCNC/BkgFit/phi_Bckgnd_JpsiFit_1BIN_bin0_Run12_HighBmass.eps}
\captionof{figure}[Angular fit of the background sample.]{Angular fit of the background sample. The sample is obtained from the \BuToKstJpsi data taken during both \runI and \runII. The black markers represent the data, the red area represents the background model described by \refEq{bkg-angular}. On the left, the \ctk distribution is presented, in the middle the \ctl distribution, and on the right $\phi$ distribution is shown.} \label{fig:FIT-bkgAngles}
\end{figure}
\clearpage
\subsection[Extraction of the \texorpdfstring{${F_S}$}{Fs} parameter]{Extraction of the \texorpdfstring{$\mathbf{F_S}$}{Fs} parameter}\label{sec:parMeas-FS}
As mentioned in \refSec{ANA_Theo_SWave}, it is impossible to distinguish the contribution of the \pwave and the \swave in the $\Kp\piz$ system at the selection level. However, using the reconstructed mass of \Kstar meson, statistical selection is possible. This can be done by performing a two-dimensional fit in the \Kstar meson mass and the \Bu meson mass. First, the 2D fit is performed in the reference \BuToKstJpsi channel, as it is much more abundant than the rare \BuToKstmm channel. The projections in the \mBu and \mKpPiz dimensions are shown in \refFig{Reference-mass-fit}.
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.48\textwidth]{FCNC/MassFit/m_massfit_JpsiFit_1BIN_bin0_Run1.eps}
\includegraphics[width=0.48\textwidth]{FCNC/MassFit/mkpi_massfit_JpsiFit_1BIN_bin0_Run1.eps}\\
\includegraphics[width=0.48\textwidth]{FCNC/MassFit/m_massfit_JpsiFit_1BIN_bin0_Run2.eps}
\includegraphics[width=0.48\textwidth]{FCNC/MassFit/mkpi_massfit_JpsiFit_1BIN_bin0_Run2.eps}\\
\captionof{figure}[Fit projections to the reference decay channel in \Bu and \Kstar masses.]{Fit projections to the reference decay channel in reconstructed \Bu meson (left) and \Kstar meson (right) masses. The top row represents \runI sample, the bottom row \runII sample. The black markers represent the data, the blue space represents the signal. Red surface represents the background contribution. The green dashed line represents the \pwave, the green dotted line, present under the background area, represents the \swave contribution.}\label{fig:Reference-mass-fit}
\end{figure}
\clearpage
The study of \BuToKstmm is performed in multiple \qsq bins. Looking at \refEq{BW-Pwave} and \refEq{BW-Swave}, the distributions are \qsq independent. Despite the effort to fit the $F_S$ in the \BuToKstmm channel using the full \qsq range (excluding the resonance regions), the statistical power of the sample is not large enough to find a contribution of the \swave in the $\Kp\piz$ system and the fit prefers $F_S=0$. The fit projections in the \mBu and \mKpPiz dimensions are given in \refFig{Signal-mass-fit}.
A dedicated study of pseudoexperiments is done in order to establish the sensitivity of the fit on the $F_S$ parameter in the rare \BuToKstmm channel. It is shown in \refSec{toy-sig} that fixing $F_S$ and the interference terms $S_{Si}$ to zero does not introduce bias on the measured \pwave angular parameters.
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.48\textwidth]{FCNC/MassFit/m_massfit_SignalFit_1BIN_bin0_Run1.eps}
\includegraphics[width=0.48\textwidth]{FCNC/MassFit/mkpi_massfit_SignalFit_1BIN_bin0_Run1.eps}\\
\includegraphics[width=0.48\textwidth]{FCNC/MassFit/m_massfit_SignalFit_1BIN_bin0_Run2.eps}
\includegraphics[width=0.48\textwidth]{FCNC/MassFit/mkpi_massfit_SignalFit_1BIN_bin0_Run2.eps}\\
\captionof{figure}[Fit projections to the rare channel in \Bu and \Kstar masses.]{Fit projections to the signal channel in the reconstructed \Bu meson (left) and \Kstar meson (right) masses. The top row represents the \runI sample, the bottom row the \runII sample. The black markers represent the data, the blue space represents the signal. The red surface represents the background contribution. As the fit prefers $F_S=0$, the \swave contribution is not plotted. The green dashed line represents the \pwave contribution to the \Kstar mass.}\label{fig:Signal-mass-fit}
\end{figure}
\clearpage

5
Chapters/Results/results.tex

@ -0,0 +1,5 @@
\section{Results}
\todoFill[inline]{Results: Fill}
\clearpage

293
Chapters/StandardModel/standardModel.tex

@ -0,0 +1,293 @@
\section{The Standard Model of particle physics}\label{sec:SM}
The structure of matter was a subject of contemplation of philosophers and scientists since ancient history. Particle physics as we know it today, however, exists only since the beginning of the 20th century. It was driven by two breakthrough discoveries: the discovery of electrons by J.\,J.\,Thomson in 1897~\cite{SM-Thompson} and the discovery of the atomic nucleus by E.\,Rutherford in 1911~\cite{SM-Rutherford}. Electrons and protons were considered the main constituents of matter until 1932, when this picture was expanded by the discovery of the neutron by J.\,Chadwick~\cite{SM-Chadwick} and by the discovery of the positron by C.\,Anderson~\cite{SM-Anderson}.
Around this time, the first ideas about interactions between particles emerged: the well-known electromagnetism was joined by the strong force holding the nuclei together and by the weak force describing beta radioactivity, discovered in 1896 by H.\,Becquerel~\cite{SM-Becquerel}. Since the weak interaction is very important for the development of the Standard Model (SM) as well as for this work, it will be in the spotlight of this chapter.
\subsection{The beginning of the Standard Model}\label{sec:SM-hist}
The theory of the beta decay was successfully described by E.\,Fermi in 1933\,\cite{SM-Fermi}, where he predicted the existence of a neutrino\footnote{Fermi's paper suggesting the existence of neutrinos was rejected by Nature, as it was ``too remote from physical reality to be of interest to the readers''~\cite{SM-Fermi-reject}.}. In this pioneering work, he suggested a direct interaction of four spin-\oneHalf quantum fields (neutron, proton, electron and antineutrino). This reflects the fact that the weak force has essentially zero-range: unlike electromagnetism, where the photon is the interaction mediator, there is no boson mediator.
At that time this was a great approximation of the beta decay \emph{at low energies}. Even though Fermi's description is different from quantum electrodynamics (QED), describing the electromagnetic force, Fermi used Lorentz four-vectors to describe the fermion fields appearing in bi-linear combinations (\emph{currents}), similarly to QED. This paved the path to electroweak interaction unification.
Fermi's theory postulated only beta decays with no change of nucleonic spin ($\Delta S=0$). However, as nuclear spectroscopy became more precise, it was clear that beta decays with a change of nucleonic spin one ($\Delta S=1$) do occur~\cite{SM-Kurie}. A generalization of the Fermi theory explaining these observations was proposed by G.\,Gamow and E.\,Teller in 1936~\cite{SM-Gamow}. Instead of using only vector currents as Fermi did, one can construct the four-fermion interaction using the whole set of scalars (S), vectors (V), tensors (T), axial vectors (A) and pseudoscalars (P). A- and T-couplings describe a spin-changing interaction, while S- and V-couplings are spin-zero transition operators.
At that time it was intuitively assumed parity symmetry holds in the quantum world in the same way as in our macroscopic world. However, two particles with very similar masses and the same spin were observed, $\tau$ and $\theta$, decaying into different final states: $\taup\to\pip\pip\pim$ and $\theta^+\to\pip\piz$. As pions have intrinsic parity of $-1$, $\tau$ and $\theta$ had to be different particles or the parity symmetry must be violated\footnote{The angular momentum $J$ is conserved. Therefore the parity of the three-pion system is equal to $(-1)^3(-1)^{J(\tau)}$ and the parity of the two-pion system $(-1)^2(-1)^{J(\theta^+)}$.}. In 1954, R.\,H.\,Dalitz looked into the two decays and confirmed $\tau$ and $\theta$ are the same particle~\cite{SM-Dalitz}. Today, we call $\theta$ and $\tau$ the charged kaon $\Kp$.
The theoretical solution to this \emph{$\tau-\theta$ puzzle} was found in 1956, when T.\,D.\,Lee and C.\,N.\,Yang suggested to abandon the idea of parity symmetry conservation~\cite{SM-Lee}. The violation of parity symmetry was quickly observed by three independent measurements: C.\,S.\,Wu measured the rate of electrons originating from the decay of $^{60}$Co atoms aligned by a uniform magnetic field. If parity is conserved, the same amount of electrons should be emitted in the direction of the nucleus spin and opposite to the spin direction of the nucleus. She observed that the electrons prefer the direction opposite to the spin of the nucleus, confirming the parity violation~\cite{SM-Wu}. The other two experiments studied the pion decay to a muon and a neutrino\footnote{Muons quickly decay to an electron, serving as an analyzer for the muon polarization.}: $\pip\to\mup+\neu$~\cite{SM-Friedman,SM-Lederman}. %$\mup\to\ep+2\neu$, electron serves as an analyzer for the muon polarization.
Since pions have spin zero and spin is conserved, the muon and the neutrino have to have opposite spin sign. If parity is conserved, the polarization of the muon would be symmetric along their direction of motion. However, the muon spin direction \emph{favors} the direction of motion.
The general lagrangian proposed by Gamov and Teller expanded by the parity violating term was rather complex. %
This was reduced to only the V-A component in 1958 independently by R.\,Feynman with M.\,Gell-Mann~\cite{SM-Feynman} and R.\,Marshak with E.\,Sudarshan~\cite{SM-Sudarshan}.
They followed the idea of the two-component spinor from the 1920s~\cite{SM-Weyl}, applied it to neutrinos, and postulated that any elementary fermion, regardless of its mass, can participate in weak interactions only through the left-handed chiral component of the corresponding spinor field. This led to the universal current-current form of the weak interaction:%
\begin{equation}
\mathcal{L}_{int}^{weak} = \frac{G_F}{\sqrt{2}}J^{\rho}J_{\rho}^{\dagger}\,,
\end{equation}
where $\mathcal{L}_{int}^{weak}$ is the weak interaction lagrangian, $G_F$ is the \emph{universal Fermi constant} and $J^{\rho}$ is the weak current (or probability flux). This implies the possibility of interaction of the weak current with itself, opening the path for intermediate vector bosons, similar to QED.
\clearpage
Given there is no physical reason for this, in the light of today's knowledge, the idea was a rather fortunate but very successful guess.
The problem of this assumption was that the observed beta decay until then preferred the spin change of either $\Delta S=0$ or $\Delta S=2$, which is excluded by the two-component theory. On the other hand it described very well all known particles' weak interaction and even predicted some interactions.
Another major problem of this theory is that it is \emph{not renormalizable}. This means it does not hold anymore at high energies of about $\sqrt{\frac{2\pi\sqrt{2}}{G_F}}$~\cite{SM-Lee-2, SM-Okun, SM-Appelquist}. This problem was solved by introducing an electrically charged intermediate vector boson $W$. Nonetheless, this brought a new problem: production of \Wm\Wp pairs in fermion-antifermion annihilation leads to a power-like unitarity violation~\cite{SM-GellMann-1}.
\subsection{Unification of electromagnetism with weak interaction}
As the \W boson has electromagnetic charge, it has to interact electromagnetically. As the weak interaction when exchanging the $W$ boson violates parity maximally, while QED is parity conserving, one cannot just add QED and weak lagrangians together. Instead of adding terms to the lagrangians, the unification of weak interaction with QED was realized in a very different way. Taking a step back in history, in 1954 C.\,N.\,Yang and R.\,Mills looked into non-abelian (non-commutative) gauge invariance~\cite{SM-Yang-Mills}. They showed that the transformation from global to local symmetry requires a triplet of vector fields, analogous to the photon field. However, contrary to the photon field, this field also interacts with itself.
S.\,Glashow showed in 1961 that the minimal representation of the electroweak unification indeed requires four gauge fields: the known photon, \Wp, \Wm and a new neutral vector boson~\cite{SM-Glashow-1}. The new neutral vector boson (called \Z today) is required to bridge the gap between parity-conserving electromagnetism and parity-violating weak interaction. In mathematical terms, the appropriate gauge group is a non-simple \SU group\footnote{It is interesting that this \SU electroweak unification was not the only theoretical solution: by introducing new electron-type leptons, one reaches simple $SU(2)$ electroweak unification. This is however not supported by the experimental data.}. This effectively means two independent coupling constants are needed.
This idea also led to an estimation of the \W boson mass to be 77.7\gev (the currently measured value is $80.379\pm0.012\gev$~\cite{PDG}). While massive \W bosons canceled the main divergences mentioned in the previous subsection, new divergences appeared in the interactions of the vector-boson fields with themselves.
\subsection{Renormalization problem}\label{sec:SM_renormalization}
The renormalization problem was solved by adding a scalar field to the electroweak theory. The initial idea was published by J.\,Goldstone~\cite{SM-Goldstone}, who added the so-called \emph{mexican hat} potential $V(\varphi)$ to the lagrangian density
\begin{equation} \label{eq:mexican_potential}
V(\varphi) = -\mu^2 \varphi \varphi^* + \lambda (\varphi\varphi^*)^2\,,
\end{equation}
where $\mu$ is a real parameter with dimension of mass, $\lambda$ is a dimensionless coupling constant and $\varphi$ is a complex scalar field. The potential is sketched in \refFig{mexican_potential}. This potential has a minimum lying on a circle in the complex plane with radius $\sfrac{\mu}{\sqrt{\lambda}}$, therefore the minimal energy is infinitely degenerate. The ratio $\sfrac{\mu}{\sqrt{\lambda}}$ is commonly denoted as $v$ and referred to as the vacuum expectation value. This means the lagrangian is no longer symmetric at its minimum. The potential effectively describes two real scalar fields with masses $\mu\sqrt{2}$ and $0$. The appearance of a massless bosonic excitation (Goldstone boson) is referred to as the \emph{Goldstone theorem}~\cite{SM-Goldstone-2}.
\begin{wrapfigure}[20]{r}{0.45\textwidth} \vspace{10pt}
\centering
\includegraphics[width=0.5\textwidth]{./StandardModel/higgs_potential.eps}
\captionof{figure}[Visualization of the Goldstone potential.]{Visualization of the Goldstone potential given by \refEq{mexican_potential}. The full potential is realized by the surface created by rotating the red curve around the y-axis.} \label{fig:mexican_potential}
\end{wrapfigure}
The Goldstone model was further extended by P.\,Higgs~\cite{SM-Higgs} and others~\cite{SM-Englert,SM-Guralnik}, who described the interaction with an Abelian gauge field in the frame of the Goldstone model. When gauged, the Goldstone boson disappears and the gauge field acquires a mass. This is the famous \emph{Higgs mechanism}. It was shown later by S.\,Weinberg that the Higgs mechanism is actually necessary for tree-level unitarity of the electroweak theory (\ie renormalizability)~\cite{SM-Weinberg}. The application of the Higgs mechanism on the Glashow model was further expanded by A.\,Salam~\cite{SM-Salam-2} and today we refer to it as the Glashow-Weinberg-Salam model. They used the Higgs mechanism to generate also lepton and fermion masses, while keeping the electromagnetic interaction parity symmetric and the weak interaction parity violating.
\clearpage
\subsection{Quark model}\label{sec:SM_quark}
The picture of the Standard Model at this point in history relies on the \SU gauge symmetry and the Higgs mechanism realized via a complex scalar doublet.
At that time, baryons and mesons were considered to be elementary particles. That was only until 1961, when M.\,Gell-Mann and independently Y.\,Neeman proposed the \emph{Eightfold way}. They noticed that the then-known particles match an $SU(3)$ representation~\cite{SM-Gell-Mann-2,SM-Neeman}. Gell-Mann continued to work on this model, and in 1964 he used the word quark for the first time~\cite{SM-Gell-Mann-3}. Independently of him, G.\,Zweig also proposed that ``Both mesons and baryons are constructed from a set of three fundamental particles''~\cite{SM-Zweig-1,SM-Zweig-2}. They postulated that quarks have only a fractional charge of $\sfrac{1}{3}$ and $\sfrac{2}{3}$ and are fermions. They called the quarks \emph{up}, \emph{down} and \emph{strange}.
In the same year, S.\,Glashow and J.\,Bjorken proposed the existence of a fourth---\emph{charm}---quark. This was appealing at that time as the existence of the \tauon lepton was yet to be discovered and the existence of two generations of quarks was symmetric to two generations of leptons~\cite{SM-Bjorken}.
The charm quark was later recognized by S.\,Glashow, J.\,Iliopoulos and L.\,Maiani (\emph{GIM}) to be a crucial part of the Standard Model. The problem with the existence of three quarks was the interaction of quarks with the $Z$ boson: the occurrence of strangeness-changing neutral currents was phenomenologically much smaller than expected. They added the fourth quark to the electroweak theory, allowing only for flavor-conserving neutral currents~\cite{SM-GIM}. This gave the basics to the theory of flavor-changing neutral currents, where the divergences are cut-off by a heavy quark exchange in a loop. An example of such diagrams is shown in \refFig{KaonToMuMu}.
One of the remaining problems of the theory was CP violation. The CP violation was unexpectedly observed in 1964 in the decay of \Kz mesons~\cite{SM-Cronin}. Even though the community at that time was wary of accepting the quark model (the charm quark was still yet to be discovered), M.\,Kobayashi and T.\,Maskawa proposed the existence of two more quarks~\cite{SM-CKM}. The model with two generations of quarks is CP conserving, while the proposed three-generation model is not~\cite{SM-CKM}. The matrix describing the strength of flavor-changing weak interaction is called \emph{CKM} after N.\,Cabibbo\footnote{N.\,Cabibbo postulated a similar matrix with two generations of quarks~\cite{SM-Cabibbo}. The motivation for such a matrix was the fact that the $u\leftrightarrow d$, $\electron\leftrightarrow\neue$ and $\muon\leftrightarrow\neum$ transitions had similar measured amplitudes. On top of that, the transitions with strangeness change one ($\Delta s = 1$) have four times larger amplitude than processes with strangeness conserved. This was solved by Cabibbo by postulating weak universality and a weak mixing angle $\theta_c$.}, M.\,Kobayashi and T.\,Maskawa.
\begin{figure}[hbt!] \centering
\includegraphics[width=0.33\textwidth]{./Feynman/Kaon_box_cropped.pdf}\hspace{0.25cm}
\includegraphics[width=0.30\textwidth]{./Feynman/Kaon_penguin1.pdf}\hspace{0.25cm}
\includegraphics[width=0.30\textwidth]{./Feynman/Kaon_penguin2.pdf}
\captionof{figure}[Feynman diagrams of kaon decay to a $\mu\mu$ pair including \cquark contribution]
{
Feynman diagrams of kaon decay to $\mu\mu$ including \cquark-quark contribution. They were described in\,\cite{SM-Gaillard}. Note that there is also a long distance contribution from $\KL\to\g\g\to\mup\mun$. \label{fig:KaonToMuMu}
}
\end{figure}
\subsection[\texorpdfstring{${b\rightarrow s l^-l^+}$}{b to sll} transitions]{\texorpdfstring{$\boldsymbol{b\rightarrow s l^-l^+}$}{b to sll} transitions}\label{sec:SM_bsll}
The exchange of heavy quarks in loops in flavor-changing neutral currents (FCNC) is a great tool to probe New Physics at high energies. The loops are sensitive to heavy particles and precision measurement of such processes could lead us to New Physics discovery, similarly to how the kaon decay to muons led to the discovery of the charm quark. Higher-order transitions, such as the $\decay{\bquark}{\squark l^-l^+}$ transition, are sensitive to New Physics, as they are even more suppressed by the GIM mechanism. The price to pay is that the interactions are rather rarely occurring. The typical decay rate for such a transition is $10^{-6}$. These processes are then referred to as \emph{rare decays}.
Experimentally accessible examples of such higher-order FCNC interactions are \bsll transitions. They occur through \emph{box} and \emph{penguin} diagrams, as shown in \refFig{penguin_bsll}.
\begin{figure}[htb!] \centering
\includegraphics[width=0.42\textwidth]{./Feynman/bsll_box.pdf}\hspace{1cm}
\includegraphics[width=0.42\textwidth]{./Feynman/bsll_penguin.pdf}
\captionof{figure}[Feynman diagrams of a \bsll transition.]
{
Feynman diagrams of a \bsll transition. The diagram on the left is referred to as \emph{box} diagram, the right diagram is called \emph{penguin} diagram. \label{fig:penguin_bsll}
}
\end{figure}
The processes are mediated by \g, \Wpm and \Z bosons. One of the experimentally observable variables is the invariant mass squared of the lepton pair, \qsq, as shown in \refFig{q2_theory}. The \bsll transition is dominated by several very different processes depending on the \qsq value. There are two problematic regions of \qsq: around 9\gevgev and 14\gevgev. In these regions, the process is dominated by a tree-level diagram of \bquark\to\squark\jpsi and \bquark\to\squark\psitwos, where \jpsi or \psitwos decays into two leptons. As these regions are dominated by a process with different physics, both theory and experiment typically omit these regions in their predictions or measurements.
\begin{figure}[htb!] \centering
\includegraphics[width=0.46\textwidth]{./StandardModel/q2regions.pdf}
\captionof{figure}[Decay rate of \bsll transition in dependence on \qsq.]{
Decay rate of \bsll transition in dependence on \qsq. In the low \qsq region, the decay rate is dominated by the penguin diagram with photon exchange. With increasing \qsq, the contribution of other processes increases, until the decay rate is dominated by the \jpsi and \psitwos charm resonances. At very high \qsq, the decay rate is dominated by long distance contributions. For the details about the $C_i\left(\mu\right)$ variables see \refEq{EffHam}. \label{fig:q2_theory}
}
\end{figure}
\begin{figure}[htb!] \centering
\begin{minipage}{0.4\textwidth}
\centering
\includegraphics[width=1.0\textwidth]{./Feynman/bsll_box.pdf}\\
\includegraphics[width=0.1\textwidth]{./Others/plus_operator.png}\\
\includegraphics[width=1.0\textwidth]{./Feynman/bsll_penguin.pdf}
\end{minipage}
\begin{minipage}{0.1\textwidth}
\centering
\includegraphics[width=0.7\textwidth]{./Others/Rightarrow.png}
\end{minipage}
\begin{minipage}{0.4\textwidth}
\centering
\includegraphics[width=1.0\textwidth]{./Feynman/bsll_eff.pdf}
\end{minipage}
\captionof{figure}[Feynman diagrams of an effective \bsll transition.] {
Feynman diagrams of an effective \bsll transition. Instead of looking at the interaction as a set of diagrams, we can describe the \bsll transition as a point-like four-fermion interaction. \label{fig:bsll_eff}
}
\end{figure}
Similarly to how Fermi described the beta decay as one interaction of four fermions, one can apply this simplification also to these processes. The exchanged energy (smaller than the mass of the \Bu meson) is much lower than the energy scale of the quantum loop (mass of the $W$ boson). Therefore, instead of looking at the interaction from the Standard Model point of view illustrated in \refFig{penguin_bsll}, one can look at it as a point interaction, as shown in \refFig{bsll_eff}.
This description is commonly referred to as \emph{effective theory}. The effective Hamiltonian of \bsll transition can be expressed as:
\begin{equation}\label{eq:EffHam}
\mathcal{H}_{eff} = -\frac{4G_F}{\sqrt{2}}V_{tb}V_{ts}^{*}\frac{\alpha_e}{4\pi}\sum_{i}C_i\left(\mu\right)\mathcal{O}_i\left(\mu\right)\,,
\end{equation}
where $G_F$ is the weak decay constant, $V_{tb}V_{ts}^{*}$ are the CKM matrix elements describing the $\bquark\to\tquark$ and $\tquark\to\squark$ transitions (the contributions of the \uquark and \cquark quarks to the loop are negligible), $\alpha_e$ is the fine-structure constant, and $\sfrac{1}{4\pi}$ comes from the loop suppression. The Wilson coefficients $C_i\left(\mu\right)$ contain all information about short-distance physics in the transition above the renormalization scale $\mu$.
% \todo[inline]{Add something about $\mu=M_W$ (matching scale),+ calculation at low-energy scale of $\mu\sim m_b$?} %we need calculation at low-energy scale
The operators $\mathcal{O}_i\left(\mu\right)$ are local four-fermion operators with different Lorentz structures. These currents are all left-handed. Formally, the right-handed $\mathcal{O}'_i$ currents contribute to the Hamiltonian too, however they are very suppressed in the Standard Model due to the parity violating nature of the weak interaction described earlier.
Looking at FCNC transitions,
% $q\to q l^+l^-$ transition, different processes are sensitive to different operators: \Ope1 and \Ope2 are current-current (tree-level) operators, \Ope3 ~- \Ope6 (QCD penguin operators) describe the interactions with gluons and are sensitive to $b \to s qq$ transitions.
the operator \Ope7 describes the photon contribution to the decay rate and is constrained by radiative decays of $q\to q l^+l^-$ transitions at small \qsq. The operators \Ope9 and \Ope10 are V and A currents, respectively. The operator \Ope8 describes gluon contribution to the diagrams. Assuming the SM scale $\mu=M_W$, \Ope8 vanishes in the Standard Model~\cite{SM-Buchalla}. The operators are given in \refEq{operators}.
%For \bsll transitions, the \emph{electroweak penguin operators} \Ope7, \Ope8, \Ope9 and \Ope10 are then of importance. The other operators either do not (significantly) contribute to the decay or are strongly constrained by other processes. The operator \Ope7 describes the photon contribution to the decay rate and is constrained by radiative decays of $q\to q l^+l^-$ transitions at small \qsq. The operators \Ope9 and \Ope10 are V and A currents respectively. The operator \Ope8 describes gluon contribution to the diagrams. Assuming the SM scale $\mu=M_W$, \Ope8 vanishes in the Standard Model~\cite{SM-Buchalla}. The operators are given in \refEq{operators}.
In the \refEq{operators},
$e$ is the elementary charge,
$g$ is the strong coupling constant, and
$m_b$ is the running \bquark quark mass.
The matrices are denoted as follows:
$\sigma_{\mu \nu}$ are Pauli matrices,
$\gamma_{\nu,5}$ are Dirac matrices and
$\lambda^a$ are Gell-Mann matrices.
The quark fields are denoted $\squarkbar$, $\bquark$,
the muon fields $\mu$, $\bar{\mu}$,
while $ G^{\mu \nu \, a}$ is the gluon field tensor.
The electromagnetic tensor is denoted $ F^{\mu \nu}$.
%
%\begin{array}{rl}
% O^u_1 = & (\bar{s}_L \gamma_{\mu} T^a u_L) (\bar{u}_L \gamma^{\mu} T^a b_L),
% \vspace{0.2cm} \\
% O^u_2 = & (\bar{s}_L \gamma_{\mu} u_L) (\bar{u}_L \gamma^{\mu} b_L),
% \vspace{0.2cm} \\
% O^c_1 = & (\bar{s}_L \gamma_{\mu} T^a c_L) (\bar{c}_L \gamma^{\mu} T^a b_L),
% \vspace{0.2cm} \\
% O^c_2 = & (\bar{s}_L \gamma_{\mu} c_L) (\bar{c}_L \gamma^{\mu} b_L),
% \vspace{0.2cm} \\
% O_3 = & (\bar{s}_L \gamma_{\mu} b_L) \sum_q (\bar{q}\gamma^{\mu} q),
% \vspace{0.2cm} \\
% O_4 = & (\bar{s}_L \gamma_{\mu} T^a b_L) \sum_q (\bar{q}\gamma^{\mu} T^a q),
% \vspace{0.2cm} \\
% O_5 = & (\bar{s}_L \gamma_{\mu_1}
% \gamma_{\mu_2}
% \gamma_{\mu_3} b_L)\sum_q (\bar{q} \gamma^{\mu_1}
% \gamma^{\mu_2}
% \gamma^{\mu_3} q),
% \vspace{0.2cm} \\
% O_6 = & (\bar{s}_L \gamma_{\mu_1}
% \gamma_{\mu_2}
% \gamma_{\mu_3} T^a b_L)\sum_q (\bar{q} \gamma^{\mu_1}
% \gamma^{\mu_2}
% \gamma^{\mu_3} T^a q),
% \vspace{0.2cm} \\
% O_7 = & \f{e}{g^2} m_b (\bar{s}_L \sigma^{\mu \nu} b_R) F_{\mu \nu},
% \vspace{0.2cm} \\
% O_8 = & \f{1}{g} m_b (\bar{s}_L \sigma^{\mu \nu} T^a b_R) G_{\mu \nu}^a,
% \vspace{0.2cm} \\
% O_9 = & \f{e^2}{g^2} (\bar{s}_L \gamma_{\mu} b_L) \sum_l
% (\bar{l}\gamma^{\mu} l),
% \vspace{0.2cm} \\
% O_{10} = & \f{e^2}{g^2} (\bar{s}_L \gamma_{\mu} b_L) \sum_l
% (\bar{l} \gamma^{\mu} \gamma_5 l),
%\end{array}
%assuming mu = M_W, C_8 and C_10 vanish
\begin{align}\label{eq:operators} \begin{split}
{\mathcal{O}}_{7} &= \frac{e}{g^2} m_b
(\bar{s} \sigma_{\mu \nu} \frac{1+\gamma_5}{2} b) F^{\mu \nu}\,, \\
{\mathcal{O}}_{8} &= \frac{1}{g} m_b
(\bar{s} \sigma_{\mu \nu} \frac{\lambda^a}{2} \frac{1+\gamma_5}{2} b) G^{\mu \nu \, a}\,, \\
{\mathcal{O}}_{9} &= \frac{e^2}{g^2}
(\bar{s} \gamma_{\mu} \frac{1-\gamma_5}{2} b)(\bar{\mu} \gamma^\mu \mu)\,,\\
{\mathcal{O}}_{10} &=\frac{e^2}{g^2}
(\bar{s} \gamma_{\mu} \frac{1-\gamma_5}{2} b)( \bar{\mu} \gamma^\mu \gamma_5 \mu)\,.\\
\end{split}\end{align}
These operators in combination with corresponding Wilson coefficients dominate in different \qsq regions, as illustrated in \refFig{q2_theory}. As the effective theory allows for any kind of interaction, it can also describe New Physics contributions. If the measured Wilson coefficient values differ from the SM expectations, it means the contribution of the SM processes is accompanied by a New Physics process.
%The top right diagram, a so called penguin diagram, is related to both C 9 and C 10 .
%The Wilson coefficient C 9 is related to vector like coupling, whereas C 10 is related
%to axial vector like coupling. Therefore the decay described by a virtual photon is
%only possible in the case of C 9 . The bottom diagram, a so called box diagram, is
%associated to both C 9 and C 10 .
Unfortunately, this theory describes free quarks. In experiments, the quarks are bound by the strong force as depicted in \refFig{bsll_meson}, described by non-perturbative quantum chromodynamics (QCD)\footnote{QCD is the theory of the strong interaction between quarks and gluons.}. Despite the fact that these effects are very hard to compute, there are several tools that provide these calculations. The most widely used tools are Lattice QCD~\cite{SM-Wilson} and Light-Cone-Sum-Rules (LCSR)~\cite{SM-LCSM}. Moreover, the calculations based on QCD factorisation~\cite{SM-QCDF} are typically performed for low \qsq, while for high \qsq (\qsq $\gtrsim$~15\gevgev) the Operator Product Expansion~\cite{SM-OPE} is used.
\begin{figure}[htb!] \centering
\includegraphics[width=0.49\textwidth]{./Feynman/bsll_eff_meson.pdf}
\includegraphics[width=0.49\textwidth]{./Feynman/bsll_eff_meson_charm.pdf} \vspace{-50pt}
\captionof{figure}[ Feynman diagrams of an effective \bsll transition in a meson.]
{
Feynman diagrams of an effective \BuToKstmm transition. The left diagram shows the \bsll process in the context of the interacting quark being bound in a \Bu meson decaying into \Kstarp\mup\mun. There is also a non-factorizable contribution from charm loops, as shown on the right. Even though its contribution is much smaller, it needs to be correctly treated too. \label{fig:bsll_meson}
}
\end{figure}
\clearpage
It is very hard to disentangle QCD processes from the \bsll transition; however, by choosing a convenient basis and variables, the form-factor influence can be removed at least at first order (this will be described later in \refSec{ANA_Theo_BR}). This is a limiting factor in many theory predictions for this process, even though the calculations are constantly improved. %Lattice QCD, Light-Cone-Sum-Rules, operator product expansions, heavy quark expansion, QCD factorisation, Soft Collinear Effective theory and Chiral perturbation theory.
Besides the challenging form-factor contributions, there is another non-factorizable contribution: charm loops~\cite{SM-CharmLoop}. The process is depicted in \refFig{bsll_meson} on the right. A charm loop is coupled to the \bquark and \squark quarks and to a virtual photon decaying to the muon pair. Their contribution is much smaller than that of the form-factors. However, with increasing precision of both measurements and QCD calculations, their effect becomes significant. Additional gluons can come into play, making the theoretical calculations even harder. This process is included in the \C9 Wilson coefficient and it is therefore important to separate this effect from possible contributions of physics beyond the Standard Model.
\subsubsection{New Physics}\label{sec:SM_NP}
As the effective theory allows for any kind of interaction, it can also describe New Physics (NP) contributions at a large energy scale: they can be integrated out similarly to the electroweak bosons. Other operators that are negligible in the SM can contribute to the effective Hamiltonian. An example of such operators $\mathcal{O}_S$, $\mathcal{O}_P$ and $\mathcal{O}_T$ that can be added \eg from Higgs penguins\footnote{Higgs penguin is a Higgs to fermion-antifermion transition.} is listed in \refEq{operators_BSM}.
%
\begin{align}\label{eq:operators_BSM}\begin{split}
\mathcal{O}_S &= \left( \bar{s} \frac{1+\gamma_5}{2} b \right) \left( \bar{\mu} \mu\right)\,,\\
\mathcal{O}_P &= \left( \bar{s} \frac{1+\gamma_5}{2} b \right) \left( \bar{\mu} \gamma_5 \mu\right)\,,\\
\mathcal{O}_T &= \left( \bar{s} \sigma_{\mu\nu} b \right) \left( \bar{\mu} \sigma^{\mu\nu} \mu \right)\,.
\end{split}
\end{align}
%An example of such a process can be a box diagram as on the left of \refFig{penguin_bsll}) with the exchange of two charged Higgs bosons instead of two $\W$s.
Moreover, the right-handed Wilson coefficients $C^{'}_i$ come into play. Different decays are sensitive to different coefficients, as presented in \refTab{SM_Wilson_sensitivity}. From the table it becomes clear that the decay of \BuToKstmm is sensitive to most of the coefficients.
\begin{table}[hbt!] \centering
\begin{tabular}{l|cccc}
Decay & \C7 & \C9 & \C10 & $C_{S,P}$ \\ \hline
$\B\to\left(X_s,\Kstar\right)\g$ & \checkmark & \texttimes & \texttimes & \texttimes \\
$\B\to\left(X_s,\Kstar\right)\ellm\ellp$ & \checkmark & \checkmark & \checkmark & \texttimes \\
$\Bs\to\mumu$ & \texttimes & \texttimes & \checkmark & \checkmark \\
\end{tabular}
\captionof{table}[Sensitivity of Wilson coefficients for different decays.]{Sensitivity of Wilson coefficients for different decays, where $X_s$ stands for any inclusive decay with an \squark quark.
\label{tab:SM_Wilson_sensitivity}
}
\end{table}
\clearpage
In most of the measurements with the potential to constrain New Physics, there is a good agreement with the SM. However, in several previous measurements, tensions of a few standard deviations appear. These measurements are discussed later in \refSec{ANA_previous}. All these tensions are of the order of 2--3 standard deviations away from the Standard Model prediction. However, they are all consistent with each other, hinting at a possible New Physics contribution to the Wilson coefficients \C9 and \C10.
%branching ratio of \Bs\to\mumu (Nature 522 (2015) 68), branching ratios of $\B\to X_s\g$ (1301.0836), CP assymetry of \Bd\to\Kstarz\g (https://cds.cern.ch/record/
%1424352) and isospin assymetry of \B\to\Kstarz\g (0906.2177)
%C10 vs C9 State of New Physics in b − s transitions,(2015) arXiv:1411.3161, [1903.09578]
There are numerous NP approaches to explain these tensions, including the supersymmetric theory or the string theory. Among the currently most discussed theories are \emph{portal}, \emph{loop}, and \emph{leptoquark} models. \emph{Portal models} assume a particle responsible for the tensions that can also be involved in the dark matter production in the early universe~\cite{NP-portals,NP-portals2,NP-portals3}. This is typically a \Zprime boson. In these models, the \Zprime boson contributes to the operator \Ope{9} (and sometimes to the operator \Ope{10}) with flavor-violating couplings to quarks and non-universal couplings to leptons. Portal models provide corrections to \C9, however they also mean unwanted contributions to other Wilson coefficients. An example of a Feynman graph with a \Zprime boson is in \refFig{feynman-NP}, left.
%
\emph{Loop} models postulate that the NP contribution comes from loops containing particles. These particles are in some cases potential dark matter candidates~\cite{NP-loops,NP-loops2, NP-loops3}. An example of such a hypothetical loop is in \refFig{feynman-NP}, middle.
%
\emph{Leptoquark} models assume the existence of two (or more) leptoquarks: a particle carrying both lepton and baryon number, allowing leptons and quarks to interact directly, as shown in \refFig{feynman-NP}, right. This model can answer the question of why neutrinos are massive, but it can also explain some of the tensions in \bsll decays~\cite{NP-Leptoquarks0, NP-Leptoquarks, NP-Leptoquarks2, NP-Leptoquarks3, NP-Leptoquarks4}. There are also other models combining these approaches~\cite{SM-leptoquarkportal-1,SM-leptoquarkportal-2,SM-leptoquarkportal-3}.
\begin{figure}[htb!] \centering
\includegraphics[width=0.34\textwidth]{./Feynman/bsll_Zprime.pdf} \hspace{-15pt}
\includegraphics[width=0.34\textwidth]{./Feynman/bsll_DM.pdf}\hspace{-15pt}
\includegraphics[width=0.34\textwidth]{./Feynman/bsll_Leptoquark.pdf}
\captionof{figure}[Potential New Physics Feynman diagrams.]
{
Potential New Physics Feynman diagrams. On the left, a Feynman diagram with a potential \Zprime gauge boson; in the middle, a possible loop diagram with a contribution from dark matter particles; on the right, a diagram of a \bsll interaction through leptoquarks. \label{fig:feynman-NP}
}
\end{figure}
\clearpage

26
Chapters/Toys/jobs/614.tex

@ -0,0 +1,26 @@
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.31\textwidth]{FCNC/Toys/614/Fl_bin0_Pulls.eps}
\includegraphics[width=0.31\textwidth]{FCNC/Toys/614/S3_bin0_Pulls.eps}
\includegraphics[width=0.31\textwidth]{FCNC/Toys/614/S4_bin0_Pulls.eps}\\
\includegraphics[width=0.31\textwidth]{FCNC/Toys/614/S5_bin0_Pulls.eps}
\includegraphics[width=0.31\textwidth]{FCNC/Toys/614/Afb_bin0_Pulls.eps}
\includegraphics[width=0.31\textwidth]{FCNC/Toys/614/S7_bin0_Pulls.eps}\\
\includegraphics[width=0.31\textwidth]{FCNC/Toys/614/S8_bin0_Pulls.eps}
\includegraphics[width=0.31\textwidth]{FCNC/Toys/614/S9_bin0_Pulls.eps}
\captionof{figure}[Pull distributions of the \pwave angular moments in reference-like pseudoexperiments.]{Pull distributions of the \pwave angular moments. 500 pseudoexperiments have been generated, mimicking the reference \BuToKstJpsi decay. Each pseudoexperiment consists of 65\,000 pseudoevents.}\label{fig:toys-Ref-pull-P}
\end{figure}
%-----------------------------------------
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.31\textwidth]{FCNC/Toys/614/FS_bin0_Pulls.eps}
\includegraphics[width=0.31\textwidth]{FCNC/Toys/614/SS1_bin0_Pulls.eps}
\includegraphics[width=0.31\textwidth]{FCNC/Toys/614/SS2_bin0_Pulls.eps}\\
\includegraphics[width=0.31\textwidth]{FCNC/Toys/614/SS3_bin0_Pulls.eps}
\includegraphics[width=0.31\textwidth]{FCNC/Toys/614/SS4_bin0_Pulls.eps}
\includegraphics[width=0.31\textwidth]{FCNC/Toys/614/SS5_bin0_Pulls.eps}
\captionof{figure}[Pull distributions of the \swave and interference angular moments in reference-like pseudoexperiments.]{Pull distributions of the \swave angular moments. 500 pseudoexperiments have been generated, mimicking the reference \BuToKstJpsi decay. Each pseudoexperiment consists of 65\,000 pseudoevents.}\label{fig:toys-Ref-pull-S}
\end{figure}

32
Chapters/Toys/jobs/pull_table_631_col.tex

@ -0,0 +1,32 @@
\begin{table}[hbt!] \footnotesize \centering
\begin{tabular}{|l|c c c c c|}
\multicolumn{6}{c}{\textbf{means}}\\ \hline
\textbf{parameter} &[0.25--4.00] &[4.00--8.00] &[11.00--12.50] &[15.00--18.00] &[1.10--6.00]\\
\hline
$F_{L}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.061 \pm 0.046$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.023 \pm 0.045$ &\cellcolor[HTML]{c47f51} $\phantom{-}0.421 \pm 0.048$ &\cellcolor[HTML]{f0fea2} $\phantom{-}0.378 \pm 0.046$ &\cellcolor[HTML]{5FA55F} $-0.045 \pm 0.045$\\
$S_{3}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.046 \pm 0.043$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.050 \pm 0.041$ &\cellcolor[HTML]{5FA55F} $-0.003 \pm 0.045$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.094 \pm 0.046$ &\cellcolor[HTML]{5FA55F} $-0.012 \pm 0.042$\\
$S_{4}$ &\cellcolor[HTML]{5FA55F} $-0.016 \pm 0.044$ &\cellcolor[HTML]{a8d281} $\phantom{-}0.187 \pm 0.042$ &\cellcolor[HTML]{a8d281} $\phantom{-}0.169 \pm 0.045$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.046 \pm 0.046$ &\cellcolor[HTML]{5FA55F} $-0.023 \pm 0.045$\\
$S_{5}$ &\cellcolor[HTML]{5FA55F} $-0.006 \pm 0.044$ &\cellcolor[HTML]{c47f51} $\phantom{-}0.404 \pm 0.042$ &\cellcolor[HTML]{f0fea2} $\phantom{-}0.363 \pm 0.046$ &\cellcolor[HTML]{f0fea2} $\phantom{-}0.325 \pm 0.046$ &\cellcolor[HTML]{a8d281} $\phantom{-}0.115 \pm 0.039$\\
$A_{FB}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.088 \pm 0.043$ &\cellcolor[HTML]{86d2a8} $-0.187 \pm 0.043$ &\cellcolor[HTML]{9ae9cd} $-0.252 \pm 0.048$ &\cellcolor[HTML]{Adfff1} $-0.355 \pm 0.044$ &\cellcolor[HTML]{5FA55F} $-0.018 \pm 0.045$\\
$S_{7}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.062 \pm 0.043$ &\cellcolor[HTML]{5FA55F} $-0.033 \pm 0.040$ &\cellcolor[HTML]{5FA55F} $-0.067 \pm 0.041$ &\cellcolor[HTML]{5FA55F} $-0.030 \pm 0.044$ &\cellcolor[HTML]{5FA55F} $-0.046 \pm 0.043$\\
$S_{8}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.023 \pm 0.045$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.065 \pm 0.040$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.069 \pm 0.045$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.052 \pm 0.043$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.031 \pm 0.043$\\
$S_{9}$ &\cellcolor[HTML]{5FA55F} $-0.040 \pm 0.042$ &\cellcolor[HTML]{5FA55F} $-0.013 \pm 0.039$ &\cellcolor[HTML]{5FA55F} $-0.015 \pm 0.042$ &\cellcolor[HTML]{86d2a8} $-0.102 \pm 0.043$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.014 \pm 0.041$\\
\hline
\multicolumn{6}{c}{}\\
\multicolumn{6}{c}{\textbf{widths}}\\ \hline
\textbf{parameter} &[0.25--4.00] &[4.00--8.00] &[11.00--12.50] &[15.00--18.00] &[1.10--6.00]\\
\hline
$F_{L}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}1.027 \pm 0.032$ &\cellcolor[HTML]{5FA55F} $\phantom{-}1.002 \pm 0.032$ &\cellcolor[HTML]{5FA55F} $\phantom{-}1.064 \pm 0.034$ &\cellcolor[HTML]{5FA55F} $\phantom{-}1.011 \pm 0.032$ &\cellcolor[HTML]{5FA55F} $\phantom{-}1.009 \pm 0.032$\\
$S_{3}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.966 \pm 0.031$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.916 \pm 0.029$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.988 \pm 0.032$ &\cellcolor[HTML]{5FA55F} $\phantom{-}1.020 \pm 0.033$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.935 \pm 0.030$\\
$S_{4}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.970 \pm 0.031$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.931 \pm 0.030$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.965 \pm 0.032$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.995 \pm 0.033$ &\cellcolor[HTML]{5FA55F} $\phantom{-}1.010 \pm 0.032$\\
$S_{5}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.993 \pm 0.031$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.927 \pm 0.029$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.979 \pm 0.033$ &\cellcolor[HTML]{5FA55F} $\phantom{-}1.001 \pm 0.032$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.865 \pm 0.027$\\
$A_{FB}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.960 \pm 0.031$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.958 \pm 0.030$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.959 \pm 0.034$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.925 \pm 0.031$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.993 \pm 0.032$\\
$S_{7}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.960 \pm 0.030$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.885 \pm 0.028$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.907 \pm 0.029$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.968 \pm 0.031$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.951 \pm 0.030$\\
$S_{8}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}1.008 \pm 0.032$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.894 \pm 0.028$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.978 \pm 0.032$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.950 \pm 0.030$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.950 \pm 0.030$\\
$S_{9}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.928 \pm 0.029$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.865 \pm 0.027$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.933 \pm 0.030$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.956 \pm 0.031$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.913 \pm 0.029$\\
\hline
\end{tabular}
\captionof{table}[The angular moments pull distribution properties in rare-like pseudoexperiments, \swave fixed to zero.]{The means and widths of the pull distributions in rare-like pseudoexperiments. 500 pseudoexperiments have been generated, mimicking the rare \BuToKstmm decay. The pseudoexperiments are generated with $F_S=0.25$ and with non-zero interference moments $S_{S_i}$. In the fit, the $F_S$ and $S_{S_i}$ moments are set to zero. The color scheme ranges from red (overestimation by 50\% of the uncertainty) through green (ideal value) to blue (underestimation by 50\% of the uncertainty), changing in steps of 10\% for the reader's convenience.} \label{tab:toys-Sig-pull-631}
\end{table}

48
Chapters/Toys/jobs/pull_table_632_fld.tex

@ -0,0 +1,48 @@
\begin{table}[hbt!] \footnotesize \centering
\begin{tabular}{|l|c c c c c|}
\multicolumn{6}{c}{\textbf{means}}\\ \hline
\textbf{parameter} &[0.25--4.00] &[4.00--8.00] &[11.00--12.50] &[15.00--18.00] &[1.10--6.00]\\
\hline
$F_{L}$ (0)& $-0.029 \pm 0.047$ & $-0.025 \pm 0.047$ & $\phantom{-}0.218 \pm 0.043$ & $\phantom{-}0.327 \pm 0.044$ & $-0.089 \pm 0.047$\\
$F_{L}$ (1)& $\phantom{-}0.136 \pm 0.049$ & $\phantom{-}0.074 \pm 0.047$ & $\phantom{-}0.092 \pm 0.047$ & $\phantom{-}0.214 \pm 0.045$ & $\phantom{-}0.049 \pm 0.043$\\
$F_{L}$ (2)& $\phantom{-}0.130 \pm 0.049$ & $-0.016 \pm 0.051$ & $\phantom{-}0.179 \pm 0.043$ & $\phantom{-}0.234 \pm 0.047$ & $\phantom{-}0.014 \pm 0.047$\\
$F_{L}$ (3)& $\phantom{-}0.064 \pm 0.045$ & $-0.082 \pm 0.052$ & $\phantom{-}0.200 \pm 0.042$ & $\phantom{-}0.157 \pm 0.042$ & $-0.040 \pm 0.043$\\
$F_{L}$ (4)& $\phantom{-}0.080 \pm 0.047$ & $\phantom{-}0.129 \pm 0.055$ & $\phantom{-}0.367 \pm 0.066$ & $\phantom{-}0.478 \pm 0.067$ & $-0.024 \pm 0.046$\\
$S_{3}$ (0)& $\phantom{-}0.012 \pm 0.041$ & $\phantom{-}0.047 \pm 0.042$ & $\phantom{-}0.094 \pm 0.045$ & $\phantom{-}0.304 \pm 0.046$ & $\phantom{-}0.006 \pm 0.043$\\
$S_{3}$ (1)& $\phantom{-}0.082 \pm 0.043$ & $\phantom{-}0.043 \pm 0.040$ & $\phantom{-}0.070 \pm 0.038$ & $\phantom{-}0.211 \pm 0.042$ & $-0.037 \pm 0.043$\\
$S_{3}$ (2)& $\phantom{-}0.009 \pm 0.044$ & $\phantom{-}0.011 \pm 0.041$ & $\phantom{-}0.076 \pm 0.039$ & $\phantom{-}0.244 \pm 0.046$ & $-0.039 \pm 0.046$\\
$S_{3}$ (3)& $-0.034 \pm 0.044$ & $\phantom{-}0.066 \pm 0.039$ & $\phantom{-}0.054 \pm 0.039$ & $\phantom{-}0.185 \pm 0.042$ & $\phantom{-}0.024 \pm 0.040$\\
$S_{3}$ (4)& $-0.048 \pm 0.038$ & $\phantom{-}0.084 \pm 0.035$ & $\phantom{-}0.089 \pm 0.040$ & $\phantom{-}0.273 \pm 0.044$ & $\phantom{-}0.021 \pm 0.040$\\
$S_{4}$ & $\phantom{-}0.046 \pm 0.044$ & $\phantom{-}0.190 \pm 0.044$ & $\phantom{-}0.172 \pm 0.041$ & $\phantom{-}0.043 \pm 0.044$ & $\phantom{-}0.105 \pm 0.043$\\
$S_{5}$ & $\phantom{-}0.022 \pm 0.045$ & $\phantom{-}0.391 \pm 0.046$ & $\phantom{-}0.344 \pm 0.045$ & $\phantom{-}0.263 \pm 0.050$ & $\phantom{-}0.181 \pm 0.046$\\
$A_{FB}$ & $\phantom{-}0.025 \pm 0.044$ & $-0.093 \pm 0.042$ & $-0.335 \pm 0.043$ & $-0.396 \pm 0.044$ & $-0.038 \pm 0.045$\\
$S_{7}$ & $\phantom{-}0.066 \pm 0.042$ & $\phantom{-}0.043 \pm 0.040$ & $-0.032 \pm 0.041$ & $\phantom{-}0.065 \pm 0.044$ & $\phantom{-}0.054 \pm 0.042$\\
$S_{8}$ & $-0.022 \pm 0.041$ & $\phantom{-}0.034 \pm 0.038$ & $-0.003 \pm 0.045$ & $\phantom{-}0.045 \pm 0.051$ & $-0.030 \pm 0.041$\\
$S_{9}$ & $-0.049 \pm 0.041$ & $-0.035 \pm 0.043$ & $\phantom{-}0.007 \pm 0.041$ & $-0.004 \pm 0.047$ & $-0.043 \pm 0.040$\\
\hline
\multicolumn{6}{c}{}\\
\multicolumn{6}{c}{\textbf{widths}}\\ \hline
\textbf{parameter} &[0.25--4.00] &[4.00--8.00] &[11.00--12.50] &[15.00--18.00] &[1.10--6.00]\\
\hline
$F_{L}$ (0)& $\phantom{-}1.036 \pm 0.033$ & $\phantom{-}1.045 \pm 0.033$ & $\phantom{-}0.943 \pm 0.030$ & $\phantom{-}0.974 \pm 0.031$ & $\phantom{-}1.036 \pm 0.033$\\
$F_{L}$ (1)& $\phantom{-}1.092 \pm 0.035$ & $\phantom{-}1.045 \pm 0.033$ & $\phantom{-}1.042 \pm 0.033$ & $\phantom{-}1.000 \pm 0.032$ & $\phantom{-}0.962 \pm 0.030$\\
$F_{L}$ (2)& $\phantom{-}1.027 \pm 0.035$ & $\phantom{-}1.064 \pm 0.036$ & $\phantom{-}0.895 \pm 0.031$ & $\phantom{-}0.971 \pm 0.033$ & $\phantom{-}0.988 \pm 0.034$\\
$F_{L}$ (3)& $\phantom{-}0.996 \pm 0.032$ & $\phantom{-}1.168 \pm 0.037$ & $\phantom{-}0.938 \pm 0.030$ & $\phantom{-}0.927 \pm 0.029$ & $\phantom{-}0.971 \pm 0.031$\\
$F_{L}$ (4)& $\phantom{-}1.024 \pm 0.033$ & $\phantom{-}1.190 \pm 0.039$ & $\phantom{-}1.340 \pm 0.047$ & $\phantom{-}1.376 \pm 0.048$ & $\phantom{-}0.997 \pm 0.032$\\
$S_{3}$ (0)& $\phantom{-}0.905 \pm 0.029$ & $\phantom{-}0.928 \pm 0.030$ & $\phantom{-}0.991 \pm 0.032$ & $\phantom{-}1.003 \pm 0.032$ & $\phantom{-}0.955 \pm 0.031$\\
$S_{3}$ (1)& $\phantom{-}0.971 \pm 0.031$ & $\phantom{-}0.895 \pm 0.028$ & $\phantom{-}0.851 \pm 0.027$ & $\phantom{-}0.926 \pm 0.029$ & $\phantom{-}0.961 \pm 0.030$\\
$S_{3}$ (2)& $\phantom{-}0.918 \pm 0.031$ & $\phantom{-}0.858 \pm 0.029$ & $\phantom{-}0.804 \pm 0.027$ & $\phantom{-}0.945 \pm 0.032$ & $\phantom{-}0.949 \pm 0.032$\\
$S_{3}$ (3)& $\phantom{-}0.976 \pm 0.031$ & $\phantom{-}0.873 \pm 0.028$ & $\phantom{-}0.867 \pm 0.028$ & $\phantom{-}0.936 \pm 0.030$ & $\phantom{-}0.904 \pm 0.029$\\
$S_{3}$ (4)& $\phantom{-}0.829 \pm 0.027$ & $\phantom{-}0.762 \pm 0.025$ & $\phantom{-}0.872 \pm 0.028$ & $\phantom{-}0.964 \pm 0.031$ & $\phantom{-}0.882 \pm 0.028$\\
$S_{4}$ & $\phantom{-}0.992 \pm 0.031$ & $\phantom{-}0.984 \pm 0.031$ & $\phantom{-}0.913 \pm 0.029$ & $\phantom{-}0.987 \pm 0.031$ & $\phantom{-}0.960 \pm 0.030$\\
$S_{5}$ & $\phantom{-}0.939 \pm 0.032$ & $\phantom{-}0.950 \pm 0.032$ & $\phantom{-}0.921 \pm 0.032$ & $\phantom{-}1.041 \pm 0.036$ & $\phantom{-}0.968 \pm 0.033$\\
$A_{FB}$ & $\phantom{-}0.955 \pm 0.031$ & $\phantom{-}0.923 \pm 0.030$ & $\phantom{-}0.890 \pm 0.031$ & $\phantom{-}0.945 \pm 0.031$ & $\phantom{-}0.994 \pm 0.032$\\
$S_{7}$ & $\phantom{-}0.932 \pm 0.030$ & $\phantom{-}0.886 \pm 0.028$ & $\phantom{-}0.904 \pm 0.029$ & $\phantom{-}0.978 \pm 0.031$ & $\phantom{-}0.938 \pm 0.030$\\
$S_{8}$ & $\phantom{-}0.904 \pm 0.029$ & $\phantom{-}0.828 \pm 0.027$ & $\phantom{-}0.970 \pm 0.032$ & $\phantom{-}1.101 \pm 0.036$ & $\phantom{-}0.903 \pm 0.029$\\
$S_{9}$ & $\phantom{-}0.896 \pm 0.029$ & $\phantom{-}0.959 \pm 0.031$ & $\phantom{-}0.902 \pm 0.029$ & $\phantom{-}1.027 \pm 0.033$ & $\phantom{-}0.879 \pm 0.028$\\
\hline
\end{tabular}
\captionof{table}[The angular moments pull distribution properties in rare-like pseudoexperiments with folding applied, \swave set to zero.]{The means and widths of the pull distributions in rare-like pseudoexperiments. 500 pseudoexperiments have been generated, mimicking the rare \BuToKstmm decay. In the fit to the pseudodata, folding is applied. The numbers in parentheses next to the parameters \FL and $S_3$ indicate the applied folding, as these two parameters can be measured using all folding techniques.} \label{tab:toys-Sig-pull-632}
\end{table}

48
Chapters/Toys/jobs/pull_table_632_fld_col.tex

@ -0,0 +1,48 @@
\begin{table}[hbt!] \footnotesize \centering
\begin{tabular}{|l|c c c c c|}
\multicolumn{6}{c}{\textbf{means}}\\ \hline
\textbf{parameter} &[0.25--4.00] &[4.00--8.00] &[11.00--12.50] &[15.00--18.00] &[1.10--6.00]\\
\hline
$F_{L}$ (0)&\cellcolor[HTML]{5FA55F} $-0.029 \pm 0.047$ &\cellcolor[HTML]{5FA55F} $-0.025 \pm 0.047$ &\cellcolor[HTML]{CCE892} $\phantom{-}0.218 \pm 0.043$ &\cellcolor[HTML]{f0fea2} $\phantom{-}0.327 \pm 0.044$ &\cellcolor[HTML]{5FA55F} $-0.089 \pm 0.047$\\
$F_{L}$ (1)&\cellcolor[HTML]{a8d281} $\phantom{-}0.136 \pm 0.049$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.074 \pm 0.047$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.092 \pm 0.047$ &\cellcolor[HTML]{CCE892} $\phantom{-}0.214 \pm 0.045$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.049 \pm 0.043$\\
$F_{L}$ (2)&\cellcolor[HTML]{a8d281} $\phantom{-}0.130 \pm 0.049$ &\cellcolor[HTML]{5FA55F} $-0.016 \pm 0.051$ &\cellcolor[HTML]{a8d281} $\phantom{-}0.179 \pm 0.043$ &\cellcolor[HTML]{CCE892} $\phantom{-}0.234 \pm 0.047$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.014 \pm 0.047$\\
$F_{L}$ (3)&\cellcolor[HTML]{5FA55F} $\phantom{-}0.064 \pm 0.045$ &\cellcolor[HTML]{5FA55F} $-0.082 \pm 0.052$ &\cellcolor[HTML]{a8d281} $\phantom{-}0.200 \pm 0.042$ &\cellcolor[HTML]{a8d281} $\phantom{-}0.157 \pm 0.042$ &\cellcolor[HTML]{5FA55F} $-0.040 \pm 0.043$\\
$F_{L}$ (4)&\cellcolor[HTML]{5FA55F} $\phantom{-}0.080 \pm 0.047$ &\cellcolor[HTML]{a8d281} $\phantom{-}0.129 \pm 0.055$ &\cellcolor[HTML]{f0fea2} $\phantom{-}0.367 \pm 0.066$ &\cellcolor[HTML]{c47f51} $\phantom{-}0.478 \pm 0.067$ &\cellcolor[HTML]{5FA55F} $-0.024 \pm 0.046$\\
$S_{3}$ (0)&\cellcolor[HTML]{5FA55F} $\phantom{-}0.012 \pm 0.041$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.047 \pm 0.042$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.094 \pm 0.045$ &\cellcolor[HTML]{f0fea2} $\phantom{-}0.304 \pm 0.046$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.006 \pm 0.043$\\
$S_{3}$ (1)&\cellcolor[HTML]{5FA55F} $\phantom{-}0.082 \pm 0.043$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.043 \pm 0.040$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.070 \pm 0.038$ &\cellcolor[HTML]{CCE892} $\phantom{-}0.211 \pm 0.042$ &\cellcolor[HTML]{5FA55F} $-0.037 \pm 0.043$\\
$S_{3}$ (2)&\cellcolor[HTML]{5FA55F} $\phantom{-}0.009 \pm 0.044$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.011 \pm 0.041$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.076 \pm 0.039$ &\cellcolor[HTML]{CCE892} $\phantom{-}0.244 \pm 0.046$ &\cellcolor[HTML]{5FA55F} $-0.039 \pm 0.046$\\
$S_{3}$ (3)&\cellcolor[HTML]{5FA55F} $-0.034 \pm 0.044$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.066 \pm 0.039$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.054 \pm 0.039$ &\cellcolor[HTML]{a8d281} $\phantom{-}0.185 \pm 0.042$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.024 \pm 0.040$\\
$S_{3}$ (4)&\cellcolor[HTML]{5FA55F} $-0.048 \pm 0.038$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.084 \pm 0.035$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.089 \pm 0.040$ &\cellcolor[HTML]{CCE892} $\phantom{-}0.273 \pm 0.044$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.021 \pm 0.040$\\
$S_{4}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.046 \pm 0.044$ &\cellcolor[HTML]{a8d281} $\phantom{-}0.190 \pm 0.044$ &\cellcolor[HTML]{a8d281} $\phantom{-}0.172 \pm 0.041$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.043 \pm 0.044$ &\cellcolor[HTML]{a8d281} $\phantom{-}0.105 \pm 0.043$\\
$S_{5}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.022 \pm 0.045$ &\cellcolor[HTML]{f0fea2} $\phantom{-}0.391 \pm 0.046$ &\cellcolor[HTML]{f0fea2} $\phantom{-}0.344 \pm 0.045$ &\cellcolor[HTML]{CCE892} $\phantom{-}0.263 \pm 0.050$ &\cellcolor[HTML]{a8d281} $\phantom{-}0.181 \pm 0.046$\\
$A_{FB}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.025 \pm 0.044$ &\cellcolor[HTML]{5FA55F} $-0.093 \pm 0.042$ &\cellcolor[HTML]{Adfff1} $-0.335 \pm 0.043$ &\cellcolor[HTML]{Adfff1} $-0.396 \pm 0.044$ &\cellcolor[HTML]{5FA55F} $-0.038 \pm 0.045$\\
$S_{7}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.066 \pm 0.042$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.043 \pm 0.040$ &\cellcolor[HTML]{5FA55F} $-0.032 \pm 0.041$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.065 \pm 0.044$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.054 \pm 0.042$\\
$S_{8}$ &\cellcolor[HTML]{5FA55F} $-0.022 \pm 0.041$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.034 \pm 0.038$ &\cellcolor[HTML]{5FA55F} $-0.003 \pm 0.045$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.045 \pm 0.051$ &\cellcolor[HTML]{5FA55F} $-0.030 \pm 0.041$\\
$S_{9}$ &\cellcolor[HTML]{5FA55F} $-0.049 \pm 0.041$ &\cellcolor[HTML]{5FA55F} $-0.035 \pm 0.043$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.007 \pm 0.041$ &\cellcolor[HTML]{5FA55F} $-0.004 \pm 0.047$ &\cellcolor[HTML]{5FA55F} $-0.043 \pm 0.040$\\
\hline
\multicolumn{6}{c}{}\\
\multicolumn{6}{c}{\textbf{widths}}\\ \hline
\textbf{parameter} &[0.25--4.00] &[4.00--8.00] &[11.00--12.50] &[15.00--18.00] &[1.10--6.00]\\
\hline
$F_{L}$ (0)&\cellcolor[HTML]{5FA55F} $\phantom{-}1.036 \pm 0.033$ &\cellcolor[HTML]{5FA55F} $\phantom{-}1.045 \pm 0.033$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.943 \pm 0.030$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.974 \pm 0.031$ &\cellcolor[HTML]{5FA55F} $\phantom{-}1.036 \pm 0.033$\\
$F_{L}$ (1)&\cellcolor[HTML]{5FA55F} $\phantom{-}1.092 \pm 0.035$ &\cellcolor[HTML]{5FA55F} $\phantom{-}1.045 \pm 0.033$ &\cellcolor[HTML]{5FA55F} $\phantom{-}1.042 \pm 0.033$ &\cellcolor[HTML]{5FA55F} $\phantom{-}1.000 \pm 0.032$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.962 \pm 0.030$\\
$F_{L}$ (2)&\cellcolor[HTML]{5FA55F} $\phantom{-}1.027 \pm 0.035$ &\cellcolor[HTML]{5FA55F} $\phantom{-}1.064 \pm 0.036$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.895 \pm 0.031$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.971 \pm 0.033$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.988 \pm 0.034$\\
$F_{L}$ (3)&\cellcolor[HTML]{5FA55F} $\phantom{-}0.996 \pm 0.032$ &\cellcolor[HTML]{a8d281} $\phantom{-}1.168 \pm 0.037$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.938 \pm 0.030$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.927 \pm 0.029$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.971 \pm 0.031$\\
$F_{L}$ (4)&\cellcolor[HTML]{5FA55F} $\phantom{-}1.024 \pm 0.033$ &\cellcolor[HTML]{a8d281} $\phantom{-}1.190 \pm 0.039$ &\cellcolor[HTML]{f0fea2} $\phantom{-}1.340 \pm 0.047$ &\cellcolor[HTML]{f0fea2} $\phantom{-}1.376 \pm 0.048$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.997 \pm 0.032$\\
$S_{3}$ (0)&\cellcolor[HTML]{5FA55F} $\phantom{-}0.905 \pm 0.029$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.928 \pm 0.030$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.991 \pm 0.032$ &\cellcolor[HTML]{5FA55F} $\phantom{-}1.003 \pm 0.032$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.955 \pm 0.031$\\
$S_{3}$ (1)&\cellcolor[HTML]{5FA55F} $\phantom{-}0.971 \pm 0.031$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.895 \pm 0.028$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.851 \pm 0.027$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.926 \pm 0.029$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.961 \pm 0.030$\\
$S_{3}$ (2)&\cellcolor[HTML]{5FA55F} $\phantom{-}0.918 \pm 0.031$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.858 \pm 0.029$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.804 \pm 0.027$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.945 \pm 0.032$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.949 \pm 0.032$\\
$S_{3}$ (3)&\cellcolor[HTML]{5FA55F} $\phantom{-}0.976 \pm 0.031$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.873 \pm 0.028$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.867 \pm 0.028$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.936 \pm 0.030$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.904 \pm 0.029$\\
$S_{3}$ (4)&\cellcolor[HTML]{86d2a8} $\phantom{-}0.829 \pm 0.027$ &\cellcolor[HTML]{9ae9cd} $\phantom{-}0.762 \pm 0.025$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.872 \pm 0.028$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.964 \pm 0.031$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.882 \pm 0.028$\\
$S_{4}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.992 \pm 0.031$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.984 \pm 0.031$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.913 \pm 0.029$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.987 \pm 0.031$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.960 \pm 0.030$\\
$S_{5}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.939 \pm 0.032$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.950 \pm 0.032$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.921 \pm 0.032$ &\cellcolor[HTML]{5FA55F} $\phantom{-}1.041 \pm 0.036$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.968 \pm 0.033$\\
$A_{FB}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.955 \pm 0.031$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.923 \pm 0.030$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.890 \pm 0.031$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.945 \pm 0.031$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.994 \pm 0.032$\\
$S_{7}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.932 \pm 0.030$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.886 \pm 0.028$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.904 \pm 0.029$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.978 \pm 0.031$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.938 \pm 0.030$\\
$S_{8}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.904 \pm 0.029$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.828 \pm 0.027$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.970 \pm 0.032$ &\cellcolor[HTML]{a8d281} $\phantom{-}1.101 \pm 0.036$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.903 \pm 0.029$\\
$S_{9}$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.896 \pm 0.029$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.959 \pm 0.031$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.902 \pm 0.029$ &\cellcolor[HTML]{5FA55F} $\phantom{-}1.027 \pm 0.033$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.879 \pm 0.028$\\
\hline
\end{tabular}
\captionof{table}[The angular moments pull distribution properties in rare-like pseudoexperiments with folding applied, \swave set to zero.]{The means and widths of the pull distributions in rare-like pseudoexperiments. 500 pseudoexperiments have been generated, mimicking the rare \BuToKstmm decay. In the fit to the pseudodata, folding is applied. The numbers in parentheses next to the parameters \FL and $S_3$ indicate the applied folding, as these two parameters can be measured using all folding techniques. The color scheme ranges from red (overestimation by 50\% of the uncertainty) through green (ideal value) to blue (underestimation by 50\% of the uncertainty), changing in steps of 10\% for the reader's convenience.} \label{tab:toys-Sig-pull-632}
\end{table}

22
Chapters/Toys/jobs/pull_table_633.tex

@ -0,0 +1,22 @@
\begin{table} \footnotesize \centering
\begin{tabular}{|l|c c c c c|}
\multicolumn{6}{c}{\textbf{means}}\\ \hline
\textbf{parameter} &[0.25--4.00] &[4.00--8.00] &[11.00--12.50] &[15.00--18.00] &[1.10--6.00]\\
\hline
$F_{L}$ & $\phantom{-}0.136 \pm 0.049$ & $\phantom{-}0.074 \pm 0.047$ & $\phantom{-}0.092 \pm 0.047$ & $\phantom{-}0.214 \pm 0.045$ & $\phantom{-}0.049 \pm 0.043$\\
$S_{3}$ & $\phantom{-}0.082 \pm 0.043$ & $\phantom{-}0.043 \pm 0.040$ & $\phantom{-}0.070 \pm 0.038$ & $\phantom{-}0.211 \pm 0.042$ & $-0.037 \pm 0.043$\\
$S_{4}$ & $\phantom{-}0.046 \pm 0.044$ & $\phantom{-}0.190 \pm 0.044$ & $\phantom{-}0.172 \pm 0.041$ & $\phantom{-}0.043 \pm 0.044$ & $\phantom{-}0.105 \pm 0.043$\\
\hline
\multicolumn{6}{c}{}\\
\multicolumn{6}{c}{\textbf{widths}}\\ \hline
\textbf{parameter} &[0.25--4.00] &[4.00--8.00] &[11.00--12.50] &[15.00--18.00] &[1.10--6.00]\\
\hline
$F_{L}$ & $\phantom{-}1.092 \pm 0.035$ & $\phantom{-}1.045 \pm 0.033$ & $\phantom{-}1.042 \pm 0.033$ & $\phantom{-}1.000 \pm 0.032$ & $\phantom{-}0.962 \pm 0.030$\\
$S_{3}$ & $\phantom{-}0.971 \pm 0.031$ & $\phantom{-}0.895 \pm 0.028$ & $\phantom{-}0.851 \pm 0.027$ & $\phantom{-}0.926 \pm 0.029$ & $\phantom{-}0.961 \pm 0.030$\\
$S_{4}$ & $\phantom{-}0.992 \pm 0.031$ & $\phantom{-}0.984 \pm 0.031$ & $\phantom{-}0.913 \pm 0.029$ & $\phantom{-}0.987 \pm 0.031$ & $\phantom{-}0.960 \pm 0.030$\\
\hline
\end{tabular}
\captionof{table}[The means and widths of the pull distributions in rare-like pseudoexperiments.]{The means and widths of the pull distributions in rare-like pseudoexperiments. 500 pseudoexperiments have been generated, mimicking the rare \BuToKstmm decay.} \label{tab:toys-Sig-pull-633}
\end{table}

22
Chapters/Toys/jobs/pull_table_634.tex

@ -0,0 +1,22 @@
\begin{table} \footnotesize \centering
\begin{tabular}{|l|c c c c c|}
\multicolumn{6}{c}{\textbf{means}}\\ \hline
\textbf{parameter} &[0.25--4.00] &[4.00--8.00] &[11.00--12.50] &[15.00--18.00] &[1.10--6.00]\\
\hline
$F_{L}$ & $\phantom{-}0.130 \pm 0.049$ & $-0.016 \pm 0.051$ & $\phantom{-}0.179 \pm 0.043$ & $\phantom{-}0.234 \pm 0.047$ & $\phantom{-}0.014 \pm 0.047$\\
$S_{3}$ & $\phantom{-}0.009 \pm 0.044$ & $\phantom{-}0.011 \pm 0.041$ & $\phantom{-}0.076 \pm 0.039$ & $\phantom{-}0.244 \pm 0.046$ & $-0.039 \pm 0.046$\\
$S_{5}$ & $\phantom{-}0.022 \pm 0.045$ & $\phantom{-}0.391 \pm 0.046$ & $\phantom{-}0.344 \pm 0.045$ & $\phantom{-}0.263 \pm 0.050$ & $\phantom{-}0.181 \pm 0.046$\\
\hline
\multicolumn{6}{c}{}\\
\multicolumn{6}{c}{\textbf{widths}}\\ \hline
\textbf{parameter} &[0.25--4.00] &[4.00--8.00] &[11.00--12.50] &[15.00--18.00] &[1.10--6.00]\\
\hline
$F_{L}$ & $\phantom{-}1.027 \pm 0.035$ & $\phantom{-}1.064 \pm 0.036$ & $\phantom{-}0.895 \pm 0.031$ & $\phantom{-}0.971 \pm 0.033$ & $\phantom{-}0.988 \pm 0.034$\\
$S_{3}$ & $\phantom{-}0.918 \pm 0.031$ & $\phantom{-}0.858 \pm 0.029$ & $\phantom{-}0.804 \pm 0.027$ & $\phantom{-}0.945 \pm 0.032$ & $\phantom{-}0.949 \pm 0.032$\\
$S_{5}$ & $\phantom{-}0.939 \pm 0.032$ & $\phantom{-}0.950 \pm 0.032$ & $\phantom{-}0.921 \pm 0.032$ & $\phantom{-}1.041 \pm 0.036$ & $\phantom{-}0.968 \pm 0.033$\\
\hline
\end{tabular}
\captionof{table}[The means and widths of the pull distributions in rare-like pseudoexperiments.]{The means and widths of the pull distributions in rare-like pseudoexperiments. 500 pseudoexperiments have been generated, mimicking the rare \BuToKstmm decay.} \label{tab:toys-Sig-pull-634}
\end{table}

22
Chapters/Toys/jobs/pull_table_635.tex

@ -0,0 +1,22 @@
\begin{table} \footnotesize \centering
\begin{tabular}{|l|c c c c c|}
\multicolumn{6}{c}{\textbf{means}}\\ \hline
\textbf{parameter} &[0.25--4.00] &[4.00--8.00] &[11.00--12.50] &[15.00--18.00] &[1.10--6.00]\\
\hline
$F_{L}$ & $\phantom{-}0.064 \pm 0.045$ & $-0.082 \pm 0.052$ & $\phantom{-}0.200 \pm 0.042$ & $\phantom{-}0.157 \pm 0.042$ & $-0.040 \pm 0.043$\\
$S_{3}$ & $-0.034 \pm 0.044$ & $\phantom{-}0.066 \pm 0.039$ & $\phantom{-}0.054 \pm 0.039$ & $\phantom{-}0.185 \pm 0.042$ & $\phantom{-}0.024 \pm 0.040$\\
$S_{7}$ & $\phantom{-}0.066 \pm 0.042$ & $\phantom{-}0.043 \pm 0.040$ & $-0.032 \pm 0.041$ & $\phantom{-}0.065 \pm 0.044$ & $\phantom{-}0.054 \pm 0.042$\\
\hline
\multicolumn{6}{c}{}\\
\multicolumn{6}{c}{\textbf{widths}}\\ \hline
\textbf{parameter} &[0.25--4.00] &[4.00--8.00] &[11.00--12.50] &[15.00--18.00] &[1.10--6.00]\\
\hline
$F_{L}$ & $\phantom{-}0.996 \pm 0.032$ & $\phantom{-}1.168 \pm 0.037$ & $\phantom{-}0.938 \pm 0.030$ & $\phantom{-}0.927 \pm 0.029$ & $\phantom{-}0.971 \pm 0.031$\\
$S_{3}$ & $\phantom{-}0.976 \pm 0.031$ & $\phantom{-}0.873 \pm 0.028$ & $\phantom{-}0.867 \pm 0.028$ & $\phantom{-}0.936 \pm 0.030$ & $\phantom{-}0.904 \pm 0.029$\\
$S_{7}$ & $\phantom{-}0.932 \pm 0.030$ & $\phantom{-}0.886 \pm 0.028$ & $\phantom{-}0.904 \pm 0.029$ & $\phantom{-}0.978 \pm 0.031$ & $\phantom{-}0.938 \pm 0.030$\\
\hline
\end{tabular}
\captionof{table}[The means and widths of the pull distributions in rare-like pseudoexperiments.]{The means and widths of the pull distributions in rare-like pseudoexperiments. 500 pseudoexperiments have been generated, mimicking the rare \BuToKstmm decay.} \label{tab:toys-Sig-pull-635}
\end{table}

22
Chapters/Toys/jobs/pull_table_636.tex

@ -0,0 +1,22 @@
\begin{table} \footnotesize \centering
\begin{tabular}{|l|c c c c c|}
\multicolumn{6}{c}{\textbf{means}}\\ \hline
\textbf{parameter} &[0.25--4.00] &[4.00--8.00] &[11.00--12.50] &[15.00--18.00] &[1.10--6.00]\\
\hline
$F_{L}$ & $\phantom{-}0.080 \pm 0.047$ & $\phantom{-}0.129 \pm 0.055$ & $\phantom{-}0.367 \pm 0.066$ & $\phantom{-}0.478 \pm 0.067$ & $-0.024 \pm 0.046$\\
$S_{3}$ & $-0.048 \pm 0.038$ & $\phantom{-}0.084 \pm 0.035$ & $\phantom{-}0.089 \pm 0.040$ & $\phantom{-}0.273 \pm 0.044$ & $\phantom{-}0.021 \pm 0.040$\\
$S_{8}$ & $-0.022 \pm 0.041$ & $\phantom{-}0.034 \pm 0.038$ & $-0.003 \pm 0.045$ & $\phantom{-}0.045 \pm 0.051$ & $-0.030 \pm 0.041$\\
\hline
\multicolumn{6}{c}{}\\
\multicolumn{6}{c}{\textbf{widths}}\\ \hline
\textbf{parameter} &[0.25--4.00] &[4.00--8.00] &[11.00--12.50] &[15.00--18.00] &[1.10--6.00]\\
\hline
$F_{L}$ & $\phantom{-}1.024 \pm 0.033$ & $\phantom{-}1.190 \pm 0.039$ & $\phantom{-}1.340 \pm 0.047$ & $\phantom{-}1.376 \pm 0.048$ & $\phantom{-}0.997 \pm 0.032$\\
$S_{3}$ & $\phantom{-}0.829 \pm 0.027$ & $\phantom{-}0.762 \pm 0.025$ & $\phantom{-}0.872 \pm 0.028$ & $\phantom{-}0.964 \pm 0.031$ & $\phantom{-}0.882 \pm 0.028$\\
$S_{8}$ & $\phantom{-}0.904 \pm 0.029$ & $\phantom{-}0.828 \pm 0.027$ & $\phantom{-}0.970 \pm 0.032$ & $\phantom{-}1.101 \pm 0.036$ & $\phantom{-}0.903 \pm 0.029$\\
\hline
\end{tabular}
\captionof{table}[The means and widths of the pull distributions in rare-like pseudoexperiments.]{The means and widths of the pull distributions in rare-like pseudoexperiments. 500 pseudoexperiments have been generated, mimicking the rare \BuToKstmm decay.} \label{tab:toys-Sig-pull-636}
\end{table}

32
Chapters/Toys/jobs/pull_table_643_col.tex

@ -0,0 +1,32 @@
\begin{table}[hbt!] \footnotesize \centering
\begin{tabular}{|l|c c c c c|}
\multicolumn{6}{c}{\textbf{means}}\\ \hline
\textbf{parameter} &[0.25--4.00] &[4.00--8.00] &[11.00--12.50] &[15.00--18.00] &[1.10--6.00]\\
\hline
$F_{L}$ &\cellcolor[HTML]{a8d281} $\phantom{-}0.164 \pm 0.045$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.002 \pm 0.047$ &\cellcolor[HTML]{CCE892} $\phantom{-}0.237 \pm 0.042$ &\cellcolor[HTML]{a8d281} $\phantom{-}0.163 \pm 0.043$ &\cellcolor[HTML]{a8d281} $\phantom{-}0.184 \pm 0.043$\\
$S_{3}$ &\cellcolor[HTML]{5FA55F} $-0.025 \pm 0.039$ &\cellcolor[HTML]{5FA55F} $-0.017 \pm 0.040$ &\cellcolor[HTML]{5FA55F} $-0.022 \pm 0.039$ &\cellcolor[HTML]{5FA55F} $-0.051 \pm 0.038$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.017 \pm 0.042$\\
$S_{4}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.072 \pm 0.044$ &\cellcolor[HTML]{86d2a8} $-0.113 \pm 0.038$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.043 \pm 0.038$ &\cellcolor[HTML]{5FA55F} $-0.035 \pm 0.041$ &\cellcolor[HTML]{5FA55F} $-0.066 \pm 0.038$\\
$S_{5}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.030 \pm 0.042$ &\cellcolor[HTML]{5FA55F} $-0.046 \pm 0.036$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.061 \pm 0.039$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.029 \pm 0.040$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.008 \pm 0.041$\\
$A_{FB}$ &\cellcolor[HTML]{86d2a8} $-0.137 \pm 0.044$ &\cellcolor[HTML]{CCE892} $\phantom{-}0.210 \pm 0.039$ &\cellcolor[HTML]{5FA55F} $-0.042 \pm 0.039$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.060 \pm 0.039$ &\cellcolor[HTML]{5FA55F} $-0.084 \pm 0.040$\\
$S_{7}$ &\cellcolor[HTML]{5FA55F} $-0.038 \pm 0.041$ &\cellcolor[HTML]{5FA55F} $-0.041 \pm 0.037$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.012 \pm 0.038$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.011 \pm 0.041$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.078 \pm 0.040$\\
$S_{8}$ &\cellcolor[HTML]{5FA55F} $-0.081 \pm 0.043$ &\cellcolor[HTML]{5FA55F} $-0.053 \pm 0.041$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.086 \pm 0.043$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.050 \pm 0.040$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.010 \pm 0.042$\\
$S_{9}$ &\cellcolor[HTML]{5FA55F} $-0.006 \pm 0.039$ &\cellcolor[HTML]{5FA55F} $-0.003 \pm 0.040$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.031 \pm 0.038$ &\cellcolor[HTML]{5FA55F} $-0.003 \pm 0.042$ &\cellcolor[HTML]{5FA55F} $-0.098 \pm 0.041$\\
\hline
\multicolumn{6}{c}{}\\
\multicolumn{6}{c}{\textbf{widths}}\\ \hline
\textbf{parameter} &[0.25--4.00] &[4.00--8.00] &[11.00--12.50] &[15.00--18.00] &[1.10--6.00]\\
\hline
$F_{L}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}1.012 \pm 0.032$ &\cellcolor[HTML]{5FA55F} $\phantom{-}1.050 \pm 0.033$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.926 \pm 0.030$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.929 \pm 0.030$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.952 \pm 0.030$\\
$S_{3}$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.869 \pm 0.027$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.893 \pm 0.028$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.865 \pm 0.028$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.837 \pm 0.027$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.934 \pm 0.030$\\
$S_{4}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.980 \pm 0.031$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.854 \pm 0.027$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.801 \pm 0.027$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.880 \pm 0.029$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.858 \pm 0.027$\\
$S_{5}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.922 \pm 0.029$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.805 \pm 0.026$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.821 \pm 0.028$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.866 \pm 0.028$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.924 \pm 0.029$\\
$A_{FB}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.977 \pm 0.031$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.864 \pm 0.028$ &\cellcolor[HTML]{9ae9cd} $\phantom{-}0.737 \pm 0.028$ &\cellcolor[HTML]{9ae9cd} $\phantom{-}0.800 \pm 0.028$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.889 \pm 0.028$\\
$S_{7}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.904 \pm 0.029$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.833 \pm 0.026$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.833 \pm 0.027$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.912 \pm 0.029$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.898 \pm 0.028$\\
$S_{8}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.962 \pm 0.031$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.913 \pm 0.029$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.946 \pm 0.031$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.891 \pm 0.029$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.941 \pm 0.030$\\
$S_{9}$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.880 \pm 0.028$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.897 \pm 0.028$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.833 \pm 0.027$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.931 \pm 0.030$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.914 \pm 0.029$\\
\hline
\end{tabular}
\captionof{table}[The angular moments pull distribution properties in rare-like pseudoexperiments.]{The means and widths of the pull distributions for the angular moments in rare-like pseudoexperiments. 500 pseudoexperiments have been generated, mimicking the rare \BuToKstmm decay. The color scheme ranges from red (overestimation by 50\% of the uncertainty) through green (ideal value) to blue (underestimation by 50\% of the uncertainty), changing in steps of 10\% for the reader's convenience. } \label{tab:toys-Sig-pull-643}
\end{table}

48
Chapters/Toys/jobs/pull_table_644_fld_col.tex

@ -0,0 +1,48 @@
\begin{table} [hbt!]\footnotesize \centering
\begin{tabular}{|l|c c c c c|}
\multicolumn{6}{c}{\textbf{means}}\\ \hline
\textbf{parameter} &[0.25--4.00] &[4.00--8.00] &[11.00--12.50] &[15.00--18.00] &[1.10--6.00]\\
\hline
$F_{L}$ (0)&\cellcolor[HTML]{a8d281} $\phantom{-}0.183 \pm 0.044$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.062 \pm 0.046$ &\cellcolor[HTML]{a8d281} $\phantom{-}0.147 \pm 0.041$ &\cellcolor[HTML]{a8d281} $\phantom{-}0.139 \pm 0.044$ &\cellcolor[HTML]{CCE892} $\phantom{-}0.252 \pm 0.047$\\
$F_{L}$ (1)&\cellcolor[HTML]{CCE892} $\phantom{-}0.201 \pm 0.047$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.009 \pm 0.048$ &\cellcolor[HTML]{5FA55F} $-0.066 \pm 0.044$ &\cellcolor[HTML]{5FA55F} $-0.039 \pm 0.043$ &\cellcolor[HTML]{CCE892} $\phantom{-}0.216 \pm 0.047$\\
$F_{L}$ (2)&\cellcolor[HTML]{f0fea2} $\phantom{-}0.302 \pm 0.044$ &\cellcolor[HTML]{5FA55F} $-0.011 \pm 0.050$ &\cellcolor[HTML]{5FA55F} $-0.029 \pm 0.046$ &\cellcolor[HTML]{86d2a8} $-0.161 \pm 0.041$ &\cellcolor[HTML]{a8d281} $\phantom{-}0.199 \pm 0.047$\\
$F_{L}$ (3)&\cellcolor[HTML]{CCE892} $\phantom{-}0.242 \pm 0.046$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.091 \pm 0.049$ &\cellcolor[HTML]{5FA55F} $-0.087 \pm 0.044$ &\cellcolor[HTML]{5FA55F} $-0.023 \pm 0.040$ &\cellcolor[HTML]{CCE892} $\phantom{-}0.240 \pm 0.046$\\
$F_{L}$ (4)&\cellcolor[HTML]{CCE892} $\phantom{-}0.204 \pm 0.049$ &\cellcolor[HTML]{f0fea2} $\phantom{-}0.364 \pm 0.053$ &\cellcolor[HTML]{f0fea2} $\phantom{-}0.371 \pm 0.057$ &\cellcolor[HTML]{c47f51} $\phantom{-}0.452 \pm 0.068$ &\cellcolor[HTML]{CCE892} $\phantom{-}0.252 \pm 0.047$\\
$S_{3}$ (0)&\cellcolor[HTML]{5FA55F} $\phantom{-}0.043 \pm 0.041$ &\cellcolor[HTML]{5FA55F} $-0.056 \pm 0.042$ &\cellcolor[HTML]{5FA55F} $-0.029 \pm 0.043$ &\cellcolor[HTML]{86d2a8} $-0.105 \pm 0.046$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.001 \pm 0.041$\\
$S_{3}$ (1)&\cellcolor[HTML]{5FA55F} $-0.026 \pm 0.045$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.006 \pm 0.041$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.041 \pm 0.035$ &\cellcolor[HTML]{5FA55F} $-0.091 \pm 0.041$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.048 \pm 0.042$\\
$S_{3}$ (2)&\cellcolor[HTML]{5FA55F} $\phantom{-}0.012 \pm 0.043$ &\cellcolor[HTML]{5FA55F} $-0.035 \pm 0.039$ &\cellcolor[HTML]{5FA55F} $-0.095 \pm 0.041$ &\cellcolor[HTML]{5FA55F} $-0.044 \pm 0.038$ &\cellcolor[HTML]{5FA55F} $-0.033 \pm 0.043$\\
$S_{3}$ (3)&\cellcolor[HTML]{5FA55F} $-0.082 \pm 0.044$ &\cellcolor[HTML]{5FA55F} $-0.023 \pm 0.040$ &\cellcolor[HTML]{5FA55F} $-0.041 \pm 0.040$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.013 \pm 0.041$ &\cellcolor[HTML]{5FA55F} $-0.031 \pm 0.043$\\
$S_{3}$ (4)&\cellcolor[HTML]{5FA55F} $-0.060 \pm 0.033$ &\cellcolor[HTML]{5FA55F} $-0.008 \pm 0.033$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.036 \pm 0.033$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.048 \pm 0.038$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.005 \pm 0.033$\\
$S_{4}$ &\cellcolor[HTML]{5FA55F} $-0.081 \pm 0.045$ &\cellcolor[HTML]{86d2a8} $-0.127 \pm 0.041$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.049 \pm 0.040$ &\cellcolor[HTML]{5FA55F} $-0.040 \pm 0.042$ &\cellcolor[HTML]{5FA55F} $-0.080 \pm 0.043$\\
$S_{5}$ &\cellcolor[HTML]{5FA55F} $-0.017 \pm 0.042$ &\cellcolor[HTML]{5FA55F} $-0.082 \pm 0.039$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.002 \pm 0.038$ &\cellcolor[HTML]{5FA55F} $-0.010 \pm 0.039$ &\cellcolor[HTML]{5FA55F} $-0.059 \pm 0.040$\\
$A_{FB}$ &\cellcolor[HTML]{5FA55F} $-0.098 \pm 0.043$ &\cellcolor[HTML]{a8d281} $\phantom{-}0.105 \pm 0.042$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.037 \pm 0.041$ &\cellcolor[HTML]{5FA55F} $-0.047 \pm 0.043$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.028 \pm 0.046$\\
$S_{7}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.016 \pm 0.043$ &\cellcolor[HTML]{5FA55F} $-0.057 \pm 0.040$ &\cellcolor[HTML]{5FA55F} $-0.049 \pm 0.038$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.011 \pm 0.041$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.073 \pm 0.042$\\
$S_{8}$ &\cellcolor[HTML]{5FA55F} $-0.019 \pm 0.036$ &\cellcolor[HTML]{5FA55F} $-0.018 \pm 0.035$ &\cellcolor[HTML]{5FA55F} $-0.046 \pm 0.039$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.025 \pm 0.041$ &\cellcolor[HTML]{5FA55F} $-0.055 \pm 0.036$\\
$S_{9}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.025 \pm 0.041$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.028 \pm 0.039$ &\cellcolor[HTML]{5FA55F} $-0.013 \pm 0.042$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.016 \pm 0.043$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.044 \pm 0.041$\\
\hline
\multicolumn{6}{c}{}\\
\multicolumn{6}{c}{\textbf{widths}}\\ \hline
\textbf{parameter} &[0.25--4.00] &[4.00--8.00] &[11.00--12.50] &[15.00--18.00] &[1.10--6.00]\\
\hline
$F_{L}$ (0)&\cellcolor[HTML]{5FA55F} $\phantom{-}0.985 \pm 0.031$ &\cellcolor[HTML]{5FA55F} $\phantom{-}1.033 \pm 0.033$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.902 \pm 0.029$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.950 \pm 0.031$ &\cellcolor[HTML]{5FA55F} $\phantom{-}1.040 \pm 0.033$\\
$F_{L}$ (1)&\cellcolor[HTML]{5FA55F} $\phantom{-}1.047 \pm 0.033$ &\cellcolor[HTML]{5FA55F} $\phantom{-}1.080 \pm 0.034$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.968 \pm 0.031$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.924 \pm 0.030$ &\cellcolor[HTML]{5FA55F} $\phantom{-}1.056 \pm 0.033$\\
$F_{L}$ (2)&\cellcolor[HTML]{5FA55F} $\phantom{-}0.989 \pm 0.031$ &\cellcolor[HTML]{a8d281} $\phantom{-}1.127 \pm 0.036$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.996 \pm 0.032$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.887 \pm 0.029$ &\cellcolor[HTML]{5FA55F} $\phantom{-}1.040 \pm 0.033$\\
$F_{L}$ (3)&\cellcolor[HTML]{5FA55F} $\phantom{-}1.019 \pm 0.032$ &\cellcolor[HTML]{5FA55F} $\phantom{-}1.099 \pm 0.035$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.957 \pm 0.031$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.861 \pm 0.028$ &\cellcolor[HTML]{5FA55F} $\phantom{-}1.027 \pm 0.033$\\
$F_{L}$ (4)&\cellcolor[HTML]{5FA55F} $\phantom{-}1.072 \pm 0.034$ &\cellcolor[HTML]{a8d281} $\phantom{-}1.166 \pm 0.037$ &\cellcolor[HTML]{a8d281} $\phantom{-}1.160 \pm 0.040$ &\cellcolor[HTML]{f0fea2} $\phantom{-}1.340 \pm 0.049$ &\cellcolor[HTML]{5FA55F} $\phantom{-}1.057 \pm 0.034$\\
$S_{3}$ (0)&\cellcolor[HTML]{5FA55F} $\phantom{-}0.917 \pm 0.029$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.931 \pm 0.029$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.950 \pm 0.030$ &\cellcolor[HTML]{5FA55F} $\phantom{-}1.018 \pm 0.033$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.920 \pm 0.029$\\
$S_{3}$ (1)&\cellcolor[HTML]{5FA55F} $\phantom{-}0.996 \pm 0.031$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.915 \pm 0.029$ &\cellcolor[HTML]{9ae9cd} $\phantom{-}0.781 \pm 0.025$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.916 \pm 0.029$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.933 \pm 0.029$\\
$S_{3}$ (2)&\cellcolor[HTML]{5FA55F} $\phantom{-}0.961 \pm 0.030$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.864 \pm 0.027$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.901 \pm 0.029$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.840 \pm 0.027$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.961 \pm 0.030$\\
$S_{3}$ (3)&\cellcolor[HTML]{5FA55F} $\phantom{-}0.992 \pm 0.031$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.898 \pm 0.028$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.877 \pm 0.028$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.904 \pm 0.029$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.962 \pm 0.030$\\
$S_{3}$ (4)&\cellcolor[HTML]{9ae9cd} $\phantom{-}0.746 \pm 0.024$ &\cellcolor[HTML]{9ae9cd} $\phantom{-}0.739 \pm 0.023$ &\cellcolor[HTML]{9ae9cd} $\phantom{-}0.739 \pm 0.024$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.848 \pm 0.027$ &\cellcolor[HTML]{9ae9cd} $\phantom{-}0.746 \pm 0.024$\\
$S_{4}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}1.013 \pm 0.032$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.910 \pm 0.029$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.870 \pm 0.028$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.933 \pm 0.030$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.956 \pm 0.030$\\
$S_{5}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.928 \pm 0.029$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.872 \pm 0.028$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.817 \pm 0.027$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.871 \pm 0.028$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.885 \pm 0.028$\\
$A_{FB}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.960 \pm 0.031$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.941 \pm 0.030$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.823 \pm 0.029$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.921 \pm 0.031$ &\cellcolor[HTML]{5FA55F} $\phantom{-}1.023 \pm 0.032$\\
$S_{7}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.954 \pm 0.030$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.899 \pm 0.028$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.846 \pm 0.027$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.912 \pm 0.029$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.931 \pm 0.029$\\
$S_{8}$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.810 \pm 0.026$ &\cellcolor[HTML]{9ae9cd} $\phantom{-}0.773 \pm 0.024$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.860 \pm 0.027$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.905 \pm 0.029$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.809 \pm 0.026$\\
$S_{9}$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.916 \pm 0.029$ &\cellcolor[HTML]{86d2a8} $\phantom{-}0.881 \pm 0.028$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.930 \pm 0.030$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.941 \pm 0.030$ &\cellcolor[HTML]{5FA55F} $\phantom{-}0.922 \pm 0.029$\\
\hline
\end{tabular}
\captionof{table}[The angular moments pull distribution properties in rare-like pseudoexperiments with folding applied.]{The means and widths of the pull distributions of the angular moments in rare-like pseudoexperiments. 500 pseudoexperiments have been generated, mimicking the rare \BuToKstmm decay. In the fit to the pseudodata, folding is applied. The numbers next to the parameters \FL and $S_3$ indicate the applied folding, as these two parameters can be measured using all five folding techniques. For the rest of the parameters, the folding sensitive to the given parameter is used. The color scheme ranges from red (overestimation by 50\% of the uncertainty) through green (ideal value) to blue (underestimation by 50\% of the uncertainty), changing in steps of 10\% for the reader's convenience.} \label{tab:toys-Sig-pull-644}
\end{table}

22
Chapters/Toys/jobs/pull_table_645.tex

@ -0,0 +1,22 @@
\begin{table} \footnotesize \centering
\begin{tabular}{|l|c c c c c|}
\multicolumn{6}{c}{\textbf{means}}\\ \hline
\textbf{parameter} &[0.25--4.00] &[4.00--8.00] &[11.00--12.50] &[15.00--18.00] &[1.10--6.00]\\
\hline
$F_{L}$ & $\phantom{-}0.201 \pm 0.047$ & $\phantom{-}0.009 \pm 0.048$ & $-0.066 \pm 0.044$ & $-0.039 \pm 0.043$ & $\phantom{-}0.216 \pm 0.047$\\
$S_{3}$ & $-0.026 \pm 0.045$ & $\phantom{-}0.006 \pm 0.041$ & $\phantom{-}0.041 \pm 0.035$ & $-0.091 \pm 0.041$ & $\phantom{-}0.048 \pm 0.042$\\
$S_{4}$ & $-0.081 \pm 0.045$ & $-0.127 \pm 0.041$ & $\phantom{-}0.049 \pm 0.040$ & $-0.040 \pm 0.042$ & $-0.080 \pm 0.043$\\
\hline
\multicolumn{6}{c}{}\\
\multicolumn{6}{c}{\textbf{widths}}\\ \hline
\textbf{parameter} &[0.25--4.00] &[4.00--8.00] &[11.00--12.50] &[15.00--18.00] &[1.10--6.00]\\
\hline
$F_{L}$ & $\phantom{-}1.047 \pm 0.033$ & $\phantom{-}1.080 \pm 0.034$ & $\phantom{-}0.968 \pm 0.031$ & $\phantom{-}0.924 \pm 0.030$ & $\phantom{-}1.056 \pm 0.033$\\
$S_{3}$ & $\phantom{-}0.996 \pm 0.031$ & $\phantom{-}0.915 \pm 0.029$ & $\phantom{-}0.781 \pm 0.025$ & $\phantom{-}0.916 \pm 0.029$ & $\phantom{-}0.933 \pm 0.029$\\
$S_{4}$ & $\phantom{-}1.013 \pm 0.032$ & $\phantom{-}0.910 \pm 0.029$ & $\phantom{-}0.870 \pm 0.028$ & $\phantom{-}0.933 \pm 0.030$ & $\phantom{-}0.956 \pm 0.030$\\
\hline
\end{tabular}
\captionof{table}[The means and widths of the pull distributions in rare-like pseudoexperiments.]{The means and widths of the pull distributions in rare-like pseudoexperiments. 500 pseudoexperiments have been generated, mimicking the rare \BuToKstmm decay.} \label{tab:toys-Sig-pull-645}
\end{table}

22
Chapters/Toys/jobs/pull_table_646.tex

@ -0,0 +1,22 @@
\begin{table} \footnotesize \centering
\begin{tabular}{|l|c c c c c|}
\multicolumn{6}{c}{\textbf{means}}\\ \hline
\textbf{parameter} &[0.25--4.00] &[4.00--8.00] &[11.00--12.50] &[15.00--18.00] &[1.10--6.00]\\
\hline
$F_{L}$ & $\phantom{-}0.302 \pm 0.044$ & $-0.011 \pm 0.050$ & $-0.029 \pm 0.046$ & $-0.161 \pm 0.041$ & $\phantom{-}0.199 \pm 0.047$\\
$S_{3}$ & $\phantom{-}0.012 \pm 0.043$ & $-0.035 \pm 0.039$ & $-0.095 \pm 0.041$ & $-0.044 \pm 0.038$ & $-0.033 \pm 0.043$\\
$S_{5}$ & $-0.017 \pm 0.042$ & $-0.082 \pm 0.039$ & $\phantom{-}0.002 \pm 0.038$ & $-0.010 \pm 0.039$ & $-0.059 \pm 0.040$\\
\hline
\multicolumn{6}{c}{}\\
\multicolumn{6}{c}{\textbf{widths}}\\ \hline
\textbf{parameter} &[0.25--4.00] &[4.00--8.00] &[11.00--12.50] &[15.00--18.00] &[1.10--6.00]\\
\hline
$F_{L}$ & $\phantom{-}0.989 \pm 0.031$ & $\phantom{-}1.127 \pm 0.036$ & $\phantom{-}0.996 \pm 0.032$ & $\phantom{-}0.887 \pm 0.029$ & $\phantom{-}1.040 \pm 0.033$\\
$S_{3}$ & $\phantom{-}0.961 \pm 0.030$ & $\phantom{-}0.864 \pm 0.027$ & $\phantom{-}0.901 \pm 0.029$ & $\phantom{-}0.840 \pm 0.027$ & $\phantom{-}0.961 \pm 0.030$\\
$S_{5}$ & $\phantom{-}0.928 \pm 0.029$ & $\phantom{-}0.872 \pm 0.028$ & $\phantom{-}0.817 \pm 0.027$ & $\phantom{-}0.871 \pm 0.028$ & $\phantom{-}0.885 \pm 0.028$\\
\hline
\end{tabular}
\captionof{table}[The means and widths of the pull distributions in rare-like pseudoexperiments.]{The means and widths of the pull distributions in rare-like pseudoexperiments. 500 pseudoexperiments have been generated, mimicking the rare \BuToKstmm decay.} \label{tab:toys-Sig-pull-646}
\end{table}

22
Chapters/Toys/jobs/pull_table_647.tex

@ -0,0 +1,22 @@
\begin{table} \footnotesize \centering
\begin{tabular}{|l|c c c c c|}
\multicolumn{6}{c}{\textbf{means}}\\ \hline
\textbf{parameter} &[0.25--4.00] &[4.00--8.00] &[11.00--12.50] &[15.00--18.00] &[1.10--6.00]\\
\hline
$F_{L}$ & $\phantom{-}0.242 \pm 0.046$ & $\phantom{-}0.091 \pm 0.049$ & $-0.087 \pm 0.044$ & $-0.023 \pm 0.040$ & $\phantom{-}0.240 \pm 0.046$\\
$S_{3}$ & $-0.082 \pm 0.044$ & $-0.023 \pm 0.040$ & $-0.041 \pm 0.040$ & $\phantom{-}0.013 \pm 0.041$ & $-0.031 \pm 0.043$\\
$S_{7}$ & $\phantom{-}0.016 \pm 0.043$ & $-0.057 \pm 0.040$ & $-0.049 \pm 0.038$ & $\phantom{-}0.011 \pm 0.041$ & $\phantom{-}0.073 \pm 0.042$\\
\hline
\multicolumn{6}{c}{}\\
\multicolumn{6}{c}{\textbf{widths}}\\ \hline
\textbf{parameter} &[0.25--4.00] &[4.00--8.00] &[11.00--12.50] &[15.00--18.00] &[1.10--6.00]\\
\hline
$F_{L}$ & $\phantom{-}1.019 \pm 0.032$ & $\phantom{-}1.099 \pm 0.035$ & $\phantom{-}0.957 \pm 0.031$ & $\phantom{-}0.861 \pm 0.028$ & $\phantom{-}1.027 \pm 0.033$\\
$S_{3}$ & $\phantom{-}0.992 \pm 0.031$ & $\phantom{-}0.898 \pm 0.028$ & $\phantom{-}0.877 \pm 0.028$ & $\phantom{-}0.904 \pm 0.029$ & $\phantom{-}0.962 \pm 0.030$\\
$S_{7}$ & $\phantom{-}0.954 \pm 0.030$ & $\phantom{-}0.899 \pm 0.028$ & $\phantom{-}0.846 \pm 0.027$ & $\phantom{-}0.912 \pm 0.029$ & $\phantom{-}0.931 \pm 0.029$\\
\hline
\end{tabular}
\captionof{table}[The means and widths of the pull distributions in rare-like pseudoexperiments.]{The means and widths of the pull distributions in rare-like pseudoexperiments. 500 pseudoexperiments have been generated, mimicking the rare \BuToKstmm decay.} \label{tab:toys-Sig-pull-647}
\end{table}

22
Chapters/Toys/jobs/pull_table_648.tex

@ -0,0 +1,22 @@
\begin{table} \footnotesize \centering
\begin{tabular}{|l|c c c c c|}
\multicolumn{6}{c}{\textbf{means}}\\ \hline
\textbf{parameter} &[0.25--4.00] &[4.00--8.00] &[11.00--12.50] &[15.00--18.00] &[1.10--6.00]\\
\hline
$F_{L}$ & $\phantom{-}0.204 \pm 0.049$ & $\phantom{-}0.364 \pm 0.053$ & $\phantom{-}0.371 \pm 0.057$ & $\phantom{-}0.452 \pm 0.068$ & $\phantom{-}0.252 \pm 0.047$\\
$S_{3}$ & $-0.060 \pm 0.033$ & $-0.008 \pm 0.033$ & $\phantom{-}0.036 \pm 0.033$ & $\phantom{-}0.048 \pm 0.038$ & $\phantom{-}0.005 \pm 0.033$\\
$S_{8}$ & $-0.019 \pm 0.036$ & $-0.018 \pm 0.035$ & $-0.046 \pm 0.039$ & $\phantom{-}0.025 \pm 0.041$ & $-0.055 \pm 0.036$\\
\hline
\multicolumn{6}{c}{}\\
\multicolumn{6}{c}{\textbf{widths}}\\ \hline
\textbf{parameter} &[0.25--4.00] &[4.00--8.00] &[11.00--12.50] &[15.00--18.00] &[1.10--6.00]\\
\hline
$F_{L}$ & $\phantom{-}1.072 \pm 0.034$ & $\phantom{-}1.166 \pm 0.037$ & $\phantom{-}1.160 \pm 0.040$ & $\phantom{-}1.340 \pm 0.049$ & $\phantom{-}1.057 \pm 0.034$\\
$S_{3}$ & $\phantom{-}0.746 \pm 0.024$ & $\phantom{-}0.739 \pm 0.023$ & $\phantom{-}0.739 \pm 0.024$ & $\phantom{-}0.848 \pm 0.027$ & $\phantom{-}0.746 \pm 0.024$\\
$S_{8}$ & $\phantom{-}0.810 \pm 0.026$ & $\phantom{-}0.773 \pm 0.024$ & $\phantom{-}0.860 \pm 0.027$ & $\phantom{-}0.905 \pm 0.029$ & $\phantom{-}0.809 \pm 0.026$\\
\hline
\end{tabular}
\captionof{table}[The means and widths of the pull distributions in rare-like pseudoexperiments.]{The means and widths of the pull distributions in rare-like pseudoexperiments. 500 pseudoexperiments have been generated, mimicking the rare \BuToKstmm decay.} \label{tab:toys-Sig-pull-648}
\end{table}

263
Chapters/Toys/toys.tex

@ -0,0 +1,263 @@
\section{Pseudoexperiments}\label{sec:toys}
In order to validate the \fcncfitter framework and its settings, dedicated tests on pseudoexperiments are performed. These tests are designed to verify the estimation of the fitted values as well as their associated statistical uncertainties. The tests are necessary as the \fcncfitter framework is a rather complex tool. Moreover, the limited statistical power of the sample calls for many constraints on the fitted parameters. These constraints also have to be thoroughly validated.
Therefore, dedicated sets of \emph{pseudoexperiments} are created. A \emph{pseudoexperiment} is a randomly-generated set of \emph{pseudoevents}. The pseudoevents are events generated according to a simplified model. Such a simplified model, or a \emph{toy} model, allows the study of specific cases of the framework configuration as well as the influence of specific parameter values on the final fit result. Moreover, these studies can be done on arbitrarily large samples, minimizing the influence of limited statistical power.
The goal of the pseudoexperiment studies is to validate and correct the functionality of the \fcncfitter framework and therefore to obtain a bias-free result with good coverage of statistical uncertainty. The focus of these studies is on the angular \pwave parameters, however, the coverage of statistical uncertainty and potential biases are also studied for all other free parameters. The events are generated following the distributions of \angleDist and \qsq without the angular acceptance weights applied. In order to study the influence of the acceptance weights, the acceptance weights are applied during the fit.
The pseudoexperiments are validated by studying the \emph{pull distributions} of the parameters that are free in the fit. Such distributions represent the difference between the measured value, $x$, and the generated value, $x_0$, divided by the uncertainty of the measurement $\sigma$,
%
\begin{equation}\label{eq:toy-pull}
p(x, \sigma) = \frac{x-x_0}{\sigma}\,.
\end{equation}
%
Using the central limit theorem, the shape of such distribution for any statistically independent random variable follows a Gaussian distribution. The width of such distribution is equal to one, the mean is equal to zero. Any shift from the mean value of zero indicates a bias of the measured value in terms of standard deviations. If the width is larger than one, it suggests undercoverage of the uncertainty, while a width smaller than one signals overcoverage of the uncertainty. As an example, a width of 0.25 means the actual statistical uncertainty should be four times as large as the measured one. There are also many reasons why the pull distribution cannot be described by a Gaussian distribution, such as that the free parameter is at its limit or there is a technical problem with the minimization.
\subsection{Generation of pseudoexperiments}\label{sec:toys-gen}
For this analysis, the fitted variables \mBu, \mKstarp, \angleDist are directly generated using a random number generator. For the generation, the \root class \texttt{TRandom3} is used. This class is used to generate equi-distributed pseudorandom floating-point numbers in the interval $(0,1\rangle$ via the Mersenne Twister algorithm~\cite{FIT-MersenneTwister}. This algorithm has been developed in the late 90's and it is widely used as it is relatively fast while passing most statistical tests designed to measure the quality of a random number generator\footnote{Among standardized tests are \eg DieHard~\cite{FIT-DieHard} or the U01 tests~\cite{FIT-testU01}. The Mersenne Twister algorithm passes all of the DieHard tests and the vast majority of the U01 tests.}.
The \texttt{TRandom3} class provides generation of uniform distributions. However, the desired distributions are non-uniform. There are several methods how to convert the uniformly distributed random numbers to produce non-uniform distributions: simplest of them being a simple transformation of the distribution. However, this is possible only for distributions with an analytical integral. Unfortunately, this is rarely the case. A simple method used to generate a non-uniform distribution, \emph{rejection method}\,\cite{FIT-NR}, is used in this analysis. The method is very similar to numerical computation of integrals.
\subsubsection{Rejection method}\label{sec:toys-genMethod}
%
\begin{wrapfigure}[12]{r}{0.58\textwidth}
\centering\vspace{-62pt}
\includegraphics[width=0.61\textwidth, angle=0]{FCNC/Toys/rejection.jpg}
\captionof{figure}[Rejection method illustration.]{Rejection method illustration.
The desired generated distribution is denoted $p(x)$, the \emph{comparison function} $f(x)$. The comparison function is always larger than the function $p(x)$. The second random deviate of any $x_0$ is then used to decide whether to accept or reject the point at $x_0$. If rejected, a new second random deviate of $f$ is found. Taken from~Ref.\,\cite{FIT-NR}.} \label{fig:FIT-reject}
\end{wrapfigure}
The goal is to generate a sample of random numbers with a non-uniform distribution function $p(x)$, where $x$
is defined and non-zero in a certain range $(A,B\rangle$. The first step is to select a \emph{comparison function} $f(x)$. The comparison function has to be larger than $p(x)$ for all $x\in(A,B\rangle$:\vspace{-0.25\baselineskip}
%
\begin{equation}
f(x) > p(x)\,\quad \text{for~} \forall x\in(A,B\rangle\,.\vspace{0.25\baselineskip}
\end{equation}
%
Then, the area below the comparison function $f(x)$ is populated uniformly with random points denoted $[u_x,u_y]$. For each point, the values $f(u_x)$ and $p(u_x)$ are calculated. If
$ u_y > \sfrac{p(u_x)}{f(u_x)}$ the point $[u_x,u_y]$ is \emph{rejected}. A new point is generated until the condition is satisfied. The ratio of $\sfrac{\text{rejected}}{\text{accepted}}$ points is then equal to the ratio of the area between $f(x)$ and $p(x)$ to the area under $p(x)$. Hence, the accepted points follow the distribution $p(x)$. An illustration of this procedure is shown in \refFig{FIT-reject}.
The main advantage of this method is its variability and simplicity. The distribution function $p(x)$ always has a maximum, as it is positive, continuous, and its integral over its domain is equal to one. Therefore, it is always possible to construct the comparison function as a `rectangle' above the desired distribution, $f(x) = \max_{x\in(A,B\rangle} \{ p(x) \}$. On the other hand, this leads to the main disadvantage of the method: when the area below the comparison function $f(x)$ is much larger than the area below $p(x)$, the count of rejected points will be very large, leading to a long computing time. A good comparison function is crucial for an effective generation of non-uniform distributions.
\subsection{Validation of the generation of the pseudoexperiments}\label{sec:toys-valid}
The \fcncfitter framework also provides the tools to generate the pseudoexperiments, exploiting the rejection method. In order to validate this functionality of the framework, pseudoexperiments with \BuToKstJpsi decays are generated.
The first test is to generate only events with the signal component. The events are generated following the distributions explained in \refSec{parMeas-sig}. The mass parameters and the parameter $F_S$ are taken from the fit to the reference decay \BuToKstJpsi, the angular components are generated with values based on the study of the \BuToKstJpsi decay done for~Ref.\,\cite{ANA-LHCb-angular4}.
%
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/6/ctk_toyfit__6__JpsiFit_OnlySignal_1BIN_bin0_Run12_SimultaneousFit_HighBmass_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/6/ctl_toyfit__6__JpsiFit_OnlySignal_1BIN_bin0_Run12_SimultaneousFit_HighBmass_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/6/phi_toyfit__6__JpsiFit_OnlySignal_1BIN_bin0_Run12_SimultaneousFit_HighBmass_AllPDFs.eps}\\
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/6/m_toyfit__6__JpsiFit_OnlySignal_1BIN_bin0_Run12_SimultaneousFit_HighBmass_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/6/mkpi_toyfit__6__JpsiFit_OnlySignal_1BIN_bin0_Run12_SimultaneousFit_HighBmass_AllPDFs.eps}
\captionof{figure}[Fit to a signal-only pseudoexperiment.]{Fit to 52\,000 pseudoevents with only the signal component. The projections in \angleDist, \Bu meson mass, and \Kstarp meson mass are shown. The black points represent the generated pseudoevents, the black line is the fitted model. The blue space represents the signal component. The green dashed line shows only the \pwave component, the orange dotted line represents the \swave components and the pink dot-and-dash line depicts the interference between the \pwave and the \swave. }\label{fig:toy-sigOnly-Ref}
\end{figure}
%
According to the mass fit to the reference channel, there are about 52\,000 signal candidates in the data sample. Therefore, for this validation 52\,000 pseudoevents are generated. The fit to this pseudoexperiment is given in \refFig{toy-sigOnly-Ref}. The \fcncfitter generates the desired distributions and also successfully fits them.
The next step is the validation of the folding technique. As the \fcncfitter was initially designed without the cut on \ctk in mind, this is a crucial check, especially for folding four (see \refEq{foldings}). The fit to the pseudoevents with the folding applied is shown in \refFig{toy-sigOnly-Ref-fold} in the appendix \ref{app:toy-valid}. The generated pseudoevents and the fit to the pseudoevents fulfill the expectations and agree with each other.
Similarly, a pseudoexperiment with only the background component is performed. The pseudoevents are generated according to the distributions given in \refSec{parMeas-bkg} and are shown with their corresponding fit in \refFig{toy-bkgOnly-Ref}. Following the mass fit of the \BuToKstJpsi decay, the expected background yield is 13\,000 events. Hence, 13\,000 events are generated in each pseudoexperiment. The parameters used in the generation are taken from the fit of the background data sample described in \refSec{parMeas-bkg}. In the case of the background component, the validation of the folding technique is even more important than in the case of signal only due to the complicated shape of the \ctk background not previously implemented in the \fcncfitter. As shown in \refFig{toy-bkgOnly-Ref-fold} in the appendix, the background is successfully generated, folded and fitted. Once again, 500 pseudoexperiments are created and the pull distributions of the free parameters are investigated. The pull distributions are normalized and centralized at zero.
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Bkg/7/ctk_toyfit__7__JpsiFit_OnlyBackground_1BIN_bin0_Run12_SimultaneousFit_HighBmass_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Bkg/7/ctl_toyfit__7__JpsiFit_OnlyBackground_1BIN_bin0_Run12_SimultaneousFit_HighBmass_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Bkg/7/phi_toyfit__7__JpsiFit_OnlyBackground_1BIN_bin0_Run12_SimultaneousFit_HighBmass_AllPDFs.eps}\\
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Bkg/7/m_toyfit__7__JpsiFit_OnlyBackground_1BIN_bin0_Run12_SimultaneousFit_HighBmass_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/Toys/Bkg/7/mkpi_toyfit__7__JpsiFit_OnlyBackground_1BIN_bin0_Run12_SimultaneousFit_HighBmass_AllPDFs.eps}
\captionof{figure}[Fit to a background-only pseudoexperiment.]{Fit to 13\,000 pseudoevents with only the background component. The black points represent the generated pseudoevents, the black line represents the fitted model. The red area represents the background component. }\label{fig:toy-bkgOnly-Ref}
\end{figure}
\subsection[Large scale pseudoexperiments]{Large scale pseudoexperiments}\label{sec:toy-ref}
Once the pseudoexperiment generation, folding and fitting are validated, the next step is to validate the fit technique using both the signal and the background components. To avoid potential biases of the minimization introduced by low statistical power of the sample, a pseudoexperiment with 65\,000 events is generated. The fraction of signal events, the mass parameters and the $F_S$ parameters are generated with values obtained by the mass fit to the reference channel \BuToKstJpsi described in \refSec{parMeas-FS}, the background parameters are generated with values obtained by the fit to the background sample, described in \refSec{parMeas-bkg}. The angular parameters are initialized based on the study of the \BuToKstJpsi decay done for~Ref.\,\cite{ANA-LHCb-angular4}. In order to study the pull distributions of these pseudoexperiments, five hundred pseudoexperiments are generated for each folding.
The pull distributions are shown in \refFig{toys-Ref-pull-P} and \refFig{toys-Ref-pull-S} and presented in \refTab{toys-Ref-pull}. There are no large biases present; the largest one is 6\% of the statistical uncertainty in the case of the $F_S$ parameter. However, it is clear that the errors are significantly overestimated: the width of the pull distribution is in the order of 20\% for all parameters. This is caused by only an approximate estimation of the uncertainty in the fitter. The standard statistical uncertainties from the maximum-likelihood fit are obtained using the \hesse determination, which derives a symmetric statistical uncertainty by inverting the second derivative of the likelihood function at the best-fit value~\cite{FIT-hesse}. For any real physical problem, the covariance matrix has to be positive-definite. However, in the presence of correlated free parameter(s), this matrix can fail the positive-definite requirement. In this case, \hesse forms a positive-definite approximation~\cite{FIT-TMINUIT}. Due to the modeling of the complicated structure of the \ctk background by the Chebyshev polynomial of order five, discussed in \refSec{parMeas-bkg}, the parameters describing the \ctk background are highly correlated and therefore only an approximation of the statistical uncertainty is available.
Moreover, the presence of larger acceptance correction weights prevents the simplification of the full covariance matrix expression to the inverse Hessian. Hence \hesse uncertainty determination can no longer guarantee providing correct coverage.
This can be improved by using \minos~\cite{FIT-TMINUIT}, which takes into account the parameter correlations and non-linearities. \minos varies each parameter, minimizing the fit function with respect to the other parameters. This procedure, however, requires a good previous error estimation and is computationally very intensive: running several hundred pseudoexperiments would require a significant amount of CPU time. As the shape of the pull distribution using the \hesse approximation is Gaussian, the widths of the pull distributions can be used to correct the statistical uncertainty in the fit of the data. \vspace{\baselineskip}
\begin{table}[hbt!] \footnotesize \centering
\begin{tabular}{|l|c|c|}\hline
\textbf{parameter} & \textbf{mean} & \textbf{width}\\
\hline\hline
$F_{L}$ & $-0.008 \pm 0.009$ & $\phantom{-}0.116 \pm 0.006$\\
$S_{3}$ & $\phantom{-}0.013 \pm 0.018$ & $\phantom{-}0.234 \pm 0.013$\\
$S_{4}$ & $-0.004 \pm 0.014$ & $\phantom{-}0.185 \pm 0.010$\\
$S_{5}$ & $\phantom{-}0.003 \pm 0.017$ & $\phantom{-}0.229 \pm 0.012$\\
$A_{FB}$ & $\phantom{-}0.008 \pm 0.014$ & $\phantom{-}0.178 \pm 0.010$\\
$S_{7}$ & $\phantom{-}0.010 \pm 0.017$ & $\phantom{-}0.221 \pm 0.012$\\
$S_{8}$ & $-0.009 \pm 0.016$ & $\phantom{-}0.214 \pm 0.011$\\
$S_{9}$ & $\phantom{-}0.038 \pm 0.017$ & $\phantom{-}0.218 \pm 0.012$\\\hline
$F_{S}$ & $-0.060 \pm 0.006$ & $\phantom{-}0.074 \pm 0.004$\\
$S_{S1}$ & $\phantom{-}0.053 \pm 0.009$ & $\phantom{-}0.121 \pm 0.007$\\
$S_{S2}$ & $-0.038 \pm 0.015$ & $\phantom{-}0.204 \pm 0.011$\\
$S_{S3}$ & $\phantom{-}0.022 \pm 0.016$ & $\phantom{-}0.211 \pm 0.011$\\
$S_{S4}$ & $\phantom{-}0.021 \pm 0.017$ & $\phantom{-}0.220 \pm 0.012$\\
$S_{S5}$ & $-0.071 \pm 0.015$ & $\phantom{-}0.199 \pm 0.011$\\
\hline
\end{tabular}
\captionof{table}[The angular moments pull distribution properties in reference-like pseudoexperiments.]{The widths and the means of the pull distributions of the angular moments in reference-like pseudoexperiments. 500 pseudoexperiments have been generated, mimicking the reference \BuToKstJpsi decay. Each pseudoexperiment consists of 65\,000 pseudoevents.} \label{tab:toys-Ref-pull}
\end{table}
Another five sets of pseudoexperiments are created in order to validate the pull distribution of fits exploiting the folding technique. The corresponding pull distributions are presented in \refApp{toys-ref}. The effect of the complicated background structure in \ctk can be nicely seen in \refFig{app-toys-ref-fld4}: the odd orders of the polynomials cancel out, leaving only orders of two and four. The correlations between the coefficients are therefore weaker and the width of the pull distribution is close to one.
% An example of such generated and fitted sample is in \refFig{toy-Ref}.
%
%\begin{figure}[hbt!]
% \centering
% \includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/5/ctk_toyfit__5__JpsiFit_1BIN_bin0_Run12_SimultaneousFit_HighBmass_AllPDFs.eps}
% \includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/5/ctl_toyfit__5__JpsiFit_1BIN_bin0_Run12_SimultaneousFit_HighBmass_AllPDFs.eps}
% \includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/5/phi_toyfit__5__JpsiFit_1BIN_bin0_Run12_SimultaneousFit_HighBmass_AllPDFs.eps}\\
% \includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/5/m_log_toyfit__5__JpsiFit_1BIN_bin0_Run12_SimultaneousFit_HighBmass_AllPDFs.eps}
% \includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/5/m_toyfit__5__JpsiFit_1BIN_bin0_Run12_SimultaneousFit_HighBmass_AllPDFs.eps}
% \includegraphics[width=0.32\textwidth]{FCNC/Toys/Ref/5/mkpi_toyfit__5__JpsiFit_1BIN_bin0_Run12_SimultaneousFit_HighBmass_AllPDFs.eps}\\
% \captionof{figure}[Fit to reference channel like pseudoexperiments.]{Fit to 65\,000 pseudoexperiments generated to mimic the \BuToKstJpsi decay. The black points represent the generated pseudoexperiments, the black line represents the fitted model. The red area represents the background component, the blue space represents the signal component. The green dashed line shows only the \pwave component, the orange dotted line represents the \swave components and the pink dot-and-dash line depicts the interference between the \pwave and the \swave. }\label{fig:toy-Ref}
%
%\end{figure}
%%---------------
%\todo[inline]{Copy the toys plots to local folder!}
\input{Chapters/Toys/jobs/614}
\subsection[Realistic scale pseudoexperiments]{Realistic scale pseudoexperiments}\label{sec:toy-sig}
Lastly, the statistical properties of the fit are investigated by creating a set of 500 pseudoexperiments designed to resemble the \BuToKstmm decay.
The fraction of signal pseudoevents and the mass parameters are generated with values obtained by the mass fit to the \BuToKstmm channel described in \refSec{parMeas-FS}. The fraction of signal pseudoevents for each \qsq bin and each Run are listed in \refTab{toys-fsig}. The background parameters are generated with values obtained by the fit to the background sample, described in \refSec{parMeas-bkg}. The background is generated according to \refEq{bkg-angular}: the order of the Chebyshev polynomial in the \ctk dimension is five.
%
\begin{table}[hbt!] \footnotesize \centering
\begin{tabular}{l|c c c c c}
$f_{sig}$ &[0.25--4.00] &[4.00--8.00] &[11.00--12.50] &[15.00--18.00] &[1.10--6.00]\\ \hline
\runI & 0.33 & 0.35 & 0.51 & 0.53 & 0.25 \\
\runII & 0.36 & 0.28 & 0.43 & 0.61 & 0.32\\
\end{tabular}
\captionof{table}[Fraction of signal pseudoevents in the pseudoexperiments.]{Fraction of signal pseudoevents in the pseudoexperiments for each of the \qsq bins and for each Run. The fraction is based on the mass fit to the data sample of the \BuToKstmm decay candidates.} \label{tab:toys-fsig}
\end{table}
%
The \pwave angular parameters are initialized to the Standard Model values obtained using the \flavio package~\cite{ANA-flavio}. The $F_S$ is initialized to be 0.25, based on the \emph{maximal} value of $F_S$ observed in~Ref.\,\cite{ANA-LHCb-angular4}. The interference angular parameters are initialized according to the values measured in~Ref.\,\cite{ANA-LHCb-angular4}. For each pseudoexperiment, 871 pseudoevents are generated. This is motivated by the number of selected signal candidates in \refTab{sel-selection_yields_rare}. In the fit, the free parameters are the \pwave angular parameters, the angular background parameters, the fraction of signal pseudoevents to all pseudoevents, and the exponential parameter describing the \Bu mass background.
Due to the low number of available candidates in the sample, it is not possible to use the complicated model of the \ctk background shown in \refSec{parMeas-bkg}. Hence, a dedicated test has been done to show that a Chebyshev polynomial of order of two is sufficient to describe the \ctk background shape. In this test, despite the generated distribution following the Chebyshev polynomial of order of five, only the parameters up to the order of two are left floating, the rest is set to zero. The pull distributions created from 500 pseudoexperiments are summarized in \refTab{toys-Sig-pull-643}. The simplified background description does not bring any significant bias to the floating angular parameters\footnote{This does not hold for samples with more statistical power: a dedicated test is done using pseudoexperiments with 10 times more pseudoevents than currently present in the data. The simplified background description using the Chebyshev polynomial in \ctk of order of two does not describe the background well enough anymore and the pulls of the angular parameters evince biases of up to 55\% of the standard statistical uncertainty.}. Furthermore, by lowering the order of the Chebyshev polynomial, the overestimation of the statistical uncertainty is reduced to $\sim90\%$.
\input{Chapters/Toys/jobs/pull_table_643_col}
The largest discrepancy of the mean of the pull distribution from zero is present for the parameter \FL, the bias reaches up to 24\% of the statistical uncertainty in the bin in $\qsq\in[11.0,12.5]\gevgev$. The folding technique is also tested and the results are shown in \refTab{toys-Sig-pull-644}. The bias in \FL can be reduced by using the folding technique, but it does not disappear. The parameter \FL is very sensitive to \ctk and therefore small bias caused by not properly describing the background shape is expected. Another more prominent bias in the full angular fit is present in the case of \AFB in $\qsq\in[4.0,8.0]\gevgev$. However, this bias disappears by using the folding 0, see \refTab{toys-Sig-pull-644}.
Also in the case of the folded fit, the width of the pulls is $\approx0.9$. The only exceptions are $S_3$ and $S_8$ with folding four applied: in folding four, the \ctk is also folded. In this case, the parabolic fit did not describe the folded background well enough, hence the fourth Chebyshev polynomial parameter is added. This leads to a correlation present in the background description, resulting in the smaller width of the pulls. Contrary to this, the width of the \FL pulls is up to 1.340. However, using this folding, the measurement of the parameter \FL is also biased by up to 45\% of the statistical uncertainty. This indicates the large sensitivity of \FL to a proper background description.
\input{Chapters/Toys/jobs/pull_table_644_fld_col}
%\input{Chapters/Toys/jobs/pull_table_645}
%\input{Chapters/Toys/jobs/pull_table_646}
%\input{Chapters/Toys/jobs/pull_table_647}
%\input{Chapters/Toys/jobs/pull_table_648}
It was discussed already in \refSec{parMeas-FS} that obtaining the $F_S$ parameter using the fit to the reconstructed \Kstarp mass is not possible due to the low statistical power of the sample. Hence, a dedicated test is done: the pseudoevents are generated with the $F_S = 0.25$ and with non-zero interference terms, but in the modeling of the pseudoevents both $F_S$ and the interference terms are fixed to zero. The background is treated the same way as in the previous case: in the pseudoevents the generated \ctk shape follows a Chebyshev polynomial of order of five, but in the fit only order of two is allowed. The mean and width of the pulls obtained by studying 500 pseudoexperiments are listed in \refTab{toys-Sig-pull-631}. The parameters $S_3$, $S_4$, $S_7$, $S_8$ and $S_9$ show little or no bias and their statistical uncertainty is estimated well. On the other hand, parameters \FL, $S_5$ and \AFB show rather large biases, especially in bins between and above the charmonium resonances. Looking at \refTab{toys-fsig}, those two bins also have the highest background fraction. In these bins, the parameters show a tendency to run into the given parameter limit despite their value at generation being very far from it.
\input{Chapters/Toys/jobs/pull_table_631_col}
The folding technique is applied also in this pseudoexperimental setup. The results are given in \refTab{toys-Sig-pull-632}. The folding technique significantly reduces the bias in the \AFB parameter, and the bias in the parameter \FL can also be reduced using the folding technique. Unfortunately, the folding technique decreases the bias in the parameter $S_5$ only minimally and $S_5$ remains the only problematic parameter in the \qsq region below the \jpsi resonance. As the potential worst emerging bias from setting the \swave contribution to zero is up to 35\% of the statistical uncertainty, the \swave parameters can be omitted in the fit to data. It is worth noting that this assumes $F_S=0.25$ for each pseudoexperiment, which is the maximal observed value in any bin in~Ref.\,\cite{ANA-LHCb-angular4}: the actual value of $F_S$ can very well be below this value.
%\input{Chapters/Toys/jobs/pull_table_632_fld}
\input{Chapters/Toys/jobs/pull_table_632_fld_col}
%\input{Chapters/Toys/jobs/pull_table_633}
%\input{Chapters/Toys/jobs/pull_table_634}
%\input{Chapters/Toys/jobs/pull_table_635}
%\input{Chapters/Toys/jobs/pull_table_636}
Using the fits to the pseudoexperiments, an estimation of the statistical uncertainty in the fit to the data in the rare \BuToKstmm channel is done. The statistical uncertainty is estimated by fitting the statistical uncertainty distribution for the given parameter using 500 pseudoexperiments with a Gaussian function. The expected statistical uncertainty is the mean of this Gaussian distribution. In the case of \FL and \AFB, the error distribution deviates from a pure Gaussian distribution, as the parameters run into the limit, and a right tail is present. As the deviation is not large, the Gaussian distribution is used to describe also the statistical uncertainty for these two parameters. Together with the very narrow pull distributions due to the complicated shape of the background in \ctk in the sample with large statistical power (see \refTab{toys-Ref-pull}), using the Feldman-Cousins technique~\cite{FIT-Feldman} to ensure the correct coverage of the angular parameters will be necessary in the future measurements of this channel.\vspace{\baselineskip}
The expected statistical uncertainties of the angular parameters \allAng are given in \refTab{toys-error-estimate}. In order to put these absolute uncertainties into perspective, the Standard Model value for each parameter is given together with the expected uncertainty. The Standard Model values are obtained using the \flavio package. The standard statistical uncertainty is obtained from fits to pseudoexperiments using the folding technique. For the parameter \FL folding 1 is used, for the parameter $S_3$ folding 3 is used. This choice of the folding is made based on the results in \refTab{toys-Sig-pull-632}: these foldings have the smallest bias. For the rest of the parameters, the folding sensitive to the parameter is used. The pull distributions are shown in \refApp{toys-ref}. Comparing the uncertainty to the \BuToKstKspimm measurement of the angular moments~\cite{ANA-LHCb-angular4}, the only two comparable \qsq intervals are $\qsq\in[1.1,6.0]$ and $\qsq\in[15.0,18.0]$. In the interval below the \jpsi resonance, the expected statistical uncertainties in this work are up to two times larger; in the high \qsq interval the uncertainty is up to three times larger compared to the one in the \BuToKstKspimm measurement. Note that the statistical uncertainty in~Ref.\,\cite{ANA-LHCb-angular4} is estimated with the Feldman-Cousins technique, which returned larger statistical uncertainty values than \hesse.
Due to the large uncertainty of the parameter \FL and the potential bias of this parameter, the fit to the $\Pprime{i}$ basis, defined in \refEq{P'_definition}, has not been performed.
\begin{table}[hbt!]\small \centering
\begin{tabular}{l|c c c c c}
\textbf{parameter} &[0.25--4.00] &[4.00--8.00] &[11.00--12.50] &[15.00--18.00] &[1.10--6.00]\\
\hline
$\FL$ & $\phantom{-}0.67 \pm 0.15$ & $\phantom{-}0.66 \pm 0.12$ & $\phantom{-}0.43 \pm 0.16$ & $\phantom{-}0.34 \pm 0.14$ & $\phantom{-}0.75 \pm 0.13$\\
$S_3$ & $\phantom{-}0.00 \pm 0.15$ & $-0.03 \pm 0.16$ & $-0.09 \pm 0.23$ & $-0.19 \pm 0.19$ & $-0.01 \pm 0.15$\\
$S_4$ & $-0.03 \pm 0.21$ & $-0.24 \pm 0.19$ & $-0.28 \pm 0.25$ & $-0.30 \pm 0.21$ & $-0.15 \pm 0.19$\\
$S_5$ & $\phantom{-}0.04 \pm 0.19$ & $-0.37 \pm 0.19$ & $-0.41 \pm 0.26$ & $-0.30 \pm 0.19$ & $-0.19 \pm 0.19$\\
$\AFB$ & $-0.09 \pm 0.14$ & $\phantom{-}0.19 \pm 0.12$ & $\phantom{-}0.39 \pm 0.18$ & $\phantom{-}0.39 \pm 0.16$ & $\phantom{-}0.01 \pm 0.12$\\
$S_7$ & $-0.02 \pm 0.19$ & $-0.01 \pm 0.19$ & $-0.00 \pm 0.26$ & $-0.00 \pm 0.22$ & $-0.02 \pm 0.19$\\
$S_8$ & $-0.01 \pm 0.23$ & $-0.00 \pm 0.22$ & $\phantom{-}0.00 \pm 0.29$ & $\phantom{-}0.00 \pm 0.24$ & $-0.01 \pm 0.21$\\
$S_9$ & $-0.00 \pm 0.15$ & $-0.00 \pm 0.16$ & $\phantom{-}0.00 \pm 0.23$ & $\phantom{-}0.00 \pm 0.19$ & $-0.00 \pm 0.15$\\
\end{tabular}\captionof{table}[Expected standard statistical uncertainty in the fit to data.]{The Standard Model values of the angular parameters with their expected standard statistical uncertainty from the fit to the data. The Standard Model values are obtained using the \flavio package~\cite{ANA-flavio}. The standard statistical uncertainty is obtained from fits to pseudoexperiments using the folding technique. For the parameter \FL folding 1 is used, for the parameter $S_3$ folding 3 is used. For the rest of the parameters, the folding sensitive to the parameter is used. }\label{tab:toys-error-estimate}
\end{table}
%No folding
%$Fl$ & $0.67 \pm 0.15$ & $0.66 \pm 0.12$ & $0.43 \pm 0.21$ & $0.34 \pm 0.17$ & $0.75 \pm 0.14$\\
%$S3$ & $0.00 \pm 0.16$ & $-0.03 \pm 0.17$ & $-0.09 \pm 0.26$ & $-0.19 \pm 0.21$ & $-0.01 \pm 0.16$\\
%$S4$ & $-0.03 \pm 0.21$ & $-0.24 \pm 0.20$ & $-0.28 \pm 0.30$ & $-0.30 \pm 0.25$ & $-0.15 \pm 0.20$\\
%$S5$ & $0.04 \pm 0.19$ & $-0.37 \pm 0.20$ & $-0.41 \pm 0.29$ & $-0.30 \pm 0.23$ & $-0.19 \pm 0.19$\\
%$Afb$ & $-0.09 \pm 0.15$ & $0.19 \pm 0.12$ & $0.39 \pm 0.20$ & $0.39 \pm 0.17$ & $0.01 \pm 0.13$\\
%$S7$ & $-0.02 \pm 0.19$ & $-0.01 \pm 0.20$ & $-0.00 \pm 0.29$ & $-0.00 \pm 0.24$ & $-0.02 \pm 0.19$\\
%$S8$ & $-0.01 \pm 0.22$ & $-0.00 \pm 0.20$ & $0.00 \pm 0.30$ & $0.00 \pm 0.25$ & $-0.01 \pm 0.20$\\
%$S9$ & $-0.00 \pm 0.16$ & $-0.00 \pm 0.17$ & $0.00 \pm 0.26$ & $0.00 \pm 0.21$ & $-0.00 \pm 0.16$\\
%%%Folding 0
%$Fl$ & $0.67 \pm 0.15$ & $0.66 \pm 0.12$ & $0.43 \pm 0.19$ & $0.34 \pm 0.16$ & $0.75 \pm 0.13$\\
%$S3$ & $0.00 \pm 0.15$ & $-0.03 \pm 0.16$ & $-0.09 \pm 0.23$ & $-0.19 \pm 0.19$ & $-0.01 \pm 0.15$\\
%$Afb$ & $-0.09 \pm 0.14$ & $0.19 \pm 0.12$ & $0.39 \pm 0.18$ & $0.39 \pm 0.16$ & $0.01 \pm 0.12$\\
%$S9$ & $-0.00 \pm 0.15$ & $-0.00 \pm 0.16$ & $0.00 \pm 0.23$ & $0.00 \pm 0.19$ & $-0.00 \pm 0.15$\\
%
%
%
%%%Folding 1
%$Fl$ & $0.67 \pm 0.15$ & $0.66 \pm 0.12$ & $0.43 \pm 0.16$ & $0.34 \pm 0.14$ & $0.75 \pm 0.13$\\
%$S3$ & $0.00 \pm 0.15$ & $-0.03 \pm 0.16$ & $-0.09 \pm 0.23$ & $-0.19 \pm 0.19$ & $-0.01 \pm 0.15$\\
%$S4$ & $-0.03 \pm 0.21$ & $-0.24 \pm 0.19$ & $-0.28 \pm 0.25$ & $-0.30 \pm 0.21$ & $-0.15 \pm 0.19$\\
%
%
%%%Folding 2
%$Fl$ & $0.67 \pm 0.15$ & $0.66 \pm 0.12$ & $0.43 \pm 0.17$ & $0.34 \pm 0.14$ & $0.75 \pm 0.13$\\
%$S3$ & $0.00 \pm 0.16$ & $-0.03 \pm 0.16$ & $-0.09 \pm 0.24$ & $-0.19 \pm 0.20$ & $-0.01 \pm 0.15$\\
%$S5$ & $0.04 \pm 0.19$ & $-0.37 \pm 0.19$ & $-0.41 \pm 0.26$ & $-0.30 \pm 0.19$ & $-0.19 \pm 0.19$\\
%
%
%%%Folding 3
%$Fl$ & $0.67 \pm 0.14$ & $0.66 \pm 0.12$ & $0.43 \pm 0.17$ & $0.34 \pm 0.14$ & $0.75 \pm 0.13$\\
%$S3$ & $0.00 \pm 0.15$ & $-0.03 \pm 0.16$ & $-0.09 \pm 0.23$ & $-0.19 \pm 0.19$ & $-0.01 \pm 0.15$\\
%$S7$ & $-0.02 \pm 0.19$ & $-0.01 \pm 0.19$ & $-0.00 \pm 0.26$ & $-0.00 \pm 0.22$ & $-0.02 \pm 0.19$\\
%
%
%%%Folding 4
%$Fl$ & $0.67 \pm 0.15$ & $0.66 \pm 0.12$ & $0.43 \pm 0.17$ & $0.34 \pm 0.15$ & $0.75 \pm 0.14$\\
%$S3$ & $0.00 \pm 0.14$ & $-0.03 \pm 0.15$ & $-0.09 \pm 0.23$ & $-0.19 \pm 0.19$ & $-0.01 \pm 0.13$\\
%$S8$ & $-0.01 \pm 0.23$ & $-0.00 \pm 0.22$ & $0.00 \pm 0.29$ & $0.00 \pm 0.24$ & $-0.01 \pm 0.21$\\
The statistical uncertainty estimation can be used to obtain the expected sensitivity of the measurement of the real value of the vector coupling Re(\C9 ).
A likelihood scan as a function of Re(\C9 ) is performed. This is done using the \flavio package and shown in \refFig{toys-C9}. A pseudomeasurement is generated using the predictions for a New Physics model with Re(\C9 )=-2 as central value,
which is approximately the value preferred by the fit in~Ref.\,\cite{ANA-LHCb-angular4}.
The \qsq bins used are in the range $[1.1,6.0]$\gevgev and $[15.0,18.0]$\gevgev, as the predictions close to \jpsi are affected by the \ccbar loops (see \refSec{SM_bsll}). One unit on the $y$-axis $[-2\Delta\log\mathrm{L}]$ corresponds to one standard deviation squared. Assuming the value of Re(\C9 )=-2, the expected deviation from the Standard Model value is $\approx2.4$ standard deviations. It is important to stress that this estimation is done only using the expected \emph{statistical} uncertainty on the parameters. Performing systematic studies especially related to the background shape can increase this uncertainty. Moreover, performing a Feldman-Cousins scan is necessary as the pull distributions show that the uncertainty estimation is volatile. The uncertainties obtained from the Feldman-Cousins scan can be larger than the ones presented here.
\begin{figure}[hbt!] \vspace{-10pt}
\centering
\includegraphics[width=0.75\textwidth]{./FCNC/C9_sensitivity.pdf}
\captionof{figure}[Likelihood scan of the shift of Re(\C9 ) assuming NP value.]{Likelihood scan of the shift of Re(\C9 ) from its SM value. The expected likelihood scan is represented by the dashed green line. The prediction is compared to the measured likelihood scan in the decay \BuToKstmmKSFull, given by the solid blue line. The predictions are taken from the \flavio package~\cite{ANA-flavio}. }\label{fig:toys-C9}
\end{figure}
\clearpage

85
Chapters/TrackEff/TrackCalib.tex

@ -0,0 +1,85 @@
\subsection{TrackCalib package}\label{sec:trackEff-TrackCalib}
The ultimate goal of the track reconstruction efficiency measurement is to measure the ratio $R$ defined in \refEq{trackEff-R}. The ratio obtained directly from the trigger selection can be used by a wide range of \lhcb analyses to correct the track reconstruction efficiencies obtained from the simulation. However, many analyses require a dedicated approach: a tighter selection than the one presented here, a different detector occupancy measure to weight their simulation sample, a special binning in momentum $p$ and pseudorapidity $\eta$, or estimating the track reconstruction efficiency or the ratio $R$ in some other variable. In order to simplify the dedicated measurement of the track reconstruction efficiency tailored to the needs of any analysis, a dedicated tool \TrackCalib has been created and made available to the collaboration in 2017. This \python tool allows the user to evaluate the track reconstruction efficiency using command line options. In \runI, such customization was not possible. However, exploiting the stripping lines described in \refSec{trackEff-strip}, these options have been recently extended also to the \runI sample. The tool documentation is available online at~\cite{Twiki-TrackCalib}.
The \TrackCalib package works in three main steps: data preparation, the fit of the data and the plotting of the efficiencies. It is possible to run each step or run all three steps together. The user can decide
\begin{itemize}\setlength{\parskip}{0\baselineskip}
\item what method is used
\item which simulation version is used
\item whether data, simulation or both are used
\item whether only one magnet polarity or both are used
\end{itemize}
for the track reconstruction efficiencies or $R$ evaluation. Moreover, the user can decide in dependence on what variables should the efficiency or the ratio $R$ be evaluated. In each of the three steps, additional options can be set.\vspace{\baselineskip}
\subsubsection{Data preparation}\label{sec:trackEff-TrackCalib-prepare}
In the first step of the \TrackCalib tool, the dataset used for the tracking efficiency calculation is selected. The full datasample obtained from the trigger lines is rather large (especially in real data) not just due to the large amount of saved events, but also because of many variables being saved. Therefore, a smaller datafile consisting only of relevant variables is created. Additional selection criteria, typically a ghost track probability cut, can be set by the user. These criteria can be applied to the \Probe track only, the \Tag track only, or both. The default requirements used by \TrackCalib are listed in \refTab{trEff-calib}.
\begin{table}
\begin{center}
\begin{tabular}[htbp]{c|r|r|r}
{Variable} &{\velo method} &{\Tstation method} &{Long method} \\ \hline \hline
\multicolumn{4}{c}{\emph{Tag} selection criteria} \\ \hline
\dllmupi &$--$ &$--$ &$--$ \\
\ptot &$>5\gevc$ &$>7\gevc$ &$>10\gevc$ \\
\pt &$>0.7\gevc$ &$>0.5\gevc$ &$>1.3\gevc$ \\
{\rm track}\;\chisqndf &$<5$ &$<5$ &$<2$ \\
IP &$--$ &$>0.2\mm$ &$--$ \\ \hline
\multicolumn{4}{c}{\emph{Probe} selection criteria} \\ \hline
\ptot &$>5\gevc$ &$>5\gevc$ &$>5\gevc$ \\
\pt &$>0.7\gevc$ &$>0.1\gevc$ &$>0.1\gevc$ \\
{\rm track}\;\chisqndf &$--$ &$--$ &$-$ \\ \hline
\multicolumn{4}{c}{\jpsi candidates selection criteria} \\ \hline
$|m_{\mup\mun}-m_{\jpsi}|$ &$<200\mevcc$ &$<500\mevcc$ &$<500\mevcc$ \\
\pt &$--$ &$--$ &$>0\gevc$ \\
{\rm vertex}\;\chisq &$<5$ &$<5$ &$<5$ \\
Track DOCA &$--$ &$--$ &$--$ \\
IP &$-$ &$--$ &$<0.8\mm$ \\
\end{tabular}
\captionof{table}[\TrackCalib selection criteria.]{Selection cuts applied to the \Tag track, \Probe track and the reconstructed \jpsi candidate by the default \TrackCalib selection.}
\label{tab:trEff-calib}
\end{center}
\end{table}
Moreover, the required overlap fraction needed to associate the tracks can be modified. The variable used for the weighting of the simulation sample can be chosen. Lastly, a maximum number of event candidates per method and charge setting\footnote{Similarly to what is done in the trigger selection, the \TrackCalib tool either uses a \mup track as a \Probe and \mun as a \Tag or the opposite charge configuration.} can be used. The selected dataset is locally stored in order to be readily available for the next two steps.\vspace{\baselineskip}
\subsubsection{Fit execution}\label{sec:trackEff-TrackCalib-fit}
In this part of the \TrackCalib tool, the previously created dataset is divided based on the selected variables. The user can decide what binning in the desired variables is used\footnote{This can be done either by requiring a certain number of bins with the same width or by specifying the bin edges.}. In the case of a low-statistics sample, instead of performing a simultaneous fit to the matched and failed \jpsi candidates, as explained in \refSec{trackMeas-tag-and-probe}, a fit to the matched and \emph{all} \jpsi candidates is performed. Due to the very high track reconstruction efficiency, the failed sample has very little signal component (see \refFig{trEff-mass}): by avoiding the fit to the failed sample, the fit stability improves. To further improve the fit stability, the Crystal Ball function used to describe the signal component can be replaced by a sum of two Gaussian distributions. Lastly, the user can also execute an unbinned fit to the \jpsi mass.
For each method and each variable bin, a dedicated file containing the calculated efficiency as well as the fitted distribution is saved. Moreover, an output file is created, where the fit status and the fitted parameter values are saved. For the user's convenience, another warning file is created, where only failed fit statuses and variables with large or zero uncertainty are saved. This allows for quick recognition of failed fits.
\subsubsection{Plotting}\label{sec:trackEff-TrackCalib-plot}
Last part of the \TrackCalib tool is the plotting of the track reconstruction efficiency dependency plots and the creation of correction tables: the \root files (the correction tables), where the ratio $R$ is saved in bins of the desired variables (the default is pseudorapidity and momentum). In this step, the three methods are also combined into the Combined and Final methods.
\subsubsection{Simulation samples}\label{sec:trackEff-centralProd}
In order to apply the tag-and-probe method on the simulated sample, several sets of the decay \decay{\Bu}{\jpsi(\to\mumu)X}, where $X$ is any particle, are created. These simulated samples are then treated the same way as the recorded data. In order to correct the difference between the simulation and the data in the occupancy, weights based on the number of hits in the \spd are applied.
The \lhcb software is constantly evolving and improving. Therefore, it is important to evaluate the track reconstruction efficiency correctly for each version of the software. The available simulation samples used in \TrackCalib for each data taking year are listed in \refTab{trEff-sim}.
\begin{table}
\begin{center}
\begin{tabular}[htbp]{l|l}
{year} &{Simulation versions} \\ \hline
2015\,(EM) & Sim08h, Sim09b \\
2015 & Sim09a, Sim09b \\
2016 & Sim09a, Sim09b, Sim09d \\
2016\,(strip) & Sim09h \\
2017 & Sim09h \\
2017\,(strip) & Sim09h \\
2018 & Sim09h \\
2018\,(strip) & Sim09h \\
\end{tabular}
\captionof{table}[Available simulation samples for the track reconstruction efficiency measurements.]{Available simulation samples for the track reconstruction efficiency measurements. In the first months of data taking in 2015, the settings of the machine were different to the rest of the year. This sample is denoted \emph{early measurements} (EM). Samples produced using the stripping selection instead of the trigger selection are denoted as (strip). }
\label{tab:trEff-sim}
\end{center}
\end{table}

187
Chapters/TrackEff/measurement.tex

@ -0,0 +1,187 @@
%\subsection{Track reconstruction at \lhcb}\label{sec:trackReco}
%In order to understand the procedure of estimating the track reconstruction efficiencies, iti is important to first understand the process of reconstructing tracks at the \lhcb detector. Tracks are reconstructed from \emph{charged} particles transversing the detector material. As they travel through the detector material and interact with it, they deposit a fraction of their energy in the detector. This energy is measured and referred to as a \emph{hit}. Hits represent the spatial and timing information of the trajectory of the particle. Exploiting this information, dedicated algorithms can reconstruct the trajectory of the particle: a \emph{track}.
%
%Any hit in the detector located at a given $z$-position is described by its time and position in the x-y plane and by the particle charge $q$ relative to its momentum $p$~\cite{LHCb-Performance}:
%%
%\begin{equation}\label{eq:track_vec}
% \vec{x}\left(z\right) = \begin{pmatrix} x \\ y \\ t_x \\ t_y \\ q/p \end{pmatrix}\,,
%\end{equation}
%%
%and its the covariance matrix in order to reflect the correlation of uncertainties.
%
%Each hit position can be extrapolated to the next detector layer using a \emph{track-propagation function} $f$ as follows:
%%
%\begin{equation}\label{eq:track_propagation}
%\vec{x}\left(z_1\right) = f_{z_1\rightarrow z_2}\vec{x}\left(z_2\right)\,.
%\end{equation}
%
%In the case of no magnetic field, $f$ takes the form of a simple propagation matrix
%%
%\begin{equation}\label{eq:track_propagation}
%f_{z_1\rightarrow z_2} =\begin{pmatrix}
%1 & 0 & z_2-z_1 & 0 & 0 \\
%0 & 1 & 0 & z_2-z_1 & 0 \\
%0 & 0 & 1 & 0 & 0 \\
%0 & 0 & 0 & 1 & 0 \\
%0 & 0 & 0 & 0 & 1
%\end{pmatrix}\,.
%\end{equation}
%
%However, the particles are passing a strong magnetic field and the function $f$ becomes non-trivial: it depends on $z_1$ and $z_2$ as the magnetic field is present not just in the dipole magnet area, but also near the \ttracker and \Tstations. Moreover, multiple-scattering effects need to be taken into account by enlarging the uncertainties in the covariance matrix of each hit.
%
%\subsubsection{Kalman filters}\label{sec:trackReco-kalman}
%
%As the propagation matrix is non-linear, even with an exact propagation matrix available, calculating the particle's trajectory would require significant computational resources. Therefore, \emph{Kalman filter}\footnote{Also known as linear quadratic estimation.} is used~\cite{Kalman}. This technique is widely used in high-energy physics experiments as it eliminates the computation of matrix inversion, speeding up the computation immensely, while accounting for multiple scattering and other possible energetic losses.
%
%The Kalman filter is an iterative algorithm. It exploits the fact we know the propagation matrix: measured point $x_{k}$ and its error covariance matrix $P_k$ is propagated to $\hat{x}_{k+1}$ and $P_{k+1}$. If the particle interacts with the material, the search window is enlarged accordingly. The point at $x_{k+1}$ is also measured. This information is used to update $\hat{x}_{k}$ using \emph{filter equations}. These equations remove extreme outliers, speeding up the pattern recognition process. This is repeated until all hit information from all detectors is used. Then, the process is reversed: starting from the last updated point, the previous measurements are updated. This ensures full exploitation of the available information in a timely manner. Kalman filter then provides an estimate of the final momentum and also the $\chisq$ for given track, describing the track fit quality.
%
%%http://bilgin.esme.org/BitsAndBytes/KalmanFilterforDummies
\subsection{Track reconstruction algorithms in \lhcb}\label{sec:trackReco-trackTypes}
As shown in \refSec{det_tracking_vertexing}, the \lhcb detector is designed with large gaps between the tracking detectors. The particles can escape or be created at any point in the active detector area, leaving hits only in some of the detectors. Hence there are many possible track types to be reconstructed. A sketch of the most used types at the \lhcb experiment is given in \refFig{trEff-track_types}. The tracks are reconstructed using algorithms that correspond to the different track types. These algorithms are independent and therefore a particle crossing the detector is typically reconstructed by several of these algorithms: for example, a particle bent out of the \lhcb acceptance by the magnet can be reconstructed by the \emph{upstream} tracking algorithm as well as the \velo tracking algorithm.
\begin{figure}[htbp]
\begin{center}
\subfloat[]{\includegraphics[width=0.5\textwidth]{TrackEff/trackTypesRunIAndII_Blue.pdf}}\\
\captionof{figure}[The most common track types used at the \lhcb experiment.]{The most common track types used at the \lhcb experiment. The most valuable track type to the \lhcb physics analyses are \emph{long tracks} as they have the best momentum resolution. Long tracks have hits in all \lhcb tracking detectors.}
\label{fig:trEff-track_types}
\end{center}
\end{figure}
\begin{itemize}[leftmargin=*]
\setlength{\itemindent}{0em}
\item \textbf{Long track:} A track with origin in the \velo that also traverses the \Tstations. Hit information from the \ttracker \emph{can} be added, but it is not required. This type of track is the most common track type in \lhcb studies as it has the best momentum resolution.
There are two independent algorithms used to obtain the long track: \emph{forward tracking} and \emph{matching}.
The \emph{forward tracking} algorithm propagates \velo track's trajectory to the T stations, taking into account the bending of the trajectory by the magnet. In the \emph{matching} algorithm standalone T tracks are created and combined with \velo tracks, also taking the bending into account. The two algorithm results can be compared and a combined set of best long tracks is obtained. \ttracker hits are added only after finding a track candidate from the \velo and \Tstation hits.
\item \textbf{Velo track:} A track that only consists of hits in the \velo detector: they are independent of the forward tracking. These tracks are used for primary vertices reconstruction.
\item \textbf{Upstream track:} A track reconstructed using \velo and \ttracker hits. As these tracks have only poor momentum information, they are rarely used in analyses. If no other algorithm reconstructs this track, it corresponds to a particle with low enough momentum so it is bent out of the \lhcb acceptance by the magnet.
\item \textbf{Downstream track:} A track reconstructed using \ttracker and T station hits. As there is no \velo information, the momentum resolution is worse compared to long tracks. Long-lived particle decay products leave downstream tracks in the detector.
\item \textbf{T track:} A track reconstructed only using hits in \Tstations. Similarly to \velo tracks, there is no momentum information. If no other algorithm reconstructs this track, it typically represents a very long-lived particle decay product.\\
\end{itemize}
The algorithms searching for hits from different detectors and combining them can sometimes combine hits that do not originate from the same particle. Such tracks, called \emph{ghost} tracks, contribute to the background. Most of the ghost tracks can be rejected by requiring a high track fit quality \chisq. However, this can also lead to the rejection of real particle tracks, modifying the track reconstruction efficiency. To resolve this issue, a dedicated neural network is trained in the \lhcb reconstruction software. This neural network is designed to remove most ghost tracks while minimizing the impact on real tracks. It returns a value between 0 and 1, a ``ghost probability'', which is typically required to be below 0.4, corresponding to removal of more than 70\% of reconstructed ghost tracks with hardly any loss in efficiency.
\subsection{Determination of the track reconstruction efficiency}\label{sec:trackMeas}
In most analyses carried out by the \lhcb collaboration, the track reconstruction efficiency is estimated using a Monte Carlo simulation. While the simulation is a very good representation of the real data, it is not perfect. The main discrepancy between the real data and the simulation lies in the detector occupancy distributions (see \refSec{sel-SimulationCorrection}), but there can be other imperfections in the kinematic variables as well. The track reconstruction efficiency depends mainly on the kinematic properties of the track (momentum, direction, and position in the detector) as well as on the occupancy of the detector. While the discrepancies between the data and the simulation in the kinematic and occupancy quantities can be corrected for, the track reconstruction efficiency also depends on the placement and the amount of dead channels, inactive materials and others. These effects are very hard to simulate or correct for in the Monte Carlo simulation. Hence, the track reconstruction efficiency obtained using purely a simulation sample is a good approximation, but it does not meet the required precision. Measurement using a data-driven technique is necessary.
The track reconstruction efficiency measurement is executed using a data-driven tag-and-probe method exploiting the decay of \decay{\jpsi}{\mumu}. The tag-and-probe technique is widely used in high energy physics~\cite{tagAndProbe_CMS_1,tagAndProbe_CMS_2,tagAndProbe_ATLAS} to measure the efficiency of various processes, typically reconstruction or selection. The method exploits two-body decays of a well-known resonance. One of the decay products, the \Tag, is a well identified track, while the other, the \Probe, is an unbiased track. The \Probe track then either passes or fails the reconstruction or selection criteria for which the efficiency is to be measured. The ratio of tracks passing these criteria to all reconstructed unbiased tracks is the reconstruction or selection efficiency $\varepsilon$:
\begin{equation}\label{eq:tag-and-probe}
\varepsilon = \frac{N_{\text{passing criteria}}}{N_\text{{all unbiased}}}\,.
\end{equation}
The method used at the \lhcb experiment was developed during \runI and further advanced during \runII~\cite{TrackEffRun1}. It exploits the decay of \jpsi mesons to a muon pair. Recently, a new technique exploiting the decay of \decay{\jpsi}{\en\ep} has been developed~\cite{TrackEffElectrons}. However, the focus in this work is on the \jpsi\to\mup\mun decay only. In the following section, the tag-and-probe method used to measure the track reconstruction efficiency in \lhcb is explained in detail.
\subsubsection[Tag-and-probe technique using \texorpdfstring{\JpsiTomm}{Jpsi to mu mu} decays]{Tag-and-probe technique using \texorpdfstring{\JpsiTommBF}{Jpsi to mu mu} decays}\label{sec:trackMeas-tag-and-probe}
The muon decay of the \jpsi meson is used for the track reconstruction efficiency determination, as muons traverse the whole \lhcb detector region and leave hits also in the muon stations. Moreover, they do not interact hadronically.
The \Tag track is a muon track reconstructed using standard long track reconstruction algorithm and passing a tight selection to make sure it is a decay product of a \jpsi resonance, such as a momentum requirement. The \Probe track is reconstructed using one of three dedicated algorithms designed in such a way that each probes one (or two) of the three tracking detectors of the \lhcb: the \velo, the \Tstations and the \ttracker. These algorithms are very loose in order to minimize the potential bias imposed by any selection on the final result.
The criterion to determine whether the \Probe track is efficiently reconstructed or not is the existence of a long track that can be associated with the \Probe track. The matching is performed by checking the number of common hits between the \Probe and the long track in the tracking detectors. The \emph{overlap fraction} is used as the association criterion. The overlap fraction is the number of common hits $N_{\rm common}$ divided by the minimum number of hits in the subdetector required by the long track reconstruction algorithm $N_{\rm required}$:
\begin{equation}\label{eq:overlap}
{\rm overlap ~fraction} = \frac{N_{\rm common}}{N_{\rm required}}\,,
\end{equation}
%
Using the overlap fraction as the association criterion, the \refEq{tag-and-probe} then becomes
\begin{equation}\label{eq:tag-and-probe-real}
\varepsilon_{\rm tr} = \frac{N_{\rm assoc}(\jpsi\to\mup\mun)}{N_{\rm all}(\jpsi\to\mup\mun)}\,,
\end{equation}
%In the measurement of the efficiency combinatorial background is also present, therefore only \Probe tracks corresponding to the \JpsiTomm decay are considered.
where $N_{\rm assoc}$ denotes the number of \Probe tracks passing the association criteria and $N_{\rm all}$ denotes all unbiased \Probe tracks.
Depending on the algorithm used to reconstruct the \Probe track, there are three methods to obtain the track reconstruction efficiency. The methods, illustrated in \refFig{trEff-probe_reco}, are:
\begin{itemize}[leftmargin=*]
\setlength{\itemindent}{0em}
\item \textbf{Long method}~~The \Probe track is first reconstructed using muon station hits to create a standalone muon track. This track is then matched to the hits in the \ttracker. Note that the long track reconstruction algorithm described in \refSec{trackReco-trackTypes} does not require hits in either the muon stations or the \ttracker. Hence, this method directly probes the track reconstruction efficiency of long tracks. The \Probe track is considered efficient when the overlap fraction is at least 70\% for the muon stations and 60\% for the \ttracker. %The \ttracker hits are added by searching for them along the trajectory after the long track reconstruction.
\item \textbf{\velo method}~~The \Probe track is reconstructed as a downstream track with added muon station hits in order to identify the particle as a muon. This method probes the \velo track reconstruction efficiency. The \Probe track is considered efficient when the overlap fraction in the \Tstations is at least 50\%.
\item \textbf{\Tstation method}~~The \Probe track is reconstructed by a dedicated algorithm from hits in the \velo and the muon stations. This method probes the \Tstation track reconstruction efficiency. The \Probe track is considered efficient when there are at least two common hits in the muon stations and the same \velo segment as the \Probe signature.
\end{itemize}
\begin{figure}[htbp]\vspace{-1cm}
\begin{center}
\subfloat[]{\includegraphics[width=10cm]{TrackEff/trackEffMuonTT_tag}}\\
\subfloat[]{\includegraphics[width=10cm]{TrackEff/trackEffDownstream_tag}}\\
\subfloat[]{\includegraphics[width=10cm]{TrackEff/trackEffVeloMuon_tag}}\\
\captionof{figure}[Illustration of the probe track reconstruction algorithms.]{Illustration of the probe track reconstruction algorithms: (a) Long method, (b) \velo method, and (c) \Tstation method~\cite{TrackEffRun1}.
Red dots indicate the hit information used by each algorithm to select the \Probe track.
Solid blue line represent the trajectory of the \Tag (upper line) and \Probe (lower line) tracks.
The blue dotted line represents the sub-detector region which is probed by the respective method.
The dashed vertical line shows the bending plane of the magnet.
}
\label{fig:trEff-probe_reco}
\end{center}
\end{figure}
Different methods probe different track reconstruction algorithms. When combining the \velo and \Tstation efficiency, a careful reader may notice this is equivalent to probing the long track reconstruction efficiency. This is further exploited by two methods:
\begin{itemize}[leftmargin=*]
\setlength{\itemindent}{0em}
\item \textbf{Combined method}~~This method represents the combination of \velo and \Tstation efficiencies. The efficiencies from these two methods are uncorrelated, with the exception of track kinematics and detector occupancy. The combined method efficiency is then simply
\begin{equation}\label{eq:TrEff-combined}
\varepsilon_{\rm Comb} = \varepsilon_{\rm \velo} \times \varepsilon_{\rm T station} \,.
\end{equation}
\item \textbf{Final method}~~The Final method is the weighted average of Long and Combined methods. The weights are inverse squares of the uncertainty for each method, $w_{\rm Comb}=\sfrac{1}{\sigma^2_{\rm Comb}}$, $w_{\rm Long}=\sfrac{1}{\sigma^2_{\rm Long}}$. The weighted average is then
\begin{equation}\label{eq:TrEff-Final}
\varepsilon_{\rm Final} = \frac{w_{\rm Comb} \varepsilon_{\rm Comb} + w_{\rm Long} \varepsilon_{\rm Long}}{w_{\rm Comb}+w_{\rm Long}} \,.
\end{equation}
The statistical uncertainty of the average is
\begin{equation}\label{eq:TrEff-Final-error}
\sigma_{\rm Final} = \frac{1}{\sqrt{w_{\rm Comb}+ w_{\rm Long}}}\,.
\end{equation}
This method represents the most precise track reconstruction efficiency at \lhcb, as it exploits the information from all three available methods.
\end{itemize}
It is worth noting here that using muon tracks as the \Tag and \Probe does not take into account the material absorption effects on the track reconstruction efficiency. The track reconstruction efficiency presented here represents the probability of a particle crossing the full active detector area to be reconstructed. However, the hadronic interactions can be taken into account in the form of a systematic uncertainty. The uncertainty is equal to the fraction of hadrons that cannot be reconstructed due to hadronic interactions multiplied by the material budget uncertainty. As the cross-section of the hadronic interaction depends on the given particle, this has to be evaluated separately for each hadron. Moreover, it also depends on the momentum of the hadron: the fraction of hadrons that cannot be reconstructed due to the hadronic interactions can be estimated for each process using the simulation.
\subsubsection{Efficiency evaluation}\label{trackMeas-eval}
When selecting the \jpsi candidates, there is a contribution from random combinations of real or even fake muon tracks. The number of signal \jpsi\to\mup\mun events $N_{\rm all}$, as well as the number of associated events $N_{\rm assoc}$ in \refEq{tag-and-probe-real} needs to be extracted from a mass fit to the \jpsi candidate. The \jpsi candidate's mass is calculated as
\begin{equation}
m_{\rm rec} = \sqrt{ \left( E_{\rm tag} + E_{\rm probe}\right)^2 -
\left( \vec{p}_{\rm tag} + \vec{p}_{\rm probe}\right)^2
}\,,
\end{equation}
where $E$ is energy of the \Tag or \Probe track and $\vec{p}$ is its momentum.
The reconstructed \jpsi candidates are split into two sets. \emph{Matched} candidates fulfill the association criteria, \emph{failed} candidates do not fulfill the association criteria. These two sets are simultaneously fitted with a sum of two Crystal Ball functions with shared mean value as the signal component and an exponential function as the background component. The two sets share all signal shape parameters. The yields and the background shape in the \matched and \failed sets are independent. Following \refEq{tag-and-probe-real}, the track reconstruction efficiency can be expressed as a function of the signal yields of the \matched, $N_{\rm sig}^{\rm Match}$, and \failed, $N_{\rm sig}^{\rm Fail}$, samples
\begin{equation}\label{eq:tag-and-probe-eff}
\varepsilon_{\rm tr} = \frac{N_{\rm sig}^{\rm Match}}{N_{\rm sig}^{\rm Match}+N_{\rm sig}^{\rm Fail}}\,.
\end{equation}
It is important to consider only the \emph{signal} yields, as combinatorial background is present in the measurement. An example of the mass distributions for the matched and failed candidates is given in \refFig{trEff-mass}.
In this approach, the efficiency is treated as a fit parameter\footnote{In the fit to the \jpsi mass, the floated parameters are then the track reconstruction efficiency and the total yield $N_{\rm sig}^{\rm all}=N_{\rm sig}^{\rm Match}+N_{\rm sig}^{\rm Fail}$.}. This scheme guarantees that the correlations between the parameters are properly treated in the calculation of the statistical uncertainties.
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.45\textwidth]{TrackEff/2018/Full_bin0_Long_Matched.eps}
\includegraphics[width=0.45\textwidth]{TrackEff/2018/Full_bin0_Long_Failed.eps}
\captionof{figure}[Invariant reconstructed \jpsi candidate mass distribution.]{Invariant reconstructed \jpsi candidate mass distribution for the Long method. On the left, the invariant mass of the matched candidates is shown, on the right are failed candidates. The black points represent a subset of the data obtained during the data taking year 2018. The black line represents the full fit model. The red dashed line represents the signal component.}\label{fig:trEff-mass}
\end{figure}
As discussed in \refSec{trackMeas}, the efficiencies obtained from the simulation are a good approximation of the actual efficiencies, however they are not perfect. On the other hand, the tag-and-probe method of obtaining the efficiencies is rather lengthy and performing this study for every analysis is not feasible. Therefore, a correction factor $R$ is calculated:
\begin{equation}\label{eq:trackEff-R}
R=\frac{\varepsilon_{\rm data}}{\varepsilon_{\rm sim}}\,,
\end{equation}
where $\varepsilon_{\rm data}$ represents the tag-and-probe efficiency in data and $\varepsilon_{\rm sim}$ represents the tag-and-probe efficiency in simulation. This ratio $R$ can be used independently by many analyses to ``correct'' the efficiency obtained directly from simulation. Moreover, first-order uncertainties cancel out. Therefore, the ratio $R$ is the ultimate goal of the tracking efficiency measurement.
In order to accommodate the requirements of most \lhcb analyses, the track reconstruction efficiency is measured in bins of pseudorapidity, $\eta$, momentum, $p$, number of hits in the \spd detector, $N_{\spd \rm hits}$, and the number of primary vertices present in the event $N_{\rm PV}$. The ratio $R$ is measured in two dimensions in bins of pseudorapidity and momentum. This is referred to later as \emph{correction table}.

128
Chapters/TrackEff/production.tex

@ -0,0 +1,128 @@
\subsection{Tracking efficiency measurement in Run II}\label{sec:trackEff-RunII}
The track reconstruction efficiency has already been measured (and published~\cite{TrackEffRun1}) in \runI. While the efficiency measurement is a crucial part of any \runI analysis, the precision was limited by the available data size: any selected event was required to pass an unbiased single muon software trigger. This trigger could perform only a simple track reconstruction due to time constraints. On top of this, the alignment and calibration was performed using the recorded data and the trigger bandwidth had to match the stripping bandwidth.
This procedure was fundamentally changed for \runII. The real-time alignment and calibration together with the full reconstruction available in the high-level trigger allowed for the full track reconstruction in real time\footnote{~``\textit{Real time is defined as
the interval between the collision in the detector and the moment
the data are sent to permanent storage.}''~\cite{Tesla}}.
This allows for fast evaluation of data directly after it has been recorded. Hence, dedicated trigger software has been created in order to perform the tag-and-probe efficiency evaluation at the \hlt level.
\subsubsection{Trigger lines implementation}\label{sec:trackEff-trigger}
As explained in \refSec{trackMeas-tag-and-probe}, a \jpsi meson reconstructed from two muon tracks has to be found. Therefore, the first stage of the trigger selection is searching for good-quality muon tracks. Such a track fulfills the requirements of the \texttt{Hlt1TrackMuon} trigger line: the track has to have $\pt>800\mev$, fulfill the \texttt{IsMuon} requirement (see \refSec{sel-StrippingSelection}) and has to have an impact parameter significance larger than eight. The last requirement ensures the track is detached from the primary vertex to reduce the background pollution.
In order to speed up the computation process, a \Tag track has to be found first. Such a track also has to fulfill the additional criteria listed for each method in \refTab{trEff-tag-trig}. The tag track is reconstructed using the standard \lhcb tracking. Only when such a track is found, the dedicated track reconstruction is performed to search for \Probe tracks, accelerating the computation process significantly. The \Probe tracks also have to fulfill additional loose requirements, see \refTab{trEff-probe-trig}. The average decision time for each of the trigger decisions is below 1\ms, satisfying the software trigger requirements. All the requirements are optimized to have the largest possible kinematic coverage while keeping the coverage identical for the three methods. There are six trigger lines implemented: two lines per method. As the trigger has to distinguish between the \Tag and \Probe tracks, the charge of the muons is exploited. One line reconstructs the \jpsi candidates using a positively charged \Tag muon track and a negatively charged \Probe track, the other one uses the tracks with opposite charges.
\begin{table}
\begin{center}
\begin{tabular}[htbp]{c|r|r|r}
{Variable} &{\velo method} &{\Tstation method} &{Long method} \\ \hline
\dllmupi &$>-2$ &$>-1$ &$>-2$ \\
\ptot &$>5\gev$ &$>7\gev$ &$>10\gev$ \\
\pt &$>700\mev$ &$-$ &$>1300\mev$ \\
{\rm track}\;\chisqndf &$<10$ &$<3$ &$<5$ \\
IP &$>0.5\mm$ &$>0.2\mm$ &$-$ \\
\end{tabular}
\captionof{table}[Tag track trigger selection criteria.]{Selection cuts applied to the \textbf{\emph{tag}} tracks by the software trigger.} \label{tab:trEff-tag-trig}
\end{center}
\end{table}
\begin{table}
\begin{center}
\begin{tabular}[htbp]{c|r|r|r}
{Variable} &{\velo method} &{\Tstation method} &{Long method} \\ \hline
\ptot &$>5\gev$ &$>5\gev$ &$>5\gev$ \\
\pt &$>500\mev$ &$>500\mev$ &$>500\mev$ \\
{\rm track}\;\chisqndf &$<10$ &$<5$ &$-$ \\
\end{tabular}
\captionof{table}[Probe track trigger selection criteria.]{Selection cuts applied to the \textbf{\emph{probe}} tracks by the software trigger.}
\label{tab:trEff-probe-trig}
\end{center}
\end{table}
Lastly, criteria listed in \refTab{trEff-jpsi-trig} are applied on the \jpsi meson. These conditions are optimized in order to reduce the combinatorial background and to make sure the two-muon vertex has a good quality. Moreover, in the case of the \velo method, the distance of closest approach (DOCA) condition is added in order to speed up the computation process.
In order to measure the track reconstruction efficiency, the overlap fraction also has to be saved. Therefore, another trigger line is added: its selection is identical to the selection described above, with the additional requirement of finding a long track associated to the probe track with an overlap fraction fulfilling the criteria for each method. The existence of two lines allows for online tracking efficiency calculation.
\begin{table}
\begin{center}
\begin{tabular}[htbp]{c|r|r|r}
{Variable} &{\velo method} &{\Tstation method} &{Long method} \\ \hline
$|m_{\mup\mun}-m_{\jpsi}|$ &$<200\mev$ &$<500\mev$ &$<500\mev$ \\
\pt &$-$ &$>500\mev$ &$>1000\mev$ \\
{\rm vertex}\;\chisq &$<5$ &$<2$ &$<2$ \\
Track DOCA &$<5\mm$ &$-$ &$-$ \\
IP &$-$ &$-$ &$<0.8\mm$ \\
\end{tabular}
\captionof{table}[Reconstructed \jpsi trigger selection criteria.]{Selection cuts applied to the \jpsiBF reconstructed from tag and probe tracks by the software trigger.}
\label{tab:trEff-jpsi-trig}
\end{center}
\end{table}
\subsubsection{Trigger lines online monitoring}
The full tracking efficiency estimation in real time also allows for real time monitoring of the track reconstruction efficiency. A dedicated online monitoring tool has been implemented in 2017.
For the monitoring purposes, the output of the trigger lines described in the previous section is saved in a form of three one-dimensional histograms in mass, momentum and pseudorapidity distributions. This is saved for both \Probe and \Tag muon tracks as well as the \jpsi candidates.
In order to estimate the online efficiency, a fit to the \jpsi reconstructed mass is performed. The fit consists of a Gaussian distribution for the signal and an exponential function for the background. This fit is performed for every \emph{run} that lasted at least 45 minutes in order to have a sufficiently large data sample available. A run is a set of data taken with the same detector settings that lasts a maximum of one hour. The yields of these fits can be used to estimate the tracking efficiency for each method. %The yields and efficiencies are saved and send to the Online Presenter to monitor the tracking efficiency evolution for each method in real time.
\subsubsection{Stripping lines implementation}\label{sec:trackEff-strip}
While the full reconstruction allows for faster and more efficient determination of the track reconstruction efficiencies, when a trigger line fails, there is no data available for measuring the track reconstruction efficiency. Therefore, dedicated stripping lines for each method have been implemented also for \runII. This has proven to be useful in 2017, when a part of the trigger line for the \Tstation method was overwritten by an output from a different trigger line. With the available stripping lines, the \Tstation method has been successfully recovered. The price to pay is the same as in \runI: a smaller amount of \jpsi candidates available and longer processing times. However, the data sample taken in 2017 was large enough to fully recover the \Tstation method. The track reconstruction efficiency measured via the recovered \Tstation method is given in \refFig{trEff-2017}.
The stripping lines perform very similar calculations as the trigger lines; however, the workflow does not allow searching for the \Tag track first, resulting in longer computation times. This is slightly improved by imposing a mass requirement on the combination of the muon candidates before the vertex fit. The cuts used in the stripping lines are identical to the cuts applied in the trigger selection and they are listed in \refTab{trEff-strip}.
\begin{table}
\begin{center}
\begin{tabular}[htbp]{c|r|r|r}
{Variable} &{\velo method} &{\Tstation method} &{Long method} \\ \hline \hline
\multicolumn{4}{c}{\emph{Tag} selection criteria} \\ \hline
\dllmupi &$>-2$ &$>-1$ &$>-2$ \\
\ptot &$>5\gev$ &$>7\gev$ &$>10\gev$ \\
\pt &$>0.7\gev$ &$>0.0\gev$ &$>1.3\gev$ \\
{\rm track}\;\chisqndf &$<10$ &$<3$ &$<5$ \\
IP &$>0.5\mm$ &$>0.2\mm$ &$-$ \\ \hline
\multicolumn{4}{c}{\emph{Probe} selection criteria} \\ \hline
\ptot &$>5\gev$ &$>5\gev$ &$>5\gev$ \\
\pt &$>0.5\gev$ &$>0.5\gev$ &$>0.5\gev$ \\
{\rm track}\;\chisqndf &$<10$ &$<5$ &$-$ \\ \hline
\multicolumn{4}{c}{\jpsi candidates selection criteria} \\ \hline
$|m_{\mup\mun}-m_{\jpsi}|^{\rm precomb}$ &$<2000\mev$ &$<1000\mev$ &$<1000\mev$ \\
$|m_{\mup\mun}-m_{\jpsi}|^{\rm postcomb}$ &$<200\mev$ &$<500\mev$ &$<500\mev$ \\
\pt &$-$ &$>0.5\gev$ &$>1\gev$ \\
{\rm vertex}\;\chisq &$<5$ &$<2$ &$<2$ \\
Track DOCA &$<5\mm$ &$-$ &$-$ \\
IP &$-$ &$-$ &$<0.8\mm$ \\
\end{tabular}
\captionof{table}[Stripping selection criteria.]{Selection cuts applied to the \Tag track, \Probe track and the reconstructed \jpsi candidate by the stripping selection. \texttt{Precomb} and \texttt{postcomb} denote cuts applied before and after the vertex fit respectively.}
\label{tab:trEff-strip}
\end{center}
\end{table}
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.48\textwidth]{TrackEff/2017_strip/ETA_T.eps}
\includegraphics[width=0.48\textwidth]{TrackEff/2017_strip/P_T.eps}
\includegraphics[width=0.48\textwidth]{TrackEff/2017_strip/nPVs_T.eps}
\includegraphics[width=0.48\textwidth]{TrackEff/2017_strip/nSPDHits_T.eps}
\captionof{figure}[Track reconstruction efficiency using stripping for 2017.]{Track reconstruction efficiency using the \T method in pseudorapidity $\eta$ (top left), in momentum $p$ (top right), in the number of primary vertices $N_{PV}$ (bottom left) and in number of hits in the \spd $N_{\spd hits}$ (bottom right) for the 2017 data-taking period (25ns bunch spacing). This sample is obtained using stripping in order to recover faulty trigger selection of the \T method in 2017. Sim09h denotes the used version of the simulation software used by the \lhcb experiment. The uncertainties are statistical only. }\label{fig:trEff-2017}
\end{figure}
Moreover, the selection criteria applied in the trigger selection might not match the requirements of analyses exploring the edges of the available phase space. An example is a dedicated set of stripping lines, added in 2017, allowing the study of the track reconstruction efficiency in events with minimal detector occupancy. This analysis is beyond the scope of this work; for the details, see~Ref.\,\cite{UPC-AnaNote}.
%
%Another advantage of the stripping lines is they can be also applied on the \runI datasample. The \runI track reconstruction efficiency has been thoroughly studied, however, the dataflow changed significantly between \runI and \runII. Small changes in the stripping selection would require a lot of effort for the analysts to obtain the track reconstruction efficiencies. Rewriting the stripping lines in \runI enables the \runI track reconstruction efficiency to be calculated using the \TrackCalib tool.

64
Chapters/TrackEff/results.tex

@ -0,0 +1,64 @@
%\subsection{Method validation}\label{sec:trackEff-validate}
\subsection{Results}\label{sec:trackEff-results}
The tracking efficiencies for the 2018 data-taking period obtained from data and from the simulation version Sim09h depending on pseudorapidity $\eta$, momentum $p$, number of primary vertices $N_{PV}$ and the number of hits in the \spd detector $N_{\spd hits}$ are shown in Figs.\,\ref{fig:trEff-eta}, \ref{fig:trEff-p}, \ref{fig:trEff-nPVs} and \ref{fig:trEff-nSPD}. All the shown track reconstruction efficiencies are 94\% or higher. The agreement between the track reconstruction efficiency obtained from the data sample and the simulation is very good. The two-dimensional correction table in momentum $p$ and pseudorapidity $\eta$ is given in \refFig{trEff-R}. The ratio is very close to one in all bins. The two-dimensional ratio for the Final method is made available as a correction table for physics analyses performed by the \lhcb collaboration.
The results for all the data-taking years 2011-2018 can be found at~Ref.\,\cite{Twiki-TrackCalib}. As all the results are consistent throughout the years, they are not shown in this work. Alternatively, more results are published in~Ref.\,\cite{TrackEff-FIG}.
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.48\textwidth]{TrackEff/PossibleCorrectResults/2018_25ns/Data2018_25ns_MC2018_25nsSim09h_WG/ETA_Velo.eps}
\includegraphics[width=0.48\textwidth]{TrackEff/PossibleCorrectResults/2018_25ns/Data2018_25ns_MC2018_25nsSim09h_WG/ETA_T.eps}
\includegraphics[width=0.48\textwidth]{TrackEff/PossibleCorrectResults/2018_25ns/Data2018_25ns_MC2018_25nsSim09h_WG/ETA_Long.eps}
\includegraphics[width=0.48\textwidth]{TrackEff/PossibleCorrectResults/2018_25ns/Data2018_25ns_MC2018_25nsSim09h_WG/ETA_Combined.eps}
\includegraphics[width=0.48\textwidth]{TrackEff/PossibleCorrectResults/2018_25ns/Data2018_25ns_MC2018_25nsSim09h_WG/ETA_Final.eps}\\ \vspace{-3pt}
\captionof{figure}[Track reconstruction efficiency vs. pseudorapidity for 2018.]{Track reconstruction efficiency versus pseudorapidity $\eta$ for the 2018 data-taking period (25ns bunch spacing). Sim09h denotes the used version of the simulation software used by the \lhcb experiment. The track reconstruction efficiency is obtained using the \velo method (top left), the \T method (top right), the Long method (middle left), the Combined method (middle right), product of the \velo and \T method and the Final method (bottom), weighted average of the Long and Combined methods. The uncertainties are statistical only.}\label{fig:trEff-eta}
\end{figure}
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.48\textwidth]{TrackEff/PossibleCorrectResults/2018_25ns/Data2018_25ns_MC2018_25nsSim09h_WG/P_Velo.eps}
\includegraphics[width=0.48\textwidth]{TrackEff/PossibleCorrectResults/2018_25ns/Data2018_25ns_MC2018_25nsSim09h_WG/P_T.eps}
\includegraphics[width=0.48\textwidth]{TrackEff/PossibleCorrectResults/2018_25ns/Data2018_25ns_MC2018_25nsSim09h_WG/P_Long.eps}
\includegraphics[width=0.48\textwidth]{TrackEff/PossibleCorrectResults/2018_25ns/Data2018_25ns_MC2018_25nsSim09h_WG/P_Combined.eps}
\includegraphics[width=0.48\textwidth]{TrackEff/PossibleCorrectResults/2018_25ns/Data2018_25ns_MC2018_25nsSim09h_WG/P_Final.eps}\\ \vspace{-3pt}
\captionof{figure}[Track reconstruction efficiency vs. momentum for 2018.]{Track reconstruction efficiency versus momentum $p$ for the 2018 data-taking period (25ns bunch spacing). Sim09h denotes the used version of the simulation software used by the \lhcb experiment. The track reconstruction efficiency is obtained using the \velo method (top left), the \T method (top right), the Long method (middle left), the Combined method (middle right), product of the \velo and \T method and the Final method (bottom), weighted average of the Long and Combined methods. The uncertainties are statistical only.}\label{fig:trEff-p}
\end{figure}
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.48\textwidth]{TrackEff/PossibleCorrectResults/2018_25ns/Data2018_25ns_MC2018_25nsSim09h_WG/nPVs_Velo.eps}
\includegraphics[width=0.48\textwidth]{TrackEff/PossibleCorrectResults/2018_25ns/Data2018_25ns_MC2018_25nsSim09h_WG/nPVs_T.eps}
\includegraphics[width=0.48\textwidth]{TrackEff/PossibleCorrectResults/2018_25ns/Data2018_25ns_MC2018_25nsSim09h_WG/nPVs_Long.eps}
\includegraphics[width=0.48\textwidth]{TrackEff/PossibleCorrectResults/2018_25ns/Data2018_25ns_MC2018_25nsSim09h_WG/nPVs_Combined.eps}
\includegraphics[width=0.48\textwidth]{TrackEff/PossibleCorrectResults/2018_25ns/Data2018_25ns_MC2018_25nsSim09h_WG/nPVs_Final.eps}\\ \vspace{-3pt}
\captionof{figure}[Track reconstruction efficiency vs. the number of primary vertices for 2018.]{Track reconstruction efficiency versus the number of primary vertices $N_{PV}$ for the 2018 data-taking period (25ns bunch spacing). Sim09h denotes the used version of the simulation software used by the \lhcb experiment. The track reconstruction efficiency is obtained using the \velo method (top left), the \T method (top right), the Long method (middle left), the Combined method (middle right), product of the \velo and \T method and the Final method (bottom), weighted average of the Long and Combined methods. The uncertainties are statistical only.}\label{fig:trEff-nPVs}
\end{figure}
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.48\textwidth]{TrackEff/PossibleCorrectResults/2018_25ns/Data2018_25ns_MC2018_25nsSim09h_WG/nSPDHits_Velo.eps}
\includegraphics[width=0.48\textwidth]{TrackEff/PossibleCorrectResults/2018_25ns/Data2018_25ns_MC2018_25nsSim09h_WG/nSPDHits_T.eps}
\includegraphics[width=0.48\textwidth]{TrackEff/PossibleCorrectResults/2018_25ns/Data2018_25ns_MC2018_25nsSim09h_WG/nSPDHits_Long.eps}
\includegraphics[width=0.48\textwidth]{TrackEff/PossibleCorrectResults/2018_25ns/Data2018_25ns_MC2018_25nsSim09h_WG/nSPDHits_Combined.eps}
\includegraphics[width=0.48\textwidth]{TrackEff/PossibleCorrectResults/2018_25ns/Data2018_25ns_MC2018_25nsSim09h_WG/nSPDHits_Final.eps}\\ \vspace{-3pt}
\captionof{figure}[Track reconstruction efficiency vs. the number of hits in the \spd detector for 2018.]{Track reconstruction efficiency versus the number of hits in the \spd detector $N_{\spd hits}$ for the 2018 data-taking period (25ns bunch spacing). Sim09h denotes the used version of the simulation software used by the \lhcb experiment. The track reconstruction efficiency is obtained using the \velo method (top left), the \T method (top right), the Long method (middle left), the Combined method (middle right), product of the \velo and \T method and the Final method (bottom), weighted average of the Long and Combined methods. The uncertainties are statistical only.}\label{fig:trEff-nSPD}
\end{figure}
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.48\textwidth]{TrackEff/PossibleCorrectResults/2018_25ns/Data2018_25ns_MC2018_25nsSim09h_WG/Ratio_Velo_P-ETA_pretty.eps}
\includegraphics[width=0.48\textwidth]{TrackEff/PossibleCorrectResults/2018_25ns/Data2018_25ns_MC2018_25nsSim09h_WG/Ratio_T_P-ETA_pretty.eps}
\includegraphics[width=0.48\textwidth]{TrackEff/PossibleCorrectResults/2018_25ns/Data2018_25ns_MC2018_25nsSim09h_WG/Ratio_Long_P-ETA_pretty.eps}
\includegraphics[width=0.48\textwidth]{TrackEff/PossibleCorrectResults/2018_25ns/Data2018_25ns_MC2018_25nsSim09h_WG/Ratio_Combined_P-ETA_pretty.eps}
\includegraphics[width=0.48\textwidth]{TrackEff/PossibleCorrectResults/2018_25ns/Data2018_25ns_MC2018_25nsSim09h_WG/Ratio_Final_P-ETA_pretty.eps}\\ \vspace{-3pt}
\captionof{figure}[Track reconstruction efficiency correction tables for 2018.]{Track reconstruction efficiency ratio of data to simulation in momentum $p$ and pseudorapidity $\eta$ for the 2018 data-taking period (25ns bunch spacing). Sim09h denotes the used version of the simulation software used by the \lhcb experiment. The track reconstruction efficiency is obtained using the \velo method (top left), the \T method (top right), the Long method (middle left), the Combined method (middle right), product of the \velo and \T method and the Final method (bottom), weighted average of the Long and Combined methods. The ratio for the Final method is made available as a correction table for physics analyses performed by the \lhcb collaboration. The uncertainties are statistical only. }\label{fig:trEff-R}
\end{figure}

41
Chapters/TrackEff/systematics.tex

@ -0,0 +1,41 @@
\subsection{Systematic uncertainties}\label{sec:trackEff-sys}
The sources of potential systematic uncertainties have been investigated for the \runI track reconstruction efficiency measurement~\cite{TrackEffRun1}. The method of measuring the track reconstruction efficiencies remained unchanged, hence the uncertainties are not expected to significantly change in the \runII measurement.
Changing the \jpsi mass signal model from the sum of two Crystal Ball distributions to the sum of two Gaussian distributions does not change the efficiency significantly compared to the statistical uncertainty. Similarly, changing the background model from an exponential distribution to a linear one leads only to a negligible change in the track reconstruction efficiency.
Another source of systematic uncertainty could be the difference between the Long method and the Combined method. However, the difference is observed to be small relative to the statistical uncertainty and is further reduced in the ratio of the track reconstruction efficiency in data to the efficiency in the simulation.
The dominant systematic uncertainty in the \runI measurement originates from the choice of the occupancy variable used to improve the agreement of the simulated event sample with the real data. The uncertainty is evaluated by using the number of hits in
the \spd, the number of long tracks in the event and the number of primary interaction vertices as the occupancy variables. The largest deviation observed in \runII for the correction factors obtained from the combination of all methods in any of the two-dimensional correction tables is 0.8\%.
%8 Systematic uncertainties
%Small differences in the ratio of efficiencies are seen when reweighting the simulated
%samples in different parameters such as the number of primary vertices, or the number of
%hits or tracks in the different subdetectors. The largest of these differences is taken as
%a systematic uncertainty and amounts to 0.4%. No systematic uncertainty is assigned
%for the agreement of the track reconstruction efficiency determined by the tag-and-probe
%method and the hit-based method (which is on the order of 1%), as the differences cancel
%when forming the efficiency ratio. Accordingly, no systematic uncertainties are assigned
%for the fit model as these cancel when forming the fraction of reconstructed J/ψ decays
%where the probe can be matched to a long track. It has been checked that this is true
%for a range of fit models, the largest variation being 0.2%. Furthermore, no systematic
%uncertainty is assigned to the possible matching of a correctly reconstructed probe track
%to a fake long track, as the requirement for a large overlap in the subdetectors ensure that
%both reconstructed tracks are either real tracks or fake tracks, where the latter would not
%peak at the J/ψ mass. No systematic uncertainty is assigned for the fact that the VELO
%+ T-station method and the long method show slightly different results in Figs. 4–6, as
%both methods probe different momentum spectra and any residual difference will cancel
%15
%when forming the ratio with simulation. No systematic uncertainty is assigned for the
%double-counting of the matching efficiency in the combined method, as this efficiency is
%very close to 100%, and any uncertainty would get further reduced when forming the ratio
%with simulation. No systematic uncertainty is assigned for the large difference for the
%VELO + T efficiency between simulation and data at low momenta in 2011 and 2012, as
%this is automatically taken into account when forming the ratio of efficiencies. Despite this
%difference, the integrated track reconstruction efficiencies between simulation and data are
%in agreement due to compensation of this effect for high momenta, where the efficiency is
%higher in simulation than in data

12
Chapters/TrackEff/trackEff.tex

@ -0,0 +1,12 @@
\section{Tracking efficiency measurement}\label{sec:trackEff}
The measurement of the track reconstruction efficiency of the \lhcb detector and its reconstruction algorithms is essential for most analyses carried out by the \lhcb collaboration. Contrary to the other large \lhc experiments, the physics program of \lhcb focuses on relatively low momentum particles. Hence, the detector is optimized to contain as little material as possible in order to reduce the scattering of these particles and thus be able to perform very precise measurements. Therefore, any redundancy in the \lhcb tracking system is removed. The downside of this approach is sensitivity to the performance of the tracking detectors and the tracking algorithms. The track reconstruction efficiency is vital for the physics program of \lhcb.
\input{./Chapters/TrackEff/measurement}
\input{./Chapters/TrackEff/production}
\input{./Chapters/TrackEff/TrackCalib}
\input{./Chapters/TrackEff/systematics}
\input{./Chapters/TrackEff/results}
\clearpage

19
Chapters/Uncertanities/uncertanities.tex

@ -0,0 +1,19 @@
\section{Systematic uncertainties}
\subsection{Correlations}
\subsection{Statistical uncertainty}
\subsection{Re-weighting of phase-space MC}
\subsection{Bootstrapping of phase-space MC}
\subsection{Angular acceptance parametrization}\label{sec:sys-angAcc}
\subsection{Signal peak mass model}
\subsection{Angular background model}
\subsection{Angular resolution}\label{sec:sys-angRes}
\subsection{Summary of systematic uncertainties}
\subsection[Uncertainty of \texorpdfstring{$\Si{i}$}{Si} parameters]
{Uncertainty of \texorpdfstring{$\mathbf{\Si{i}}$}{Si} parameters}
\subsection[Uncertainty of \texorpdfstring{$\Pprime{i}$}{P prime i} parameters]
{Uncertainty of \texorpdfstring{$\mathbf{\Pprime{i}}$}{P prime i} parameters}
\todoFill[inline]{Uncertainties: Fill}
\clearpage

94
Chapters/Validation/refFit.tex

@ -0,0 +1,94 @@
\subsection[Fit to the reference channel \texorpdfstring{ $\BuToKstJpsi$}{.}]{Fit to the \texorpdfstring{\BuToKstJpsiBF}{.}}\label{valid-reference}
As the statistical power of the rare \BuToKstmm channel is highly limited, tests are performed using the \BuToKstJpsi data sample. Candidates with a dimuon invariant mass squared between 8.68 and 10.09 \gevgev are considered. The data sample is split between the \runI and \runII samples. The angular parameters are shared between both samples. The two samples are fitted simultaneously in four dimensions of the \Bu meson reconstructed mass \mBu and the angles \angles exploiting the maximum-likelihood method. The parameter $F_S$ is extracted from a two-dimensional fit to the \Bu meson mass and the \Kstarp mass. The statistical power of this sample is large enough to test the functionality of the \fcncfitter framework as well as all corrections applied to the data.
The projection of the full fit is presented in \refFig{MainFit-Ref}. The results of this fit are compared to previous measurements by \babar studying both decays of \Bu and \Bd mesons to \Kstar\mup\mun~\cite{FIT-BaBar}, \belle focusing on the \BuToKstJpsi decay~\cite{FIT-Belle}, and \lhcb measurements of the \BdToJPsiKst decay~\cite{FIT-LHCb-Jpsi} and of the \BuToKstmm decay with \KstToKsPi~\cite{ANA-LHCb-angular4}, where the fit to \BuToKstJpsi is also performed as an important check of the fitter framework. The results of the full angular fit are shown in \refTab{FIT-otherMeasurements-Ref}. The statistical uncertainty is approximated by the \hesse calculation (see \refSec{toy-ref}) and scaled according to the pull widths listed in \refTab{toys-Ref-pull}. For the reader's convenience, the differences between this measurement and the measurements listed in \refTab{FIT-otherMeasurements-Ref} are shown in terms of standard deviations in \refTab{FIT-otherMeasurements-Ref-sigma}.
The measured values from the full angular fit agree very well with the other measurements. The only exception is the parameter $S_4$ that is consistently below the values measured by other experiments. This can be caused by the complicated modeling of the angular acceptance: the parameter $S_4$ is very sensitive to the symmetry of the \ctl distribution. However, the angular acceptance description does not always prefer a symmetry in \ctl, see \refApp{AngCorr}. A dedicated test by varying the order of the polynomial used to correct the angular acceptance in \ctl can be done.
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.35\textwidth]{FCNC/MainFit/ctk_JpsiFit_1BIN_bin0_SimultaneousFit_Run12_AllPDFs.eps}\hspace{-15pt}
\includegraphics[width=0.35\textwidth]{FCNC/MainFit/ctl_JpsiFit_1BIN_bin0_SimultaneousFit_Run12_AllPDFs.eps}\hspace{-15pt}
\includegraphics[width=0.35\textwidth]{FCNC/MainFit/phi_JpsiFit_1BIN_bin0_SimultaneousFit_Run12_AllPDFs.eps}
\captionof{figure}[Full angular fit to the reference channel \BuToKstJpsi.]{Full angular fit to the reference channel \BuToKstJpsi. The black markers represent the data. The black line shows the full fit model. The blue space represents the signal contribution. From left to right, \ctk, \ctl and $\phi$ projections are shown. Red surface represents the background contribution. The green dashed line shows only the \pwave component, the orange dotted line represents the \swave components and the pink dot-and-dash line depicts the interference between the \pwave and the \swave.}\label{fig:MainFit-Ref}
\end{figure}
\begin{table}[hbt!]\small \centering
\begin{tabular}{l|lllll}
& this fit & \lhcb~\Bu & Belle (\Bu) & BaBar ($\B^+$+\Bd)& \lhcb~\Bd \\\hline
$F_{L}$ & 0.563$\pm$0.014 & \cellOneSigma 0.572$\pm$0.005 & \cellTwoSigmas 0.604$\pm$0.015 & \cellOneSigma 0.556$\pm$0.009 & \cellOneSigma 0.572$\pm$0.008\\
$S_{3}$ & 0.014$\pm$0.011 & \cellTwoSigmas -0.002$\pm$0.007 & \cellTwoSigmas -0.018$\pm$0.017 & \cellOneSigma 0.011$\pm$0.011 & \cellTwoSigmas -0.013$\pm$0.010\\
$S_{4}$ & -0.211$\pm$0.014 &\cellThreeSigmas -0.246$\pm$0.008 &\cellThreeSigmas -0.255$\pm$0.010& \cellTwoSigmas -0.237$\pm$0.007 &\cellThreeSigmas -0.250$\pm$0.006\\
$S_{5}$ & -0.013$\pm$0.015 & \cellOneSigma -0.003$\pm$0.008 & \cellOneSigma 0.000$\pm$0.000 & \cellOneSigma 0.000$\pm$0.000 & \cellOneSigma 0.000$\pm$0.000\\
$A_{FB}$ & 0.002$\pm$0.007 & \cellOneSigma -0.002$\pm$0.005 & \cellOneSigma 0.000$\pm$0.000 & \cellOneSigma 0.000$\pm$0.000 & \cellOneSigma 0.000$\pm$0.000\\
$S_{7}$ & 0.002$\pm$0.014 & \cellOneSigma -0.001$\pm$0.008 & \cellOneSigma 0.000$\pm$0.000 & \cellOneSigma 0.000$\pm$0.000 & \cellOneSigma 0.000$\pm$0.000\\
$S_{8}$ & -0.062$\pm$0.015 & \cellOneSigma -0.063$\pm$0.008 & \cellTwoSigmas -0.037$\pm$0.018 & \cellOneSigma -0.058$\pm$0.015 & \cellOneSigma -0.048$\pm$0.007\\
$S_{9}$ & -0.074$\pm$0.011 & \cellOneSigma -0.084$\pm$0.007 & \cellTwoSigmas -0.041$\pm$0.016 & \cellTwoSigmas -0.095$\pm$0.014 & \cellOneSigma -0.084$\pm$0.006\\
\end{tabular}\captionof{table}[Full angular fit to \BuToKstJpsi compared to previous measurements.]{Comparison of the full angular fit to the reference channel \BuToKstJpsi to previous measurements by \babar, \belle and two \lhcb measurements~\cite{FIT-BaBar, FIT-Belle,FIT-LHCb-Jpsi,ANA-LHCb-angular4}, focusing on different combinations of \Bu and \Bd meson decays, as indicated. The measurements are published in the form of polarization amplitudes. The amplitudes are converted into the $S_i$ angular observables and the uncertainties are propagated to the basis using 100\,000 randomly generated samples. The full angular fit results are in agreement with the previously published measurements. The statistical uncertainty of this result is scaled according to \refTab{toys-Ref-pull}, as the pseudoexperiment studies showed an overestimation of the statistical uncertainties (for the details see \refSec{toys}). Dark green represents an agreement better than one standard deviation, lime represents an agreement better than two standard deviations and yellow represents an agreement better than three standard deviations.}\label{tab:FIT-otherMeasurements-Ref}
\end{table}
\begin{table}[hbt!]\small \centering
\begin{tabular}{l|cccc}
& \lhcb \Bu & Belle (\Bu) & BaBar ($B^+$+\Bd) & \lhcb \Bd \\\hline
$F_{L}$ & -0.61 & -2.00 & \phantom{-}0.42 & -0.56\\
$S_{3}$ & \phantom{-}1.23 & \phantom{-}1.58 & \phantom{-}0.19 & \phantom{-}1.82\\
$S_{4}$ & \phantom{-}2.17 & \phantom{-}2.56 & \phantom{-}1.66 & \phantom{-}2.56\\
$S_{5}$ & -0.59 & -0.87 & -0.87 & -0.87\\
$A_{FB}$ & \phantom{-}0.46 & \phantom{-}0.29 & \phantom{-}0.29 & \phantom{-}0.29\\
$S_{7}$ & \phantom{-}0.19 & \phantom{-}0.14 & \phantom{-}0.14 & \phantom{-}0.14\\
$S_{8}$ & \phantom{-}0.06 & -1.07 & -0.19 & -0.85\\
$S_{9}$ & \phantom{-}0.77 & -1.70 & \phantom{-}1.18 & \phantom{-}0.80\\
\end{tabular}\captionof{table}[Full angular fit to \BuToKstJpsi compared to previous measurements in terms of standard deviations.]{The difference between the full angular fit to the reference channel \BuToKstJpsi and the previous measurements by \babar, \belle and two \lhcb measurements~\cite{ANA-LHCb-angular4,FIT-BaBar,FIT-Belle,FIT-LHCb-Jpsi} in terms of the standard deviations. The measurements are published in the form of polarization amplitudes. The amplitudes are converted into the $S_i$ angular observables and the uncertainties are propagated to the basis using 100\,000 randomly generated samples. The full angular fit results are in agreement with the previously published measurements.}\label{tab:FIT-otherMeasurements-Ref-sigma}
\end{table}
Moreover, the reference channel \BuToKstJpsi is used to further test the angular folding method. The values obtained using the full angular fit are compared to the values obtained via folding methods 0--4 listed in \refTab{FIT-fld-Ref}. The agreement between the results is almost perfect. The projections of these fits are shown in \refFig{MainFit-Ref-fld}.
\begin{table}[hbt!] \small \centering
\begin{tabular}{l|cccccc}
& Full angular & Folding 0 & Folding 1 & Folding 2 & Folding 3 & Folding 4 \\\hline
$F_{L}$ &\phantom{-}0.563$\pm$0.014 &\phantom{-}0.563$\pm$0.012 &\phantom{-}0.565$\pm$0.011 &\phantom{-}0.564$\pm$0.011 &\phantom{-}0.564$\pm$0.012 &\phantom{-}0.564$\pm$0.019\\
$S_{3}$ &\phantom{-}0.014$\pm$0.011 &\phantom{-}0.015$\pm$0.005 &\phantom{-}0.015$\pm$0.005 &\phantom{-}0.016$\pm$0.005 &\phantom{-}0.016$\pm$0.005 &\phantom{-}0.016$\pm$0.005\\
$S_{4}$ & -0.211$\pm$0.014 & --- & -0.218$\pm$0.008& --- & --- & --- \\
$S_{5}$ & -0.013$\pm$0.015 & --- & --- & -0.012$\pm$0.007 & --- & --- \\
$A_{FB}$ &\phantom{-}0.002$\pm$0.007 &\phantom{-}0.001$\pm$0.004 & --- & --- & --- & --- \\
$S_{7}$ &\phantom{-}0.002$\pm$0.014 & --- & --- & --- &\phantom{-}0.002$\pm$0.007 & --- \\
$S_{8}$ & -0.062$\pm$0.015 & --- & --- & --- & --- & -0.069$\pm$0.010\\
$S_{9}$ & -0.074$\pm$0.011 & -0.074$\pm$0.005& --- & --- & --- & --- \\
\end{tabular}\captionof{table}[Full angular fit to \BuToKstJpsi comparison to fits via the angular folding method.]{Comparison of the full angular fit results for the reference channel \BuToKstJpsi to the fits using the angular folding methods. The results are in perfect agreement. This proves the functionality of the folding methods.}\label{tab:FIT-fld-Ref}
\end{table}
\clearpage
%\thispagestyle{empty}
\begin{figure}[hbt!] \vspace{-10pt}
\centering
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/ctk_JpsiFit_1BIN_bin0_SimultaneousFit_folding0_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/ctl_JpsiFit_1BIN_bin0_SimultaneousFit_folding0_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/phi_JpsiFit_1BIN_bin0_SimultaneousFit_folding0_Run12_AllPDFs.eps}\\ \vspace{-3pt}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/ctk_JpsiFit_1BIN_bin0_SimultaneousFit_folding1_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/ctl_JpsiFit_1BIN_bin0_SimultaneousFit_folding1_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/phi_JpsiFit_1BIN_bin0_SimultaneousFit_folding1_Run12_AllPDFs.eps}\\\vspace{-3pt}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/ctk_JpsiFit_1BIN_bin0_SimultaneousFit_folding2_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/ctl_JpsiFit_1BIN_bin0_SimultaneousFit_folding2_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/phi_JpsiFit_1BIN_bin0_SimultaneousFit_folding2_Run12_AllPDFs.eps}\\\vspace{-3pt}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/ctk_JpsiFit_1BIN_bin0_SimultaneousFit_folding3_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/ctl_JpsiFit_1BIN_bin0_SimultaneousFit_folding3_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/phi_JpsiFit_1BIN_bin0_SimultaneousFit_folding3_Run12_AllPDFs.eps}\\\vspace{-3pt}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/ctk_JpsiFit_1BIN_bin0_SimultaneousFit_folding4_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/ctl_JpsiFit_1BIN_bin0_SimultaneousFit_folding4_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MainFit/phi_JpsiFit_1BIN_bin0_SimultaneousFit_folding4_Run12_AllPDFs.eps}\\\vspace{-3pt}
\captionof{figure}[Full angular fit to the reference channel via the angular folding method.]{Full angular fit to the reference channel \BuToKstJpsi for the five folding methods. The black markers represent the data, the blue space represents the signal contribution. On the left, \ctk projection is shown, in the middle \ctl projection and on the right $\phi$ projection is shown. Red surface represents the background contribution. The green dashed line shows only the \pwave component, the orange dotted line represents the \swave components and the pink dot-and-dash line depicts the interference between the \pwave and the \swave.}\label{fig:MainFit-Ref-fld}
\end{figure}
\begin{textblock*}{23cm}(1.13\textwidth,4.7cm) % {block width} (coords)
\rotatebox{-90}{\centering Folding 0 \hspace{1.85cm} Folding 1 \hspace{1.85cm} Folding 2 \hspace{1.85cm} Folding 3 \hspace{1.85cm} Folding 4}
\end{textblock*}
\clearpage

131
Chapters/Validation/validation.tex

@ -0,0 +1,131 @@
\section{Tests with large statistics}\label{valid}
In order to validate the \fcncfitter framework and the functionality of the angular acceptance correction, tests with large statistical samples are performed. First, the simulation samples introduced in \refSec{AnaIntro-MC} are fitted. As the Monte Carlo simulation uses a form-factor model \texttt{BTOSLLBALL}~\cite{FIT-btosllBall}, the extraction of the initial angular moments \allAng is very complicated. Therefore, instead of extracting the angular moments from the \texttt{BTOSLLBALL} model itself, a \emph{generator-level} simulation is studied in order to obtain the values of the angular parameters used at generation. The generator-level sample is free of any acceptance, reconstruction or selection effects. The agreement of the angular moments of the full Monte Carlo simulation and the generator level simulation is a crucial validation of the angular acceptance correction procedure.
The full simulation sample is also used to validate the folding procedure described in \refSec{ANA_folding}. The large statistics allows for a full angular fit. The angular observables obtained by the full angular fit are compared to the results of the folded fit.
The last step of the validation is the fit to the reference \BuToKstJpsi channel. The statistical power of the reference channel makes it possible to test the functionality of the \fcncfitter framework also on data with a background contribution present. The fit is validated by comparing the measured angular moments to previous measurements. Moreover, the \BuToKstJpsi decay data sample is also used to further validate the folding method.
\subsection{Fit to the simulation sample}\label{valid-simfit}
The simulation sample used for the validation is treated the same way as the real data: the events pass the \lhcb acceptance and the full selection. The angular acceptance correction is applied. The fit is performed using the same \qsq binning as in the data sample. There is no \swave pollution present in the simulation sample: only the \pwave is fitted. The fit projections are shown in \refFig{FIT-MC-sigFit}. The fit converges and describes the data very well.
%
%\input{Chapters/ParameterMeasurement/MC_MassFit_Jpsi_Run1}
%\input{Chapters/ParameterMeasurement/MC_MassFit_Jpsi_Run2}
\begin{figure}[hbt!]
\centering
\includegraphics[width=0.32\textwidth]{FCNC/MCfit/Signal/ctk_MC_SignalFit_5BINS_bin0_SimultaneousFit_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MCfit/Signal/ctl_MC_SignalFit_5BINS_bin0_SimultaneousFit_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MCfit/Signal/phi_MC_SignalFit_5BINS_bin0_SimultaneousFit_Run12_AllPDFs.eps}\\ \vspace{-3pt}
\includegraphics[width=0.32\textwidth]{FCNC/MCfit/Signal/ctk_MC_SignalFit_5BINS_bin1_SimultaneousFit_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MCfit/Signal/ctl_MC_SignalFit_5BINS_bin1_SimultaneousFit_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MCfit/Signal/phi_MC_SignalFit_5BINS_bin1_SimultaneousFit_Run12_AllPDFs.eps}\\ \vspace{-3pt}
\includegraphics[width=0.32\textwidth]{FCNC/MCfit/Signal/ctk_MC_SignalFit_5BINS_bin2_SimultaneousFit_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MCfit/Signal/ctl_MC_SignalFit_5BINS_bin2_SimultaneousFit_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MCfit/Signal/phi_MC_SignalFit_5BINS_bin2_SimultaneousFit_Run12_AllPDFs.eps}\\ \vspace{-3pt}
\includegraphics[width=0.32\textwidth]{FCNC/MCfit/Signal/ctk_MC_SignalFit_5BINS_bin3_SimultaneousFit_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MCfit/Signal/ctl_MC_SignalFit_5BINS_bin3_SimultaneousFit_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MCfit/Signal/phi_MC_SignalFit_5BINS_bin3_SimultaneousFit_Run12_AllPDFs.eps}\\ \vspace{-3pt}
\includegraphics[width=0.32\textwidth]{FCNC/MCfit/Signal/ctk_MC_SignalFit_5BINS_bin4_SimultaneousFit_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MCfit/Signal/ctl_MC_SignalFit_5BINS_bin4_SimultaneousFit_Run12_AllPDFs.eps}
\includegraphics[width=0.32\textwidth]{FCNC/MCfit/Signal/phi_MC_SignalFit_5BINS_bin4_SimultaneousFit_Run12_AllPDFs.eps}\\ \vspace{-3pt}
\captionof{figure}[Projections of the fit to the simulated \BuToKstmm decay sample.]{Projections of the fit to the simulated \BuToKstmm decay sample. All events are weighted according to the acceptance correction function. The black markers represent the data, blue area represents the fit. Each figure represents one \qsq interval (\qsq range of the interval is denoted in the figures in \gevgev). On the left, \ctk projections are shown, in the middle \ctl projections and on the right $\phi$ projections are shown.}\label{fig:FIT-MC-sigFit}
\end{figure}
\clearpage
\subsection{Generator level simulation fit}\label{valid-genMC}
In order to extract the angular parameters used for the \lhcb Monte Carlo simulation, an independent generator level sample of 200\,000 \Bu mesons decaying at rest to $\Kstarp_{[\Kp\piz]}\mumu$ has been generated. In \refFig{FIT-GenLvl-vs-sigMC}, the measured values of \pwave angular moments are shown.
%For readers comfort, the larger bin of $[1.1\gevgev-6\gevgev]$ is removed.
The agreement between the \lhcb simulation and the generator level event simulation is very good. The difference between them is below three standard deviations in all bins and all variables, showing the functionality of the angular acceptance corrections in all \qsq regions.
\begin{figure}[hbt!]\vspace{-10pt}
\centering
\includegraphics[width=0.40\textwidth]{FCNC/MCfit/GenLvl_vs_MC_Fl.eps}
\includegraphics[width=0.40\textwidth]{FCNC/MCfit/GenLvl_vs_MC_S3.eps}\\
\includegraphics[width=0.40\textwidth]{FCNC/MCfit/GenLvl_vs_MC_S4.eps}
\includegraphics[width=0.40\textwidth]{FCNC/MCfit/GenLvl_vs_MC_S5.eps}\\
\includegraphics[width=0.40\textwidth]{FCNC/MCfit/GenLvl_vs_MC_Afb.eps}
\includegraphics[width=0.40\textwidth]{FCNC/MCfit/GenLvl_vs_MC_S7.eps}\\
\includegraphics[width=0.40\textwidth]{FCNC/MCfit/GenLvl_vs_MC_S8.eps}
\includegraphics[width=0.40\textwidth]{FCNC/MCfit/GenLvl_vs_MC_S9.eps}\\
\captionof{figure}[Fit to the generator level compared to the fit to the full simulation.]{Fit to the generator level simulation compared to the fit to the \lhcb simulation results. The brown stripes represent the resonant \qsq regions. These regions are excluded from the fit. The red boxes represent the difference between the processed \lhcb simulation and the generator level simulation in terms of standard deviations, \stdev. The fitted values are in agreement, proving the functionality of the angular acceptance correction in all \qsq regions.}\label{fig:FIT-GenLvl-vs-sigMC}
\end{figure}
%\input{Chapters/ParameterMeasurement/GenLvl_Vs_Davids}
%\input{Chapters/ParameterMeasurement/GenLvlMC_5BINS_bin0_Run12}
%\input{Chapters/ParameterMeasurement/GenLvlMC_5BINS_bin1_Run12}
%\input{Chapters/ParameterMeasurement/GenLvlMC_5BINS_bin2_Run12}
%\input{Chapters/ParameterMeasurement/GenLvlMC_5BINS_bin3_Run12}
%\input{Chapters/ParameterMeasurement/GenLvlMC_5BINS_bin4_Run12}
%\input{Chapters/ParameterMeasurement/GenLvlMC_8BINS_bin0_Run12}
%\input{Chapters/ParameterMeasurement/GenLvlMC_8BINS_bin1_Run12}
%\input{Chapters/ParameterMeasurement/GenLvlMC_8BINS_bin2_Run12}
%\input{Chapters/ParameterMeasurement/GenLvlMC_8BINS_bin3_Run12}
%\input{Chapters/ParameterMeasurement/GenLvlMC_8BINS_bin4_Run12}
%\input{Chapters/ParameterMeasurement/GenLvlMC_8BINS_bin5_Run12}
%\input{Chapters/ParameterMeasurement/GenLvlMC_8BINS_bin6_Run12}
%\input{Chapters/ParameterMeasurement/GenLvlMC_8BINS_bin7_Run12}
\subsection{Validation of the folding method}\label{valid-folding}
In order to validate the \fcncfitter framework's folding method classes, a check using the \lhcb simulation sample of \BuToKstmm is performed. The sample is fitted using the full angular description as well as using all five folding techniques described in \refSec{ANA_folding}. The results of the full fit are compared to the fit results using the five folding methods. As shown in \refFig{FIT-Fold-comparison}, there is a perfect agreement between all five folding methods and the full angular fit in all \qsq bins and all angular observables.
%-----------------------------------------
\begin{figure}[hbt!]\vspace{-10pt}
\centering
\includegraphics[width=0.40\textwidth]{FCNC/MCfit/Folding_Fl_new.pdf}
\includegraphics[width=0.40\textwidth]{FCNC/MCfit/Folding_S3.pdf}\\
\includegraphics[width=0.40\textwidth]{FCNC/MCfit/Folding_S4.eps}
\includegraphics[width=0.40\textwidth]{FCNC/MCfit/Folding_S5.eps}\\
\includegraphics[width=0.40\textwidth]{FCNC/MCfit/Folding_Afb.eps}
\includegraphics[width=0.40\textwidth]{FCNC/MCfit/Folding_S7.eps}\\
\includegraphics[width=0.40\textwidth]{FCNC/MCfit/Folding_S8.eps}
\includegraphics[width=0.40\textwidth]{FCNC/MCfit/Folding_S9.eps}\\
\captionof{figure}[Full angular fit compared to fits using the angular folding method.]{Full angular fit compared to fits using the angular folding method, as indicated in each figure by ``Fld''. The brown stripes represent the resonance \qsq regions. These regions are excluded from the fit. The red boxes represent the difference between the full angular fit and the fits using the folding methods in terms of standard deviations \stdev. The results from the full angular fit and the fits using the folding methods are in perfect agreement in all \qsq regions. This proves the functionality of the angular acceptance correction in the folded fits.}\label{fig:FIT-Fold-comparison}
\end{figure}
%-----------------------------------------
%\input{Chapters/ParameterMeasurement/MC_Signal_5BINS_bin0_folding0_Run12}
%\input{Chapters/ParameterMeasurement/MC_Signal_5BINS_bin0_folding1_Run12}
%\input{Chapters/ParameterMeasurement/MC_Signal_5BINS_bin0_folding2_Run12}
%\input{Chapters/ParameterMeasurement/MC_Signal_5BINS_bin0_folding3_Run12}
%\input{Chapters/ParameterMeasurement/MC_Signal_5BINS_bin0_folding4_Run12}
%\input{Chapters/ParameterMeasurement/MC_Signal_5BINS_bin1_folding0_Run12}
%\input{Chapters/ParameterMeasurement/MC_Signal_5BINS_bin1_folding1_Run12}
%\input{Chapters/ParameterMeasurement/MC_Signal_5BINS_bin1_folding2_Run12}
%\input{Chapters/ParameterMeasurement/MC_Signal_5BINS_bin1_folding3_Run12}
%\input{Chapters/ParameterMeasurement/MC_Signal_5BINS_bin1_folding4_Run12}
%\input{Chapters/ParameterMeasurement/MC_Signal_5BINS_bin2_folding0_Run12}
%\input{Chapters/ParameterMeasurement/MC_Signal_5BINS_bin2_folding1_Run12}
%\input{Chapters/ParameterMeasurement/MC_Signal_5BINS_bin2_folding2_Run12}
%\input{Chapters/ParameterMeasurement/MC_Signal_5BINS_bin2_folding3_Run12}
%\input{Chapters/ParameterMeasurement/MC_Signal_5BINS_bin2_folding4_Run12}
%\input{Chapters/ParameterMeasurement/MC_Signal_5BINS_bin3_folding0_Run12}
%\input{Chapters/ParameterMeasurement/MC_Signal_5BINS_bin3_folding1_Run12}
%\input{Chapters/ParameterMeasurement/MC_Signal_5BINS_bin3_folding2_Run12}
%\input{Chapters/ParameterMeasurement/MC_Signal_5BINS_bin3_folding3_Run12}
%\input{Chapters/ParameterMeasurement/MC_Signal_5BINS_bin3_folding4_Run12}
%\input{Chapters/ParameterMeasurement/MC_Signal_5BINS_bin4_folding0_Run12}
%\input{Chapters/ParameterMeasurement/MC_Signal_5BINS_bin4_folding1_Run12}
%\input{Chapters/ParameterMeasurement/MC_Signal_5BINS_bin4_folding2_Run12}
%\input{Chapters/ParameterMeasurement/MC_Signal_5BINS_bin4_folding3_Run12}
%\input{Chapters/ParameterMeasurement/MC_Signal_5BINS_bin4_folding4_Run12}
\input{./Chapters/Validation/refFit}
\clearpage

BIN
Feynman/BuToKstJpsi.pdf

Binary file not shown.

49
Feynman/BuToKstJpsi.tex

@ -0,0 +1,49 @@
\documentclass{article}
\usepackage[paperheight=110mm,paperwidth=125mm, margin=15mm]{geometry}
\usepackage{feynmp-auto}
%\addtolength{\oddsidemargin}{-10mm}
%\addtolength{\evensidemargin}{-10mm}
% commas in feynmp sometimes lead to strange errors, use \comma instead
\DeclareMathSymbol{\comma}{\mathpunct}{letters}{"3B}
\begin{document}\LARGE
\centering
\begin{fmffile}{BuToKstJpsi_diagram} \centering
\begin{fmfgraph*}(280,220)
\fmfstraight
\fmfleft{l6,l5,l4,l3,l2,l1,l0}
\fmfright{r6,r5,r4,r3,r2,r1,r0}
\fmf{phantom}{l2,c1,c2,c3,c4,c5,c6,c7,r2}
\fmf{phantom}{l4,e1,e2,e3,e4,e5,e6,e7,r4}
\fmffreeze
%Make the spectator quark
\fmf{fermion,label=$u$,tension=1.0, label.side=left}{l6,r6}
%Make b to c
\fmf{fermion,label=$\bar{b}$,tension=1.0,label.side=left}{c4,l2}
\fmf{fermion,label=$\bar{c}$,tension=1.0,label.side=left}{r0,c4}
%Make the blobs
\fmf{plain,right=-0.3,tension=-0.5,label=$B^+$, label.side=right,label.dist=5.4}{l2,l6}
\fmf{plain,right=0.3,tension=-0.5}{l2,l6}
\fmf{plain,right=-0.4,tension=-0.5,label=$K^*$, label.side=right,label.dist=2.2}{r4,r6}
\fmf{plain,right=0.4,tension=-0.5}{r4,r6}
\fmf{plain,right=-0.5,tension=-0.5,label=$J/\psi$, label.side=right,label.dist=2.2}{r0,r2}
\fmf{plain,right=0.5,tension=-0.5}{r0,r2}
%Make W
\fmf{photon,label=$W^+$, label.side=right}{c4,e5}
%Make b to c
\fmf{fermion,label=$c$,tension=1.0,label.side=left}{e5,r2}
\fmf{fermion,label=$\bar{s}$,tension=1.0,label.side=left}{r4,e5}
\end{fmfgraph*}
\end{fmffile}
\pagestyle{empty}
\end{document}

BIN
Feynman/BuToKstPhi.pdf

Binary file not shown.

56
Feynman/BuToKstPhi.tex

@ -0,0 +1,56 @@
\documentclass{article}
\usepackage[paperheight=110mm,paperwidth=125mm, margin=15mm]{geometry}
\usepackage{feynmp-auto}
%\addtolength{\oddsidemargin}{-10mm}
%\addtolength{\evensidemargin}{-10mm}
% commas in feynmp sometimes lead to strange errors, use \comma instead
\DeclareMathSymbol{\comma}{\mathpunct}{letters}{"3B}
\begin{document}\LARGE
\centering
\begin{fmffile}{BuToKstPhi_diagram} \centering
\begin{fmfgraph*}(280,220)
\fmfstraight
\fmfleft{l8,l7,l6,l5,l4,l3,l2,l1,l0}
\fmfright{r8,r7,r6,r5,r4,r3,r2,r1,r0}
\fmf{phantom}{l0,a1,a2,a3,a4,a5,a6,a7,a8,r0}
\fmf{phantom}{l4,d1,d2,d3,d4,d5,d6,d7,d8,d9,r4}
\fmf{phantom}{l8,g1,g2,g3,g4,g5,g6,g7,g8,r8}
\fmffreeze
%Make B+
\fmf{fermion,label=$\bar{b}$,tension=1.0, label.side=left}{l0,a3}
\fmf{fermion,label=$u$,tension=1.0, label.side=left}{l8,g3}
%Make photon
\fmf{photon,label=$W^+$, label.side=left}{a3,g3}
%Make s\bar{s}
\fmf{fermion,right=0.3,label=${s}$,tension=1.0,label.side=left}{d6,r5}
\fmf{fermion,right=0.3,label=$\bar{s}$,tension=1.0,label.side=left}{r3,d6}
%Make the W fermions
\fmf{fermion,label=$\bar{s}$,tension=1.0, label.side=left}{a3,r0}
\fmf{fermion,label=$s$,tension=1.0, label.side=left}{g3,r8}
%Make the blobs
\fmf{plain,right=-0.3,tension=-0.5,label=$B^+$, label.side=right,label.dist=0.5}{l0,l8}
\fmf{plain,right=0.3,tension=-0.5}{l0,l8}
\fmf{plain,right=-0.4,tension=-0.5,label=$K^*$, label.side=right,label.dist=0.45}{r8,r5}
\fmf{plain,right=0.4,tension=-0.5}{r8,r5}
\fmf{plain,right=-0.4,tension=-0.5,label=$\phi$, label.side=right,label.dist=0.45}{r0,r3}
\fmf{plain,right=0.4,tension=-0.5}{r0,r3}
\end{fmfgraph*}
\end{fmffile}
\pagestyle{empty}
\end{document}

BIN
Feynman/Documentation.pdf

Binary file not shown.

BIN
Feynman/GenericBox.pdf

Binary file not shown.

17
Feynman/GenericBox.tex

@ -0,0 +1,17 @@
\documentclass{standalone}
\usepackage{feynmp-auto}
\begin{document}
\begin{fmffile}{diagram}
\begin{fmfgraph*}(120,80)
\fmfleft{i1,i2}
\fmfright{o1,o2}
\fmf{fermion}{i1,w1,w2,i2}
\fmf{fermion}{o1,w3,w4,o2}
\fmf{photon}{w1,w3}
\fmf{photon}{w2,w4}
\end{fmfgraph*}
\end{fmffile}
\end{document}

BIN
Feynman/Kaon_box.pdf

Binary file not shown.

27
Feynman/Kaon_box.tex

@ -0,0 +1,27 @@
\documentclass{article}
\usepackage[paperheight=60mm,paperwidth=95mm, margin=5mm]{geometry}
\usepackage{feynmp-auto}
\addtolength{\oddsidemargin}{-10mm}
\addtolength{\evensidemargin}{-10mm}
% commas in feynmp sometimes lead to strange errors, use \comma instead
\DeclareMathSymbol{\comma}{\mathpunct}{letters}{"3B}
\begin{document}\LARGE
\centering
\begin{fmffile}{Kaon_box_diagram} \centering
\begin{fmfgraph*}(300,130)
\fmfleft{i1,i2}
\fmfright{o1,o2}
\fmf{fermion,label=$\bar{s}$}{w1,i1}
\fmf{fermion,label=$u\comma c$,tension=0.0}{w2,w1}
\fmf{fermion,label=$d$}{i2,w2}
\fmf{fermion,label=$\mu^+$}{o1,w3}
\fmf{fermion,label=$\nu$,tension=0.0}{w3,w4}
\fmf{fermion,label=$\mu^-$}{w4,o2}
\fmf{photon,label=$W^+$,tension=0.7}{w1,w3}
\fmf{photon,label=$W^-$,tension=0.7}{w2,w4}
\end{fmfgraph*}
\end{fmffile}
\pagestyle{empty}
\end{document}

BIN
Feynman/Kaon_box_cropped.pdf

Binary file not shown.

9
Feynman/Kaon_box_diagram.t1

@ -0,0 +1,9 @@
% Kaon_box_diagram.t1 -- generated from Kaon_box_diagram.mp
\fmfL(65.00125,94,t){$d$}%
\fmfL(234.99875,6,b){$\mu ^+$}%
\fmfL(65.00122,6,b){$\bar {s}$}%
\fmfL(150.00002,-6,t){$W^+$}%
\fmfL(94.00064,49.99998,r){$u\comma c$}%
\fmfL(150.00002,94,t){$W^-$}%
\fmfL(205.99936,50.00002,l){$\nu $}%
\fmfL(234.99878,94,t){$\mu ^-$}%

BIN
Feynman/Kaon_penguin1.pdf

Binary file not shown.

29
Feynman/Kaon_penguin1.tex

@ -0,0 +1,29 @@
\documentclass{article}
\usepackage[paperheight=60mm,paperwidth=95mm, margin=5mm]{geometry}
\usepackage{feynmp-auto}
\addtolength{\oddsidemargin}{-10mm}
\addtolength{\evensidemargin}{-10mm}
% commas in feynmp sometimes lead to strange errors, use \comma instead
\DeclareMathSymbol{\comma}{\mathpunct}{letters}{"3B}
\begin{document}\LARGE
\centering
\begin{fmffile}{Kaon_penguin1_diagram} \centering
\begin{fmfgraph*}(300,130)
\fmfleft{i1,i2}
\fmfright{o1,o2}
\fmf{fermion,label=$d$}{i2,w2}
\fmf{photon,label=$W$,tension=-0.5}{w2,w1}
\fmf{fermion,label=$\bar{s}$}{w1,i1}
\fmf{fermion,label=$u\comma c$}{w2,z1}
\fmf{fermion,label=$\bar{u}\comma \bar{c}$,label.side=left}{z1,w1}
\fmf{photon,label=$Z^0$,tension=1.5}{z1,z2}
\fmf{fermion,label=$\mu^+$,label.side=right}{o1,z2}
\fmf{fermion,label=$\mu^-$}{z2,o2}
\end{fmfgraph*}
\end{fmffile}
\pagestyle{empty}
\end{document}

BIN
Feynman/Kaon_penguin2.pdf

Binary file not shown.

27
Feynman/Kaon_penguin2.tex

@ -0,0 +1,27 @@
\documentclass{article}
\usepackage[paperheight=60mm,paperwidth=95mm, margin=5mm]{geometry}
\usepackage{feynmp-auto}
\addtolength{\oddsidemargin}{-10mm}
\addtolength{\evensidemargin}{-10mm}
% commas in feynmp sometimes lead to strange errors, use \comma instead
\DeclareMathSymbol{\comma}{\mathpunct}{letters}{"3B}
\begin{document}\LARGE
\centering
\begin{fmffile}{Kaon_penguin2_diagram} \centering
\begin{fmfgraph*}(300,130)
\fmfleft{i1,i2}
\fmfright{o1,o2}
\fmf{fermion,label=$d$}{i2,w2}
\fmf{fermion,label=$u\comma c$,tension=-0.5}{w2,w1}
\fmf{fermion,label=$\bar{s}$}{w1,i1}
\fmf{photon,label=$W^+$}{w2,z1}
\fmf{photon,label=$W^-$,label.side=left}{z1,w1}
\fmf{photon,label=$Z^0$,tension=1.5}{z1,z2}
\fmf{fermion,label=$\mu^+$,label.side=right}{o1,z2}
\fmf{fermion,label=$\mu^-$}{z2,o2}
\end{fmfgraph*}
\end{fmffile}
\pagestyle{empty}
\end{document}

BIN
Feynman/O1_a.pdf

Binary file not shown.

32
Feynman/O1_a.tex

@ -0,0 +1,32 @@
\documentclass{article}
\usepackage[paperheight=80mm,paperwidth=95mm, margin=5mm]{geometry}
\usepackage{feynmp-auto}
\addtolength{\oddsidemargin}{-10mm}
\addtolength{\evensidemargin}{-10mm}
% commas in feynmp sometimes lead to strange errors, use \comma instead
\DeclareMathSymbol{\comma}{\mathpunct}{letters}{"3B}
\begin{document}\LARGE
\centering
\begin{fmffile}{O1_a_diagram} \centering
\begin{fmfgraph*}(300,180)
\fmfstraight
\fmfleft{l0,l1,l2,l3,l4}
\fmfright{r0,r1,r2,r3,r4}
\fmf{phantom}{l1,w1,r1}
\fmf{phantom}{l3,v1,r3}
\fmffreeze
\fmf{fermion,label=$u$,label.side=left}{l4,v1}
\fmf{fermion,label=$s$,label.side=left}{v1,r4}
\fmf{photon,label=$W$,tension=1.0,label.side=right}{v1,w1}
\fmf{fermion,label=$d$,label.side=right}{l0,w1}
\fmf{fermion,label=$u$,label.side=right}{w1,r0}
\end{fmfgraph*}
\end{fmffile}
\pagestyle{empty}
\end{document}

BIN
Feynman/O1_b.pdf

Binary file not shown.

35
Feynman/O1_b.tex

@ -0,0 +1,35 @@
\documentclass{article}
% --- Standalone Feynman diagram; built with make_plot.sh, included as PDF ---
% Same current-current topology as O1_a.tex (W exchange between u->s and
% d->u), with one gluon exchanged between the two quark lines (QCD correction).
\usepackage[paperheight=80mm,paperwidth=95mm, margin=5mm]{geometry}
\usepackage{feynmp-auto}
\addtolength{\oddsidemargin}{-10mm}
\addtolength{\evensidemargin}{-10mm}
% commas in feynmp sometimes lead to strange errors, use \comma instead
\DeclareMathSymbol{\comma}{\mathpunct}{letters}{"3B}
\begin{document}\LARGE
\centering
\begin{fmffile}{O1_b_diagram} \centering
\begin{fmfgraph*}(300,180)
\fmfstraight
% Finer 9-point ladder than in O1_a so the gluon endpoints a1/b1 can sit
% close to the external legs.
\fmfleft{l0,l1,l2,l3,l4,l5,l6,l7,l8}
\fmfright{r0,r1,r2,r3,r4,r5,r6,r7,r8}
\fmf{phantom}{l1,a1,a2,a3,r1}
\fmf{phantom}{l2,w1,r2}
\fmf{phantom}{l6,v1,r6}
\fmf{phantom}{l7,b1,b2,b3,r7}
\fmffreeze
% Upper current: u -> s at vertex v1.
\fmf{fermion,label=$u$,label.side=left}{l8,v1}
\fmf{fermion,label=$s$,label.side=left}{v1,r8}
% W exchanged between the two currents.
\fmf{photon,label=$W$,tension=1.0,label.side=right}{v1,w1}
% Lower current: d -> u at vertex w1.
\fmf{fermion,label=$d$,label.side=right}{l0,w1}
\fmf{fermion,label=$u$,label.side=right}{w1,r0}
% Gluon correction between the two quark lines.
\fmf{gluon,label=$g$,label.side=left}{a1,b1}
\end{fmfgraph*}
\end{fmffile}
\pagestyle{empty}
\end{document}

BIN
Feynman/O3_6.pdf

Binary file not shown.

41
Feynman/O3_6.tex

@ -0,0 +1,41 @@
\documentclass{article}
% --- Standalone Feynman diagram; built with make_plot.sh, included as PDF ---
% Gluon-penguin topology for the operators O3-O6: b -> s through a W loop
% with internal u/c/t quarks; a gluon is emitted from the loop.
% NOTE(review): the fermion pair coming off the gluon is labelled l-/l+ here,
% but gluons do not couple to leptons -- for O3-O6 this should presumably be a
% quark pair (q qbar); the labels look copied from bsll_penguin.tex. Confirm
% against the thesis text before changing the rendered figure.
\usepackage[paperheight=60mm,paperwidth=95mm, margin=5mm]{geometry}
\usepackage{feynmp-auto}
\addtolength{\oddsidemargin}{-10mm}
\addtolength{\evensidemargin}{-10mm}
% commas in feynmp sometimes lead to strange errors, use \comma instead
\DeclareMathSymbol{\comma}{\mathpunct}{letters}{"3B}
\begin{document}\LARGE
\centering
\begin{fmffile}{O3_6_diagram} \centering
\begin{fmfgraph*}(300,130)
\fmfstraight
% Phantom rows pin the loop vertices a1/a2, the loop bottom c1, the gluon
% vertex d1 and the outgoing pair.
\fmfleft{l0,l1,l2,l3,l4}
\fmfright{r0,r1,r2,r3,r4}
\fmf{phantom}{l4,a1,a2,r4}
\fmf{phantom}{l3,b1,r3}
\fmf{phantom}{l2,c1,r2}
\fmf{phantom}{l1,d1,r1}
\fmf{phantom}{l0,e1,e2,r0}
\fmffreeze
% b -> s line with the W emitted and reabsorbed (a1 -> a2).
\fmf{fermion,label=$b$,tension=1,label.side=right}{l4,a1}
\fmf{photon,label=$W^-$,tension=1,label.side=right}{a1,a2}
\fmf{fermion,label=$s$, tension=1}{a2,r4}
% Internal quark loop bowed down to c1.
\fmf{fermion,right=0.5,tension=0.5}{a1,c1,a2}
\fmfv{label=$\bar{u}\comma \bar{c} \comma \bar{t}$,label.angle=80}{c1}
% Gluon from the loop to the vertex d1.
\fmf{gluon,label=$g$,tension=0.7,label.side=left}{d1,c1}
\fmf{fermion,label=$l^-$,tension=-0.475,label.side=left}{d1,r2}
\fmf{fermion,label=$l^+$,tension=0.6}{r0,d1}
% \fmf{phantom, tension = 1.5}{l2,l1}
\end{fmfgraph*}
\end{fmffile}
\pagestyle{empty}
\end{document}

BIN
Feynman/bsll_DM.pdf

Binary file not shown.

35
Feynman/bsll_DM.tex

@ -0,0 +1,35 @@
\documentclass{article}
% --- Standalone Feynman diagram; built with make_plot.sh, included as PDF ---
% New-physics contribution to b -> s l+ l-: box diagram with dark-matter
% fermions chi on the internal lines and scalar mediators phi_q on the sides.
\usepackage[paperheight=60mm,paperwidth=95mm, margin=10mm]{geometry}
\usepackage{feynmp-auto}
\addtolength{\oddsidemargin}{-10mm}
\addtolength{\evensidemargin}{-10mm}
% commas in feynmp sometimes lead to strange errors, use \comma instead
\DeclareMathSymbol{\comma}{\mathpunct}{letters}{"3B}
\begin{document}\LARGE
\centering
\begin{fmffile}{bsll_DM_diagram} \centering
\begin{fmfgraph*}(280,110)
\fmfleft{l1,l2}
\fmfright{r1,r2}
% Phantom rows fix the four box corners: w1,w2 (bottom), v1,v2 (top).
\fmf{phantom}{l1,w1,w2,r1}
\fmf{phantom}{l2,v1,v2,r2}
\fmffreeze
% FIX: the lepton labels used \l, the text-mode command for the Polish letter
% l-with-stroke, which is invalid in math mode; use a plain l as in the other
% b->sll diagrams of this set (e.g. bsll_penguin.tex).
\fmf{fermion,label=$l^+$,label.side=left}{r1,w2}
\fmf{fermion,label=$\chi$,label.side=left}{w2,w1}
\fmf{fermion,label=$\bar{s}$,label.side=left}{w1,l1}
\fmf{fermion,label=$b$,label.side=left}{l2,v1}
\fmf{fermion,label=$\chi$,label.side=left}{v1,v2}
\fmf{fermion,label=$l^-$,label.side=left}{v2,r2}
% Scalar mediators drawn as dashed lines.
\fmf{dashes,label=$\phi_q$,label.side=left}{w1,v1}
\fmf{dashes,label=$\phi_q$}{w2,v2}
\end{fmfgraph*}
\end{fmffile}
\pagestyle{empty}
\end{document}

BIN
Feynman/bsll_Zprime.pdf

Binary file not shown.

33
Feynman/bsll_Zprime.tex

@ -0,0 +1,33 @@
\documentclass{article}
% --- Standalone Feynman diagram; built with make_plot.sh, included as PDF ---
% New-physics contribution to b -> s l+ l-: tree-level exchange of a Z' boson
% coupling the b->s quark current directly to the lepton pair.
\usepackage[paperheight=50mm,paperwidth=95mm, margin=5mm]{geometry}
\usepackage{feynmp-auto}
\addtolength{\oddsidemargin}{-10mm}
\addtolength{\evensidemargin}{-10mm}
% commas in feynmp sometimes lead to strange errors, use \comma instead
\DeclareMathSymbol{\comma}{\mathpunct}{letters}{"3B}
\begin{document}\LARGE
\centering
\begin{fmffile}{bsll_Zprime_diagram} \centering
\begin{fmfgraph*}(280,100)
\fmfstraight
\fmfleft{l1,l2,l3}
\fmfright{r1,r2,r3}
% Phantom rows pin the quark vertex q1 and the lepton vertex p3.
\fmf{phantom}{l1,q1,r1}
\fmf{phantom}{l2,p1,p2,p3,p4,r2}
\fmffreeze
\fmf{fermion,label=$b$,tension=1.0}{l1,q1}
\fmf{fermion,label=$s$,tension=1.0}{q1,r1}
% FIX: Z^{'} set the prime as an explicit superscript (wrong height/spacing);
% a bare apostrophe already typesets the prime correctly.
\fmf{photon,label=$Z'$, label.side=left,tension=1}{q1,p3}
% FIX: the lepton labels used \l (text-mode l-with-stroke, invalid in math
% mode); use a plain l as in the other b->sll diagrams of this set.
\fmf{fermion,label=$l^-$, label.side=left}{p3,r3}
\fmf{fermion,label=$l^+$, label.side=left}{r2,p3}
\end{fmfgraph*}
\end{fmffile}
\pagestyle{empty}
\end{document}

BIN
Feynman/bsll_box.pdf

Binary file not shown.

38
Feynman/bsll_box.tex

@ -0,0 +1,38 @@
\documentclass{article}
% --- Standalone Feynman diagram; built with make_plot.sh, included as PDF ---
% Standard-Model box diagram for b -> s l+ l-: the quark line (with internal
% u/c/t) and the lepton line are connected by a W+W- pair; the internal
% lepton-line propagator between the two W vertices is a neutrino.
\usepackage[paperheight=60mm,paperwidth=95mm, margin=5mm]{geometry}
\usepackage{feynmp-auto}
\addtolength{\oddsidemargin}{-10mm}
\addtolength{\evensidemargin}{-10mm}
% commas in feynmp sometimes lead to strange errors, use \comma instead
\DeclareMathSymbol{\comma}{\mathpunct}{letters}{"3B}
\begin{document}\LARGE
\centering
\begin{fmffile}{bsll_box_diagram} \centering
\begin{fmfgraph*}(300,130)
\fmfstraight
\fmfleft{l1,l2,l3,l4}
\fmfright{r1,r2,r3,r4}
% Phantom rows pin the quark vertices q1/q2, the W endpoints m1/m2 and the
% lepton row p1..p4.
\fmf{phantom}{l4,q1,q2,r4}
\fmf{phantom}{l2,m1,m2,r2}
\fmf{phantom}{l1,p1,p2,p3,p4,r1}
\fmffreeze
% Quark side of the box; large tension keeps the top line straight.
\fmf{fermion,label=$b$,tension=10.0}{l4,q1}
\fmf{fermion,label=$u\comma c \comma t$,tension=10.0}{q1,q2}
\fmf{fermion,label=$s$,tension=10.0}{q2,r4}
\fmf{photon,label=$W^+$,tension=1}{q1,m1}
\fmf{photon,label=$W^-$,tension=1}{q2,m2}
% Internal neutrino between the two W vertices on the lepton side.
\fmf{fermion,label=$\nu$,tension=0.7}{m1,m2}
% FIX: the external lepton labels used \l (text-mode l-with-stroke, invalid
% in math mode); use a plain l as in the other b->sll diagrams of this set.
\fmf{fermion,label=$l^+$, label.side=left}{p3,m1}
\fmf{fermion,label=$l^-$}{m2,r1}
\end{fmfgraph*}
\end{fmffile}
\pagestyle{empty}
\end{document}

BIN
Feynman/bsll_eff.pdf

Binary file not shown.

35
Feynman/bsll_eff.tex

@ -0,0 +1,35 @@
\documentclass{article}
% --- Standalone Feynman diagram; built with make_plot.sh, included as PDF ---
% Effective-theory picture of b -> s l+ l-: the loop/penguin structure is
% contracted into a single four-fermion vertex (hatched square).
\usepackage[paperheight=50mm,paperwidth=95mm, margin=5mm]{geometry}
\usepackage{feynmp-auto}
\addtolength{\oddsidemargin}{-10mm}
\addtolength{\evensidemargin}{-10mm}
% commas in feynmp sometimes lead to strange errors, use \comma instead
\DeclareMathSymbol{\comma}{\mathpunct}{letters}{"3B}
\begin{document}\LARGE
\centering
\begin{fmffile}{bsll_eff_diagram} \centering
\begin{fmfgraph*}(300,100)
\fmfstraight
\fmfleft{l3,l4,l5}
\fmfright{r3,r4,r5}
% Phantom rows pin the effective vertex q1 and the lepton endpoints m2/m3.
\fmf{phantom}{l5,q1,r5}
\fmf{phantom}{l4,a1,r4}
\fmf{phantom}{l3,m1,m2,m3,r3}
% Hatched square = effective (Wilson-coefficient) vertex.
\fmfv{d.shape=square,d.filled=hatched,d.size=10}{q1}
\fmffreeze
% Quark line through the effective vertex; large tension keeps it straight.
\fmf{fermion,label=$b$,tension=10.0}{l5,q1}
\fmf{fermion,label=$s$,tension=10.0}{q1,r5}
% Lepton pair attached to the same vertex.
\fmf{fermion,label=$l^+$, label.side=right}{m2,q1}
\fmf{fermion,label=$l^-$}{q1,m3}
\end{fmfgraph*}
\end{fmffile}
\pagestyle{empty}
\end{document}

BIN
Feynman/bsll_eff_meson.pdf

Binary file not shown.

65
Feynman/bsll_eff_meson.tex

@ -0,0 +1,65 @@
\documentclass{article}
% --- Standalone Feynman diagram; built with make_plot.sh, included as PDF ---
% Hadronic picture of B+ -> K* mu+ mu-: spectator u quark, meson "blobs" for
% the B+ and K*, soft-gluon exchanges, and the b->s transition through an
% effective (hatched-square) vertex emitting the muon pair.
\usepackage[paperheight=110mm,paperwidth=125mm, margin=15mm]{geometry}
\usepackage{feynmp-auto}
%\addtolength{\oddsidemargin}{-10mm}
%\addtolength{\evensidemargin}{-10mm}
% commas in feynmp sometimes lead to strange errors, use \comma instead
\DeclareMathSymbol{\comma}{\mathpunct}{letters}{"3B}
\begin{document}\LARGE
\centering
\begin{fmffile}{bsll_eff_meson_diagram} \centering
\begin{fmfgraph*}(280,130)
\fmfstraight
% Dense phantom grid (rows a..g) used only to pin gluon endpoints and
% vertices at fixed positions.
\fmfleft{l6,l5,l4,l3,l2,l1,l0}
\fmfright{r6,r5,r4,r3,r2,r1,r0}
\fmf{phantom}{l0,a1,a2,a3,a4,a5,a6,a7,a8,a9,r0}
% NOTE(review): this row skips b6 (b5,b7) -- probably a typo, but only b1 is
% referenced below and the rendered PDF was produced with this spacing, so it
% is left as-is.
\fmf{phantom}{l1,b1,b2,b3,b4,b5,b7,b8,b9,r1}
\fmf{phantom}{l2,c1,c2,c3,c4,c5,c6,c7,c8,c9,r2}
\fmf{phantom}{l3,d1,d2,d3,d4,d5,d6,d7,d8,d9,r3}
\fmf{phantom}{l4,e1,e2,e3,e4,e5,e6,e7,e8,e9,r4}
\fmf{phantom}{l5,f1,f2,f3,r5}
\fmf{phantom}{l6,g1,g2,g3,r6}
\fmffreeze
%Make the spectator quark
\fmf{fermion,label=$u$,tension=1.0, label.side=left}{l0,r0}
%Make the blobs (two opposite-curvature plain arcs drawn as a lens shape)
\fmf{plain,right=-0.3,tension=-0.5,label=$B^+$, label.side=right,label.dist=0.5}{l0,l4}
\fmf{plain,right=0.3,tension=-0.5}{l0,l4}
\fmf{plain,right=-0.3,tension=-0.5,label=$K^*$, label.side=right,label.dist=0.45}{r0,r4}
\fmf{plain,right=0.3,tension=-0.5}{r0,r4}
%Make gluons (soft QCD exchanges inside the mesons, incl. a small quark loop c3-d3)
\fmf{gluon}{a1,e1}
\fmf{gluon}{b1,a2}
\fmf{gluon}{a9,c9,e9}
\fmf{gluon}{c9,e7}
\fmf{gluon,right=0.3,tension=-0.5}{a4,c6}
\fmf{gluon}{a7,c6}
\fmf{gluon}{c6,e6}
\fmf{gluon}{a3,c3}
\fmf{gluon}{d3,e3}
\fmf{plain,right=1.0,tension=-0.5}{c3,d3,c3}
%Make b to s
\fmf{fermion,label=$\bar{b}$,tension=1.0,label.side=left}{e5,l4}
\fmf{fermion,label=$\bar{s}$,tension=1.0,label.side=left}{r4,e5}
%Make effective vertex (hatched square = effective b->s ll vertex)
\fmfv{d.shape=square,d.filled=hatched,d.size=10}{e5}
%Make muons
\fmf{fermion,label=$\mu^+$, label.side=left}{g2,e5}
\fmf{fermion,label=$\mu^-$,label.dist=0.2}{e5,g3}
\end{fmfgraph*}
\end{fmffile}
\pagestyle{empty}
\end{document}

BIN
Feynman/bsll_eff_meson_charm.pdf

Binary file not shown.

80
Feynman/bsll_eff_meson_charm.tex

@ -0,0 +1,80 @@
\documentclass{article}
% --- Standalone Feynman diagram; built with make_plot.sh, included as PDF ---
% Same hadronic B+ -> K* picture as bsll_eff_meson.tex, but with the muon
% pair produced through a charm loop (c cbar) radiating a gamma/Z0, i.e. the
% charm-loop contribution to b -> s mu mu.
\usepackage[paperheight=110mm,paperwidth=125mm, margin=15mm]{geometry}
\usepackage{feynmp-auto}
% NOTE(review): textpos does not appear to be used anywhere below -- likely a
% leftover; harmless, kept so the shipped PDF stays reproducible.
\usepackage{textpos}
%\addtolength{\oddsidemargin}{-10mm}
%\addtolength{\evensidemargin}{-10mm}
% commas in feynmp sometimes lead to strange errors, use \comma instead
\DeclareMathSymbol{\comma}{\mathpunct}{letters}{"3B}
\begin{document}\LARGE
\centering
\begin{fmffile}{bsll_eff_meson_charm_diagram} \centering
\begin{fmfgraph*}(280,220)
\fmfstraight
% Dense phantom grid (rows a..i) pinning gluon endpoints and vertices.
\fmfleft{l10,l9,l8,l7,l6,l5,l4,l3,l2,l1,l0}
\fmfright{r10,r9,r8,r7,r6,r5,r4,r3,r2,r1,r0}
\fmf{phantom}{l0,a1,a2,a3,a4,a5,a6,a7,a8,a9,r0}
% NOTE(review): this row skips b6 (b5,b7) -- same apparent typo as in
% bsll_eff_meson.tex; only b1 is referenced, left as-is.
\fmf{phantom}{l1,b1,b2,b3,b4,b5,b7,b8,b9,r1}
\fmf{phantom}{l2,c1,c2,c3,c4,c5,c6,c7,c8,c9,r2}
\fmf{phantom}{l3,d1,d2,d3,d4,d5,d6,d7,d8,d9,r3}
\fmf{phantom}{l4,e1,e2,e3,e4,e5,e6,e7,e8,e9,r4}
\fmf{phantom}{l5,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,r5}
\fmf{phantom}{l6,g1,g2,g3,g4,g5,g6,g7,g8,g9,r6}
\fmf{phantom}{l7,h1,h2,h3,h4,h5,h6,h7,h8,h9,r7}
\fmf{phantom}{l8,i1,i2,i3,i4,i5,i6,i7,i8,i9,r8}
\fmffreeze
%Make the spectator quark
\fmf{fermion,label=$u$,tension=1.0, label.side=left}{l0,r0}
%Make the blobs (lens shapes for the B+ and K* mesons)
\fmf{plain,right=-0.3,tension=-0.5,label=$B^+$, label.side=right,label.dist=0.5}{l0,l4}
\fmf{plain,right=0.3,tension=-0.5}{l0,l4}
\fmf{plain,right=-0.3,tension=-0.5,label=$K^*$, label.side=right,label.dist=1.5}{r0,r4}
\fmf{plain,right=0.3,tension=-0.5}{r0,r4}
%Make gluons (soft QCD exchanges, incl. a small quark loop c3-d3)
\fmf{gluon}{a1,e1}
\fmf{gluon}{b1,a2}
\fmf{gluon}{a9,c9,e9}
\fmf{gluon}{c9,e7}
\fmf{gluon,right=0.3,tension=-0.5}{a4,c6}
\fmf{gluon}{a7,c6}
\fmf{gluon}{c6,e6}
\fmf{gluon}{a3,c3}
\fmf{gluon}{d3,e3}
\fmf{plain,right=1.0,tension=-0.5}{c3,d3,c3}
%Make b to s
\fmf{fermion,label=$\bar{b}$,tension=1.0,label.side=left}{e5,l4}
\fmf{fermion,label=$\bar{s}$,tension=1.0,label.side=left}{r4,e5}
%Make effective vertex (hatched square at e5)
\fmfv{d.shape=square,d.filled=hatched,d.size=10}{e5}
%Make the charm loop
% NOTE(review): \vspace inside a feynmf label is a manual nudge of the label
% position -- fragile, but it matches the shipped PDF, so it is left untouched.
\fmf{plain,right=1.0,tension=-0.5,label=\vspace{-19pt}$c$,label.side=left,label.dist=9.0}{e5,g5}
\fmf{plain,right=1.0,tension=-0.5,label=\vspace{-17pt}$\bar{c}$,label.side=left,label.dist=19.0}{g5,e5}
%Add gluons (connecting the charm loop back to the hadronic system)
\fmf{gluon,right=-1.0,tension=1.0}{f6,e4}
\fmf{gluon,right=1.0,tension=1.0}{f8,e7}
%Make photon (gamma/Z0 from the charm loop to the dimuon vertex h6)
\fmf{photon,label=$\gamma \comma Z^0$, label.side=right}{g5,h6}
%Make muons
\fmf{fermion,label=$\mu^+$,label.side=right}{r6,h6}
\fmf{fermion,label=$\mu^-$,label.side=right}{h6,r8}
\end{fmfgraph*}
\end{fmffile}
\pagestyle{empty}
\end{document}

BIN
Feynman/bsll_leptoquark.pdf

Binary file not shown.

30
Feynman/bsll_leptoquark.tex

@ -0,0 +1,30 @@
\documentclass{article}
% --- Standalone Feynman diagram; built with make_plot.sh, included as PDF ---
% New-physics contribution to b -> s l+ l-: tree-level exchange of a
% leptoquark phi turning b -> l- and l+ -> s.
% NOTE(review): the phi propagator is drawn in "photon" (wiggly) style; for a
% scalar leptoquark a dashed line would be conventional -- confirm the intended
% spin before changing the rendered figure.
\usepackage[paperheight=50mm,paperwidth=95mm, margin=5mm]{geometry}
\usepackage{feynmp-auto}
% commas in feynmp sometimes lead to strange errors, use \comma instead
\DeclareMathSymbol{\comma}{\mathpunct}{letters}{"3B}
\begin{document}\LARGE
\centering
\begin{fmffile}{bsll_leptoquark_diagram} \centering
\begin{fmfgraph*}(250,100)
\fmfstraight
\fmfleft{l1,l2}
\fmfright{r1,r2}
% Phantom rows pin the leptoquark vertices m1/m3 and the lower lepton point p1.
\fmf{phantom}{l1,p1,r1}
\fmf{phantom}{l2,m1,m2,m3,r2}
\fmffreeze
\fmf{fermion,label=$b$,tension=1.0}{l2,m1}
\fmf{photon,label=$\phi$,tension=1}{m1,m3}
\fmf{fermion,label=$s$,tension=1.0}{m3,r2}
% FIX: the lepton labels used \l (text-mode l-with-stroke, invalid in math
% mode); use a plain l as in the other b->sll diagrams of this set.
\fmf{fermion,label=$l^-$, label.side=right}{m1,p1}
\fmf{fermion,label=$l^+$}{r1,m3}
\end{fmfgraph*}
\end{fmffile}
\pagestyle{empty}
\end{document}

BIN
Feynman/bsll_penguin.pdf

Binary file not shown.

42
Feynman/bsll_penguin.tex

@ -0,0 +1,42 @@
\documentclass{article}
% --- Standalone Feynman diagram; built with make_plot.sh, included as PDF ---
% Standard-Model electroweak penguin for b -> s l+ l-: W loop with internal
% u/c/t quarks; a Z/gamma emitted from the loop decays to the lepton pair.
\usepackage[paperheight=60mm,paperwidth=95mm, margin=5mm]{geometry}
\usepackage{feynmp-auto}
\addtolength{\oddsidemargin}{-10mm}
\addtolength{\evensidemargin}{-10mm}
% commas in feynmp sometimes lead to strange errors, use \comma instead
\DeclareMathSymbol{\comma}{\mathpunct}{letters}{"3B}
\begin{document}\LARGE
\centering
\begin{fmffile}{bsll_penguin_diagram} \centering
\begin{fmfgraph*}(300,130)
\fmfstraight
% Phantom rows pin the loop vertices a1/a2, the loop bottom c1 and the
% boson/lepton vertex d1. Same layout as O3_6.tex with Z/gamma instead of g.
\fmfleft{l0,l1,l2,l3,l4}
\fmfright{r0,r1,r2,r3,r4}
\fmf{phantom}{l4,a1,a2,r4}
\fmf{phantom}{l3,b1,r3}
\fmf{phantom}{l2,c1,r2}
\fmf{phantom}{l1,d1,r1}
\fmf{phantom}{l0,e1,e2,r0}
\fmffreeze
% b -> s line with the W emitted and reabsorbed.
\fmf{fermion,label=$b$,tension=1,label.side=right}{l4,a1}
\fmf{photon,label=$W^-$,tension=1,label.side=right}{a1,a2}
\fmf{fermion,label=$s$, tension=1}{a2,r4}
% Internal quark loop bowed down to c1, labelled u/c/t at the vertex.
\fmf{fermion,right=0.5,tension=0.5}{a1,c1,a2}
\fmfv{label=${u}\comma {c}\comma {t}$,label.angle=80}{c1}
% Z/gamma from the loop to the dilepton vertex d1.
\fmf{photon,label=$Z\comma\gamma$,tension=0.7}{c1,d1}
\fmf{fermion,label=$l^-$,tension=-0.475,label.side=left}{d1,r2}
\fmf{fermion,label=$l^+$,tension=0.6}{r0,d1}
% \fmf{phantom, tension = 1.5}{l2,l1}
\end{fmfgraph*}
\end{fmffile}
\pagestyle{empty}
\end{document}

BIN
Feynman/bsmumu_eff.pdf

Binary file not shown.

35
Feynman/bsmumu_eff.tex

@ -0,0 +1,35 @@
\documentclass{article}
% --- Standalone Feynman diagram; built with make_plot.sh, included as PDF ---
% Effective-theory picture of b -> s mu+ mu-: identical layout to
% bsll_eff.tex but with explicit muon labels on the lepton legs.
\usepackage[paperheight=50mm,paperwidth=95mm, margin=5mm]{geometry}
\usepackage{feynmp-auto}
\addtolength{\oddsidemargin}{-10mm}
\addtolength{\evensidemargin}{-10mm}
% commas in feynmp sometimes lead to strange errors, use \comma instead
\DeclareMathSymbol{\comma}{\mathpunct}{letters}{"3B}
\begin{document}\LARGE
\centering
\begin{fmffile}{bsmumu_eff_diagram} \centering
\begin{fmfgraph*}(300,100)
\fmfstraight
\fmfleft{l3,l4,l5}
\fmfright{r3,r4,r5}
% Phantom rows pin the effective vertex q1 and the muon endpoints m2/m3.
\fmf{phantom}{l5,q1,r5}
\fmf{phantom}{l4,a1,r4}
\fmf{phantom}{l3,m1,m2,m3,r3}
% Hatched square = effective (Wilson-coefficient) vertex.
\fmfv{d.shape=square,d.filled=hatched,d.size=10}{q1}
\fmffreeze
% Quark line through the effective vertex; large tension keeps it straight.
\fmf{fermion,label=$b$,tension=10.0}{l5,q1}
\fmf{fermion,label=$s$,tension=10.0}{q1,r5}
% Muon pair attached to the same vertex.
\fmf{fermion,label=$\mu^+$, label.side=right}{m2,q1}
\fmf{fermion,label=$\mu^-$}{q1,m3}
\end{fmfgraph*}
\end{fmffile}
\pagestyle{empty}
\end{document}

51
Feynman/make_plot.sh

@ -0,0 +1,51 @@
#!/bin/bash
#
# Compile a standalone feynmp Feynman diagram:
#   pdflatex (writes <name>_diagram.mp)  ->  mpost (draws the diagram)
#   ->  pdflatex (picks the diagram up)  ->  pdftk (keep only page 1)
# and clean the auxiliary files afterwards.
#
# Usage: bash make_plot.sh texfileName     (without the .tex extension)
#
# FIXES vs. original: $1 is now quoted everywhere (paths with spaces no longer
# break the script), rm uses -f (no noisy errors for files that were never
# produced), pdftk/mv are chained so a pdftk failure cannot destroy the PDF,
# and "succesfully" typos in the status messages are corrected.
if [ $# -eq 1 ]; then
    echo "Building $1_diagram from $1.tex"
else
    echo "Usage: bash make_plot.sh texfileName"
    echo "texfileName is without .tex!"
    exit 2
fi

name="$1"

# First pass: generates ${name}_diagram.mp for MetaPost.
if ! pdflatex "${name}.tex" >/dev/null 2>&1; then
    echo "First compilation failed. Check log."
    exit 1
fi
echo "First pdflatex successfully completed."

# MetaPost renders the actual diagram picture(s).
if ! mpost "${name}_diagram" >/dev/null 2>&1; then
    echo "mpost compilation failed. Check log."
    exit 1
fi
echo "mpost successfully completed."

# Second pass embeds the rendered diagram into the PDF.
if ! pdflatex "${name}.tex" >/dev/null 2>&1; then
    echo "Second compilation failed. Check log."
    exit 1
fi
echo "Second pdflatex successfully completed."

# Keep only the first PDF page; pdftk needs output != input, so write to a
# temporary name and rename only if pdftk succeeded.
pdftk "${name}.pdf" cat 1 output "${name}_2.pdf" && mv "${name}_2.pdf" "${name}.pdf"

# Always clean after build; -f silences complaints about files that some
# TeX setups do not produce (e.g. .synctex vs .synctex.gz).
rm -f "${name}.aux" "${name}.log" "${name}.synctex" \
      "${name}_diagram.log" "${name}_diagram.mp" \
      "${name}_diagram.t1" "${name}_diagram.1"

Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save