figure tabularasa
@ -25,6 +25,9 @@ labelformat=brace, position=top]{subcaption}
\usepackage{pgfplots}
\usepackage{ifdraft}

%% use the current pgfplots
\pgfplotsset{compat=1.16}

%% minted
\usemintedstyle{colorful}
\newmintedfile{yaml}{linenos,mathescape=true}

@ -8,3 +8,4 @@ thesis: document.tex
clean:
	rm -rf $(OUTDIR)/*
	mkdir -p $(OUTDIR)/tikz

@ -46,7 +46,7 @@ expected value \(\EX{X_i}=\mathbb{E}\) and variance
\EX{\overline{X}} = \frac{1}{N}\sum_i\EX{X_i} = \mathbb{E} \label{eq:evalue-mean}\\
\sigma^2_{\overline{X}} = \sum_i\frac{\sigma_i^2}{N^2} =
\frac{\sigma^2}{N} \label{eq:variance-mean}
\end{algin}
\end{align}

Evidently \(\frac{\sigma^2}{N}\xrightarrow{N\rightarrow\infty} 0\)
thus the~\eqref{eq:approxexp} really converges to \(I\). For finite
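To see \eqref{eq:evalue-mean} and \eqref{eq:variance-mean} at work numerically, here is a minimal, self-contained sketch (toy integrand, not the thesis code): the sample mean estimates the integral, and its standard error shrinks like \(\sigma/\sqrt{N}\).

#+begin_src jupyter-python :exports both
import numpy as np

def mc_integrate(f, n, rng=np.random.default_rng(0)):
    """Plain MC estimate of the integral of f over [0, 1] with its standard error."""
    xs = rng.uniform(0, 1, n)
    ys = f(xs)
    mean = ys.mean()                   # estimator of the integral (sample mean)
    err = ys.std(ddof=1) / np.sqrt(n)  # σ/√N
    return mean, err

# the error roughly halves for every fourfold increase in N
[mc_integrate(np.sin, n) for n in (100, 400, 1600)]
#+end_src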
@ -96,7 +96,7 @@ in the beams).
\begin{equation}
\label{eq:averagedm}
\langle\abs{\mathcal{M}}^2\rangle = \frac{1}{4}\sum_{s_1 s_2}\sum_{\lambda_1
\lambda_2} \abs{\mathcal{M}}^2=\overbrace{\frac{1}{3}\frac{1}{4}\frac{(gQ)^4}{(2p)^4}}^\mathfrak{F}\sum_{\lambda_1
\lambda_2} \abs{\mathcal{M}}^2=\overbrace{\frac{1}{3}\frac{1}{4}\frac{\qty(gQ)^4}{\qty(2p)^4}}^\mathfrak{F}\sum_{\lambda_1
\lambda_2}\tr[\qty(\frac{\Gamma_1}{s'^2}+\frac{\Gamma_2}{c'^2})
\ps_2\qty(\frac{\bar{\Gamma}_1}{s'^2}+\frac{\bar{\Gamma}_2}{c'^2})\ps_1]
\end{equation}

[Binary figure files changed; the diff reports only their Before/After sizes, roughly 5 to 23 KiB each.]

@ -271,7 +271,7 @@ Integrate σ with the MC method.
#+end_src

#+RESULTS:
| 0.05441643331124812 | 0.000850414167068247 |
| 0.05442051377646951 | 0.0008434404330817717 |

We export that as TeX.
#+begin_src jupyter-python :exports both :results raw drawer

@ -279,7 +279,7 @@ tex_value(xs_pb_mc, unit=r'\pico\barn', prefix=r'\sigma = ', err=xs_pb_mc_err, s
#+end_src

#+RESULTS:
: \(\sigma = \SI{0.0544\pm 0.0009}{\pico\barn}\)
: \(\sigma = \SI{0.0544\pm 0.0008}{\pico\barn}\)

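=tex_value= is a helper from the author's =utility.py=; only part of its signature is visible in the hunk above. A hypothetical sketch of the idea (round value and error to the error's first significant digit and wrap them in an siunitx =\SI= expression):

#+begin_src jupyter-python :exports both
def tex_value_sketch(value, err, unit=r'\pico\barn', prefix=r'\sigma = '):
    """Hypothetical sketch: render value ± error as an siunitx \\SI expression,
    rounding both to the error's first significant digit."""
    from math import floor, log10
    digits = max(-int(floor(log10(abs(err)))), 0)  # decimal places to keep
    return rf'\({prefix}\SI{{{value:.{digits}f}\pm {err:.{digits}f}}}{{{unit}}}\)'

tex_value_sketch(0.05442051377646951, 0.0008434404330817717)
#+end_src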
*** Integration over η
Plot the integrand of the pseudo-rapidity parametrisation.

@ -302,7 +302,7 @@ save_fig(fig, 'xs_integrand_η', 'xs', size=[4, 4])
#+end_src

#+RESULTS:
| 0.05387227556308623 | 0.00015784230303183058 |
| 0.05380040768597333 | 0.00015708385929804225 |

As we see, the result is a little better if we use pseudo-rapidity,
because the differential cross section does not diverge anymore. But

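The improvement comes from the change of variables \(\theta \to \eta = -\ln\tan(\theta/2)\), whose Jacobian \(\mathrm{d}\theta = \mathrm{d}\eta/\cosh\eta\) flattens the forward/backward peaks. A self-contained sketch with a stand-in integrand (not the real differential cross section) showing both parametrisations of the same integral:

#+begin_src jupyter-python :exports both
import numpy as np

def integrate_in_theta(f, theta_range, n=100_000, rng=np.random.default_rng(1)):
    """MC estimate of ∫ f(θ) dθ in the θ parametrisation."""
    a, b = theta_range
    θ = rng.uniform(a, b, n)
    vals = f(θ) * (b - a)
    return vals.mean(), vals.std(ddof=1) / np.sqrt(n)

def integrate_in_eta(f, eta_range, n=100_000, rng=np.random.default_rng(1)):
    """Same integral after θ → η = -ln tan(θ/2); dθ = dη / cosh(η)."""
    a, b = eta_range
    η = rng.uniform(a, b, n)
    θ = 2 * np.arctan(np.exp(-η))
    vals = f(θ) / np.cosh(η) * (b - a)
    return vals.mean(), vals.std(ddof=1) / np.sqrt(n)

# stand-in that, like the real integrand, peaks towards the beam axis
f = lambda θ: (1 + np.cos(θ) ** 2) / np.sin(θ)
η_max = 2.5
θ_min = 2 * np.arctan(np.exp(-η_max))
integrate_in_theta(f, (θ_min, np.pi - θ_min)), integrate_in_eta(f, (-η_max, η_max))
#+end_src

Both estimates agree; the η version has a much smaller error because the peaks are absorbed into the Jacobian.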
@ -316,7 +316,7 @@ And yet again export that as tex.
#+end_src

#+RESULTS:
: \(\sigma = \SI{0.05387\pm 0.00016}{\pico\barn}\)
: \(\sigma = \SI{0.05380\pm 0.00016}{\pico\barn}\)

*** Using =VEGAS=
Now we use =VEGAS= on the θ parametrisation and see what happens.

@ -329,7 +329,7 @@ Now we use =VEGAS= on the θ parametrisation and see what happens.
#+end_src

#+RESULTS:
| 0.05382003923613133 | 5.515086040159631e-05 |
| 0.05379845560620143 | 4.388947283680102e-05 |

This is pretty good, although the variance reduction may be achieved
partially by accumulating the results from all runs. The uncertainty

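For reference, the =vegas= package (G. P. Lepage's implementation) is driven roughly like this; a generic sketch with a toy peaked integrand, not the actual call used in this file:

#+begin_src jupyter-python :exports both
import vegas  # assumes the `vegas` package is installed
import numpy as np

def toy_integrand(x):
    # sharply peaked toy function standing in for the cross-section integrand
    return np.exp(-100 * (x[0] - 0.5) ** 2)

integ = vegas.Integrator([[0.0, 1.0]])
integ(toy_integrand, nitn=5, neval=1000)          # warm-up: adapt the grid, discard the result
result = integ(toy_integrand, nitn=10, neval=1000)
(result.mean, result.sdev)
#+end_src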
@ -342,7 +342,7 @@ And export that as tex.
#+end_src

#+RESULTS:
: \(\sigma = \SI{0.05382\pm 0.00006}{\pico\barn}\)
: \(\sigma = \SI{0.05380\pm 0.00004}{\pico\barn}\)

Surprisingly, without accumulation, the result is not much different.
This depends, of course, on the iteration count.

@ -353,7 +353,7 @@ This depends, of course, on the iteration count.
#+end_src

#+RESULTS:
| 0.05378075568964776 | 7.452808684393069e-05 |
| 0.05382687634979217 | 7.545090077243005e-05 |

*** Testing the Statistics
Let's battle-test the statistics.

@ -370,7 +370,7 @@ Let's battle test the statistics.
#+end_src

#+RESULTS:
: 0.694
: 0.681

So we see: the standard deviation is sound.

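The numbers around 0.68 are the expected 1σ coverage: if the reported standard deviation is honest, about 68% of independent runs should land within one σ of the true value. A self-contained sketch of such a check, using a toy integrand with a known exact value:

#+begin_src jupyter-python :exports both
import numpy as np

rng = np.random.default_rng(2)
true_value = 1 - np.cos(1)           # ∫₀¹ sin(x) dx, known exactly

def one_run(n=1000):
    xs = rng.uniform(0, 1, n)
    ys = np.sin(xs)
    return ys.mean(), ys.std(ddof=1) / np.sqrt(n)

runs = np.array([one_run() for _ in range(1000)])
hits = np.abs(runs[:, 0] - true_value) <= runs[:, 1]
hits.mean()                          # should come out near 0.68
#+end_src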
@ -390,7 +390,7 @@ Doing the same thing with =VEGAS= works as well.
#+end_src

#+RESULTS:
: 0.672
: 0.704

** Sampling and Analysis
Define the sample number.

@ -422,7 +422,7 @@ Now we monte-carlo sample our distribution. We observe that the efficiency is v
#+end_src

#+RESULTS:
: 0.027772146766673424
: 0.026353131968376242

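The efficiency quoted here is the acceptance rate of hit-or-miss (rejection) sampling: propose uniformly, accept with probability f(x) divided by an upper bound on f. A generic sketch with a made-up target density that, like the real one, peaks towards cos θ = ±1:

#+begin_src jupyter-python :exports both
import numpy as np

def hit_or_miss(pdf, interval, upper_bound, n, rng=np.random.default_rng(3)):
    """Accept x ~ U(interval) with probability pdf(x)/upper_bound.
    Returns the accepted sample and the efficiency = accepted/proposed."""
    a, b = interval
    xs = rng.uniform(a, b, n)
    accept = rng.uniform(0, upper_bound, n) < pdf(xs)
    return xs[accept], accept.mean()

# toy stand-in, strongly peaked at the edges of the interval
pdf = lambda x: 1 / (1.001 - x**2)
sample, efficiency = hit_or_miss(pdf, (-0.98, 0.98), pdf(0.98), 100_000)
efficiency
#+end_src

The flatter the density relative to its bound, the higher the efficiency, which is exactly what the later parametrisations exploit.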
Our distribution has a lot of variance, as can be seen by plotting it.
#+begin_src jupyter-python :exports both :results raw drawer

@ -474,7 +474,7 @@ at least a little bit better. The numeric inversion is horribly inefficient.
#+end_src

#+RESULTS:
: 0.07994628015406446
: 0.07893269815528076
<<cosθ-bare-eff>>

Nice! And now draw some histograms.

@ -513,166 +513,7 @@ save_fig(fig, 'histo_cos_theta', 'xs', size=(4,3))
#+end_src

#+RESULTS:
:RESULTS:
# [goto error]
#+begin_example
BrokenPipeErrorTraceback (most recent call last)
<ipython-input-188-c43713624618> in <module>
      1 fig, _ = draw_histo(cosθ_sample, r'$\cos\theta$')
----> 2 save_fig(fig, 'histo_cos_theta', 'xs', size=(4,3))
[... matplotlib call chain through the PGF backend elided; the stdin pipe to
the LaTeX helper process used for text metrics broke during fig.savefig ...]
BrokenPipeError: [Errno 32] Broken pipe
#+end_example
[[file:./.ob-jupyter/2109dc7fa61ccb899191ba1a68895df01b749783.png]]
:END:
[[file:./.ob-jupyter/83492aefe020fdbf846e8d1cfcd26a605ed9217e.png]]

*** Observables
Now we define some utilities to draw real 4-momentum samples.

@ -751,13 +592,13 @@ Let's try it out.
#+end_src

#+RESULTS:
: array([[100. , 38.24641519, 22.76579296, 89.5484807 ],
: [100. , 48.14652483, 29.97226738, -82.36246314],
: [100. , 65.02515029, 25.82470275, -71.44798498],
: array([[100. , 85.92614934, 24.50107541, -44.90427778],
: [100. , 36.88533839, 23.09524222, -90.03378032],
: [100. , 92.13915742, 33.63915073, 19.4623536 ],
: ...,
: [100. , 77.44294062, 27.84365663, 56.80952151],
: [100. , 51.47029015, 16.47983968, 84.13812522],
: [100. , 40.1313622 , 23.07278301, -88.64039966]])
: [100. , 54.741625 , 63.70705878, -54.2656904 ],
: [100. , 13.85807314, 17.1158491 , -97.54486926],
: [100. , 25.11845931, 16.28246943, -95.41459108]])

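The observables used below can be read straight off such [E, p_x, p_y, p_z] rows. A small sketch (hypothetical helpers; the real sampling utilities live in the author's code) using two rows from the output above:

#+begin_src jupyter-python :exports both
import numpy as np

def pseudo_rapidity(p):
    """η = artanh(p_z/|p|) for an array of [E, px, py, pz] rows."""
    p_abs = np.linalg.norm(p[:, 1:], axis=1)
    return np.arctanh(p[:, 3] / p_abs)

def transverse_momentum(p):
    """p_t = sqrt(px² + py²)."""
    return np.hypot(p[:, 1], p[:, 2])

four_momenta = np.array([[100.0, 38.24641519, 22.76579296, 89.5484807],
                         [100.0, 48.14652483, 29.97226738, -82.36246314]])
pseudo_rapidity(four_momenta), transverse_momentum(four_momenta)
#+end_src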
Now let's make a histogram of the η distribution.
#+begin_src jupyter-python :exports both :results raw drawer

@ -767,8 +608,8 @@ Now let's make a histogram of the η distribution.

#+RESULTS:
:RESULTS:
| <Figure | size | 432x288 | with | 1 | Axes> | <matplotlib.axes._subplots.AxesSubplot | at | 0x7f9f4c6f0700> |
[[file:./.ob-jupyter/095e1870e99249bdc8fc6132b3fb473a09d41a08.png]]
| <Figure | size | 432x288 | with | 1 | Axes> | <matplotlib.axes._subplots.AxesSubplot | at | 0x7f9f4ca17760> |
[[file:./.ob-jupyter/cadc1b1460534226187087ea3ca5d084fa5fa2d7.png]]
:END:

@ -780,8 +621,8 @@ And the same for the p_t (transverse momentum) distribution.

#+RESULTS:
:RESULTS:
| <Figure | size | 432x288 | with | 1 | Axes> | <matplotlib.axes._subplots.AxesSubplot | at | 0x7f9f4c266f40> |
[[file:./.ob-jupyter/47d65a874109d682e1f2ca862c1560d63060867c.png]]
| <Figure | size | 432x288 | with | 1 | Axes> | <matplotlib.axes._subplots.AxesSubplot | at | 0x7f9f4f3b7d30> |
[[file:./.ob-jupyter/4ffcb7836b3b067252cdeb95d9a6392550b0d262.png]]
:END:

That looks somewhat fishy, but it isn't.

@ -810,7 +651,7 @@ And again we see that the efficiency is way, way better...
#+end_src

#+RESULTS:
: 0.4063
: 0.40798
<<η-eff>>

Let's draw a histogram to compare with the previous results.

@ -820,8 +661,8 @@ Let's draw a histogram to compare with the previous results.

#+RESULTS:
:RESULTS:
| <Figure | size | 432x288 | with | 1 | Axes> | <matplotlib.axes._subplots.AxesSubplot | at | 0x7f9f4bdf4310> |
[[file:./.ob-jupyter/87c196391955136cfeb479ee23972bf971d855d3.png]]
| <Figure | size | 432x288 | with | 1 | Axes> | <matplotlib.axes._subplots.AxesSubplot | at | 0x7f9f4bef7ca0> |
[[file:./.ob-jupyter/14643279f5f2f9c909fce6d2349de0dfb32288f2.png]]
:END:

@ -879,8 +720,8 @@ distribution. We throw away the integral, but keep the increments.
#+end_src

#+RESULTS:
: array([-0.9866143 , -0.87001384, -0.7269742 , -0.51294698, -0.26484439,
: 0.00164873, 0.26764024, 0.5151555 , 0.72867548, 0.87054888,
: array([-0.9866143 , -0.95760273, -0.90040994, -0.77763674, -0.51247914,
: -0.00487403, 0.50718268, 0.77579038, 0.89960795, 0.9570795 ,
: 0.9866143 ])

Visualizing the increment borders gives us the information we want.

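The point of keeping the increments: =VEGAS= places the borders so that each increment carries roughly the same probability mass, so drawing an increment uniformly and then a point uniformly inside it approximates sampling from the target density, with a per-point weight proportional to the increment width. A self-contained sketch of that step, assuming only an array of borders like the one above:

#+begin_src jupyter-python :exports both
import numpy as np

def sample_from_increments(borders, n, rng=np.random.default_rng(4)):
    """Draw n points from the piecewise-constant density implied by VEGAS-style
    increment borders: every increment is equally probable, points are uniform
    within an increment.  Also return weights relative to a uniform density."""
    borders = np.asarray(borders)
    widths = np.diff(borders)
    idx = rng.integers(0, len(widths), n)         # pick an increment uniformly
    xs = borders[idx] + widths[idx] * rng.uniform(size=n)
    weights = widths[idx] * len(widths) / (borders[-1] - borders[0])
    return xs, weights

borders = np.array([-0.9866143, -0.95760273, -0.90040994, -0.77763674, -0.51247914,
                    -0.00487403, 0.50718268, 0.77579038, 0.89960795, 0.9570795,
                    0.9866143])
xs, weights = sample_from_increments(borders, 5)
xs, weights
#+end_src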
@ -898,8 +739,8 @@ Visualizing the increment borders gives us the information we want.

#+RESULTS:
:RESULTS:
: <matplotlib.legend.Legend at 0x7f9f4bdd9940>
[[file:./.ob-jupyter/1979e2f4e4298c8863a47e15c2ccd833b29302bc.png]]
: <matplotlib.legend.Legend at 0x7f9f4d3ce280>
[[file:./.ob-jupyter/702ff465cf640f3fcc89e257b525ed6d9532c159.png]]
:END:

We can now plot the reweighted distribution to observe the variance

@ -919,8 +760,8 @@ reduction visually.

#+RESULTS:
:RESULTS:
: <matplotlib.legend.Legend at 0x7f9f4bb7ebb0>
[[file:./.ob-jupyter/a506325aead3d6cf25f8e7cf5498c271cdc9ed7f.png]]
: <matplotlib.legend.Legend at 0x7f9f4ae0c490>
[[file:./.ob-jupyter/07e51a1bc82b177c00559879d2ee5d1dc43384da.png]]
:END:

@ -936,7 +777,7 @@ Now, draw a sample and look at the efficiency.
#+end_src

#+RESULTS:
: 0.0913
: 0.363

If we compare that to [[cosθ-bare-eff]], we can see the improvement.
It is even better than [[η-eff]]. The histogram looks just the same.

@ -947,4 +788,4 @@ save_fig(fig, 'histo_cos_theta_strat', 'xs', size=(4,3))
#+end_src

#+RESULTS:
[[file:./.ob-jupyter/a227f2678a85eae05938eadc8a2be8c9a7fd6835.png]]
[[file:./.ob-jupyter/06d680621fedb2b84b0f55586a7e2152ba47a27f.png]]

@ -1 +0,0 @@
\input{figs/xs/histo_cos_theta.pgf}

@ -1 +0,0 @@
\input{figs/xs/xs_integrand.pgf}

@ -1 +1 @@
\(\sigma = \SI{0.0544\pm 0.0009}{\pico\barn}\)
\(\sigma = \SI{0.0544\pm 0.0008}{\pico\barn}\)

@ -1 +1 @@
\(\sigma = \SI{0.05387\pm 0.00016}{\pico\barn}\)
\(\sigma = \SI{0.05380\pm 0.00016}{\pico\barn}\)

@ -1 +1 @@
\(\sigma = \SI{0.05382\pm 0.00006}{\pico\barn}\)
\(\sigma = \SI{0.05380\pm 0.00004}{\pico\barn}\)