after_optimize the maxima

hiro98 2020-05-19 12:01:50 +02:00
parent 764d6b9dfc
commit 22620ebbbb
39 changed files with 6855 additions and 6991 deletions

[24 binary image files changed; previews not shown.]

[6 file diffs suppressed because they are too large.]


@@ -13,7 +13,7 @@ import collections
from typing import Union, List, Tuple, Callable, Iterator
from multiprocessing import Pool, cpu_count
import functools
from scipy.optimize import minimize_scalar, root, shgo
from scipy.optimize import minimize_scalar, root, shgo, minimize
from dataclasses import dataclass
import utility
import joblib
@@ -161,8 +161,8 @@ def find_upper_bound(f, interval, **kwargs):
raise RuntimeError("Could not find an upper bound.")
def find_upper_bound_vector(f, interval):
result = shgo(_negate(f), bounds=interval, options=dict(maxfev=10000))
def find_upper_bound_vector(f, interval, x0=None, tolerance=0.01):
result = minimize(lambda x: -f(*x), x0=x0, bounds=interval, tol=tolerance)
if not result.success:
raise RuntimeError("Could not find an upper bound.", result)
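As an aside, a minimal sketch of what this change amounts to (the toy integrand f and the starting point x0 below are made up for illustration): instead of a global shgo search over the whole cube, the upper bound is now found by a local scipy.optimize.minimize run on the negated integrand, seeded with a point where the integrand is already known to be large, which is typically much cheaper.

import numpy as np
from scipy.optimize import minimize, shgo

def f(x, y):
    # toy stand-in for the real integrand
    return np.exp(-(x ** 2 + y ** 2))

interval = [(-1.0, 1.0), (-1.0, 1.0)]

# before: global search over the cube, no prior knowledge
old = shgo(lambda x: -f(*x), bounds=interval, options=dict(maxfev=10000))

# after: local search started from a point where f is known to be large,
# e.g. the argmax of the samples collected during integration
x0 = np.array([0.1, -0.2])
new = minimize(lambda x: -f(*x), x0=x0, bounds=interval, tol=0.01)

print(-old.fun, -new.fun)  # both approximate the maximum of f on the cube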
@@ -888,14 +888,15 @@ def integrate_vegas_nd(
cubes = generate_cubes(increment_borders)
volumes = [get_integration_volume(cube) for cube in cubes]
cube_samples = [np.empty(0) for _ in cubes]
cube_sample_points = [np.empty((0, ndim)) for _ in cubes]
total_points_per_cube = 0
maxima = np.empty(num_cubes)
maxima = np.empty((num_cubes, 1 + ndim))
integrals = np.empty(num_cubes)
@joblib.delayed
def evaluate_integrand(cube):
points = np.random.uniform(cube[:, 0], cube[:, 1], (points_per_cube, ndim)).T
return f(*points, **kwargs)
return f(*points, **kwargs), points
with joblib.Parallel(n_jobs=proc) as parallel:
while True:
@@ -903,9 +904,12 @@ def integrate_vegas_nd(
total_points_per_cube += points_per_cube
samples = parallel([evaluate_integrand(cube) for cube in cubes])
for sample, vol, i in zip(samples, volumes, range(num_cubes)):
for (sample, points), vol, i in zip(samples, volumes, range(num_cubes)):
# let's re-use the samples from earlier runs
cube_samples[i] = np.concatenate([cube_samples[i], sample]).T
cube_sample_points[i] = np.concatenate(
[cube_sample_points[i], points.T]
)
points = cube_samples[i] * vol
curr_integral = points.mean()
integral += curr_integral
@@ -913,7 +917,12 @@ def integrate_vegas_nd(
cube_samples[i].var() * (vol ** 2) / (total_points_per_cube - 1)
)
maxima[i] = cube_samples[i].max()
max_index = cube_samples[i].argmax()
maxima[i] = [
cube_samples[i][max_index],
*cube_sample_points[i][max_index],
]
integrals[i] = curr_integral
if np.sqrt(variance) <= epsilon:
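For illustration, a small sketch of the bookkeeping introduced here (the sample data below is random and purely illustrative): each row of maxima now stores the largest sampled value of a cube together with the coordinates at which it was drawn, rather than the value alone.

import numpy as np

ndim, num_cubes, n_samples = 2, 3, 100
maxima = np.empty((num_cubes, 1 + ndim))

# stand-ins for the per-cube sample values and the points they were drawn at
cube_samples = [np.random.rand(n_samples) for _ in range(num_cubes)]
cube_sample_points = [np.random.rand(n_samples, ndim) for _ in range(num_cubes)]

for i in range(num_cubes):
    max_index = cube_samples[i].argmax()
    # column 0: the maximal value; columns 1..ndim: where it occurred
    maxima[i] = [cube_samples[i][max_index], *cube_sample_points[i][max_index]]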
@@ -1057,6 +1066,10 @@ def sample_stratified_vector(
integral = weights.sum()
weights = np.cumsum(weights / integral)
maxima = np.array(
[find_upper_bound_vector(f, cube[0], cube[1][1:]) for cube in cubes]
)
# compiling this function results in quite a speed-up
@numba.jit(numba.int32(), nopython=True)
def find_next_index():
@@ -1074,7 +1087,8 @@ def sample_stratified_vector(
cube_index = find_next_index()
counter = 0
cube, maximum, _ = cubes[cube_index]
cube, _, _ = cubes[cube_index]
maximum = maxima[cube_index]
while counter < chunk_size:
points = np.random.uniform(
@@ -1108,7 +1122,7 @@ the data about subvolumes.
"""
weights = np.array([weight for _, _, weight in cubes if weight > 0])
maxima = np.array([maximum for _, maximum, weight in cubes if weight > 0])
maxima = np.array([maximum[0] for _, maximum, weight in cubes if weight > 0])
volumes = np.array(
[get_integration_volume(cube) for cube, _, weight in cubes if weight > 0]
)
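Tying the two changes together, a hedged sketch of how the stored maxima appear to be used downstream (the cube layout (bounds, [max_value, *max_location], weight), the toy integrand, and the return value of find_upper_bound_vector are assumptions based on the surrounding diff): the location of the largest sample seeds the local search that refines each cube's upper bound before rejection sampling.

import numpy as np
from scipy.optimize import minimize

def find_upper_bound_vector(f, interval, x0=None, tolerance=0.01):
    # local search from x0, as in the new version above;
    # assumed to return the bound itself
    result = minimize(lambda x: -f(*x), x0=x0, bounds=interval, tol=tolerance)
    if not result.success:
        raise RuntimeError("Could not find an upper bound.", result)
    return -result.fun

def f(x, y):
    return np.exp(-(x ** 2 + y ** 2))  # toy stand-in for the real integrand

# assumed cube layout: (bounds, [max_value, *max_location], weight)
cubes = [
    (np.array([[-1.0, 0.0], [-1.0, 1.0]]), np.array([0.9, -0.1, 0.2]), 0.5),
    (np.array([[0.0, 1.0], [-1.0, 1.0]]), np.array([0.8, 0.3, -0.4]), 0.5),
]

# refine each cube's upper bound, starting from where its samples peaked
maxima = np.array(
    [find_upper_bound_vector(f, cube[0], cube[1][1:]) for cube in cubes]
)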


@@ -27,8 +27,6 @@ from tangled import observables
#+END_SRC
#+RESULTS:
: The autoreload extension is already loaded. To reload it, use:
: %reload_ext autoreload
** Global Config
#+begin_src jupyter-python :exports both :results raw drawer
@@ -522,7 +520,7 @@ And calculate the XS.
alpha=1.8,
num_increments=increments,
num_points_per_cube=10,
cache="cache/pdf/total_xs_2_5_20_take19",
cache="cache/pdf/total_xs_2_5_20_take20",
)
total_xs = gev_to_pb(np.array(xs_int_res.combined_result)) * 2 * np.pi
@@ -532,7 +530,7 @@ And calculate the XS.
#+RESULTS:
:RESULTS:
: Loading Cache: integrate_vegas_nd
: array([3.86636603e+01, 2.05680570e-02])
: array([3.86911687e+01, 1.96651183e-02])
:END:
#+begin_src jupyter-python :exports both :results raw drawer
@@ -551,7 +549,7 @@ there are two identical protons.
#+end_src
#+RESULTS:
: 0.04327163497143981
: 0.01666617136615391
The efficiency will be around:
#+begin_src jupyter-python :exports both :results raw drawer
@@ -559,7 +557,7 @@ The efficiency will be around:
#+end_src
#+RESULTS:
: 34.55338145802887
: 32.89204347314658
Let's export those results for TeX:
#+begin_src jupyter-python :exports both :results raw drawer
@@ -724,7 +722,7 @@ figure out the cpu mapping.
cubes=xs_int_res.cubes,
proc="auto",
report_efficiency=True,
cache="cache/pdf/total_xs_10000_000_2_5_take6",
cache="cache/pdf/total_xs_10000_000_2_5_take8",
status_path="/tmp/status1",
overestimate_factor=overestimate,
)
@@ -734,7 +732,7 @@ figure out the cpu mapping.
#+RESULTS:
:RESULTS:
: Loading Cache: sample_unweighted_array
: 0.30393438761119607
: 0.29610040880251154
:END:
That does look pretty good, eh? So let's save it along with the sample size.
@@ -763,7 +761,7 @@ Let's look at a histogram of eta samples.
#+RESULTS:
:RESULTS:
: 10000000
[[file:./.ob-jupyter/4fdcf3895a4c50ebc772054351beb7a07022f64c.png]]
[[file:./.ob-jupyter/0b1b4f39201dac86ebfbfb8953561cfe81a6c70f.png]]
:END:
Let's use a uniform histogram image size.
@@ -792,7 +790,7 @@ And now we compare all the observables with sherpa.
#+end_src
#+RESULTS:
[[file:./.ob-jupyter/e072f341c61916bbc430d0817d207b920a2fc9c2.png]]
[[file:./.ob-jupyter/8d9bb03fda7305d6effe0cc24b4dcdccc1582ccf.png]]
Hah! There we have it!
@@ -823,7 +821,7 @@ both equal.
#+end_src
#+RESULTS:
[[file:./.ob-jupyter/79e756f2524fe167b531a82b217dc11c3bc7a496.png]]
[[file:./.ob-jupyter/9a7122aa4a3690c2785678f85d8ac1919de1187d.png]]
The invariant mass is not constant anymore.
#+begin_src jupyter-python :exports both :results raw drawer
@@ -848,7 +846,7 @@ The invariant mass is not constant anymore.
#+end_src
#+RESULTS:
[[file:./.ob-jupyter/29e6b16eb0dda91744aefa36bf80359200a63b45.png]]
[[file:./.ob-jupyter/65d1b44d76a5f42086735c41d54bd8c6e980e942.png]]
The cosθ distribution looks more like the partonic one.
#+begin_src jupyter-python :exports both :results raw drawer
@@ -867,7 +865,7 @@ The cosθ distribution looks more like the partonic one.
#+end_src
#+RESULTS:
[[file:./.ob-jupyter/0b56070aaf7db99ca2814c1d96810e46bcdac802.png]]
[[file:./.ob-jupyter/0358d3386a74d6aa6d2d8baeb8e94f216acdab13.png]]
#+begin_src jupyter-python :exports both :results raw drawer
@@ -889,7 +887,7 @@ The cosθ distribution looks more like the partonic one.
#+end_src
#+RESULTS:
[[file:./.ob-jupyter/fa40f10260d89e3188aa8a526d1332aa49211a67.png]]
[[file:./.ob-jupyter/89103546935fc538120e54de7c7d4812accd7a58.png]]
In this case the opening angles are the same because the CS frame is
the same as the ordinary rest frame. The z-axis is the beam axis
@@ -913,4 +911,4 @@ because pT=0!
#+end_src
#+RESULTS:
[[file:./.ob-jupyter/61220a314b39b71a585feadf17441891eb4b54a5.png]]
[[file:./.ob-jupyter/ac492be843ffd5262cc6d9b61b8e0eda4cd60b5c.png]]


@@ -1 +1 @@
\(\sigma = \SI{38.664\pm 0.021}{\pico\barn}\)
\(\sigma = \SI{38.691\pm 0.020}{\pico\barn}\)