#+PROPERTY: header-args :exports both :output-dir results :session xs :kernel python3
#+HTML_HEAD:
#+OPTIONS: html-style:nil
#+HTML_CONTAINER: section
#+TITLE: Investigaton of Monte-Carlo Methods
#+AUTHOR: Valentin Boettcher

* Init
** Required Modules
#+NAME: e988e3f2-ad1f-49a3-ad60-bedba3863283
#+begin_src jupyter-python :exports both :tangle tangled/xs.py
import numpy as np
import matplotlib.pyplot as plt
import monte_carlo
#+end_src

#+RESULTS: e988e3f2-ad1f-49a3-ad60-bedba3863283

** Utilities
#+NAME: 53548778-a4c1-461a-9b1f-0f401df12b08
#+BEGIN_SRC jupyter-python :exports both
%run ../utility.py
%load_ext autoreload
%aimport monte_carlo
%autoreload 1
#+END_SRC

#+RESULTS: 53548778-a4c1-461a-9b1f-0f401df12b08

* Implementation
#+NAME: 777a013b-6c20-44bd-b58b-6a7690c21c0e
#+BEGIN_SRC jupyter-python :exports both :results raw drawer :exports code :tangle tangled/xs.py
"""
Implementation of the analytical cross section for
q q_bar -> gamma gamma.

Author: Valentin Boettcher
"""
import numpy as np

# NOTE: a more elegant solution would be a decorator
def energy_factor(charge, esp):
    """
    Calculates the energy-and-charge factor common to all the cross
    sections in this module: charge⁴·α²/(6·esp²), with α = 1/137.036.

    Arguments:
    esp -- center of momentum energy in GeV
    charge -- charge of the particle in units of the elementary charge
    """
    return charge**4/(137.036*esp)**2/6

def diff_xs(θ, charge, esp):
    """
    Calculates the differential cross section dσ/dΩ as a function of
    the polar angle θ in units of 1/GeV².
    Here dΩ=sinθdθdφ.

    Arguments:
    θ -- polar angle
    esp -- center of momentum energy in GeV
    charge -- charge of the particle in units of the elementary charge
    """
    f = energy_factor(charge, esp)
    return f*((np.cos(θ)**2+1)/np.sin(θ)**2)

def diff_xs_cosθ(cosθ, charge, esp):
    """
    Calculates the differential cross section as a function of the
    cosine of the polar angle θ in units of 1/GeV².
    Here dΩ=d(cosθ)dφ.

    Arguments:
    cosθ -- cosine of the polar angle
    esp -- center of momentum energy in GeV
    charge -- charge of the particle in units of the elementary charge
    """
    f = energy_factor(charge, esp)
    return f*((cosθ**2+1)/(1-cosθ**2))

def diff_xs_eta(η, charge, esp):
    """
    Calculates the differential cross section as a function of the
    pseudo rapidity of the photons in units of 1/GeV^2.
    This is actually the cross section dσ/(dφdη).

    Arguments:
    η -- pseudo rapidity
    esp -- center of momentum energy in GeV
    charge -- charge of the particle in units of the elementary charge
    """
    f = energy_factor(charge, esp)
    return f*(np.tanh(η)**2 + 1)

def diff_xs_p_t(p_t, charge, esp):
    """
    Calculates the differential cross section as a function of the
    transverse momentum (p_t) of the photons in units of 1/GeV^2.
    This is actually the cross section dσ/(dφdp_t).

    Arguments:
    p_t -- transverse momentum in GeV
    esp -- center of momentum energy in GeV
    charge -- charge of the particle in units of the elementary charge
    """
    f = energy_factor(charge, esp)
    # the Jacobian |dcosθ/dp_t| diverges at p_t = esp/2
    sqrt_fact = np.sqrt(1-(2*p_t/esp)**2)
    return f/p_t*(1/sqrt_fact + sqrt_fact)

def total_xs_eta(η, charge, esp):
    """
    Calculates the total cross section as a function of the pseudo
    rapidity of the photons in units of 1/GeV^2. If the rapidity is
    specified as a tuple, it is interpreted as an interval. Otherwise
    the interval [-η, η] will be used.

    Arguments:
    η -- pseudo rapidity (tuple or number)
    esp -- center of momentum energy in GeV
    charge -- charge of the particle in units of the elementary charge
    """
    f = energy_factor(charge, esp)
    if not isinstance(η, tuple):
        η = (-η, η)

    if len(η) != 2:
        raise ValueError('Invalid η cut.')

    # F is an antiderivative of -(tanh²η + 1)
    def F(x):
        return np.tanh(x) - 2*x

    return 2*np.pi*f*(F(η[0]) - F(η[1]))
#+END_SRC

#+RESULTS: 777a013b-6c20-44bd-b58b-6a7690c21c0e

* Calculations
First, set up the input parameters.
#+BEGIN_SRC jupyter-python :exports both :results raw drawer
η = 2.5
charge = 1/3
esp = 200  # GeV
#+END_SRC

#+RESULTS:

Set up the integration and plot intervals.
#+begin_src jupyter-python :exports both :results raw drawer
interval_η = [-η, η]
interval = η_to_θ([-η, η])
interval_cosθ = np.cos(interval)
interval_pt = np.sort(η_to_pt([0, η], esp/2))
plot_interval = [0.1, np.pi-.1]
#+end_src

#+RESULTS:

#+begin_note
Note that we could utilize the symmetry of the integrand throughout,
but that doesn't reduce variance and would complicate things now.
#+end_note

** Analytical Integration
And now calculate the cross section in picobarn.
#+BEGIN_SRC jupyter-python :exports both :results raw file :file xs.tex
xs_gev = total_xs_eta(η, charge, esp)
xs_pb = gev_to_pb(xs_gev)
tex_value(xs_pb, unit=r'\pico\barn', prefix=r'\sigma = ', prec=6, save=('results', 'xs.tex'))
#+END_SRC

#+RESULTS:
: \(\sigma = \SI{0.053793}{\pico\barn}\)

Let's plot the total xs as a function of η.
#+begin_src jupyter-python :exports both :results raw drawer
fig, ax = set_up_plot()
η_s = np.linspace(0, 3, 1000)
ax.plot(η_s, gev_to_pb(total_xs_eta(η_s, charge, esp)))
ax.set_xlabel(r'$\eta$')
ax.set_ylabel(r'$\sigma$ [pb]')
ax.set_xlim([0, max(η_s)])
ax.set_ylim(0)
save_fig(fig, 'total_xs', 'xs', size=[2.5, 2.5])
#+end_src

#+RESULTS:
[[file:./.ob-jupyter/4522eb3fbeaa14978f9838371acb0650910b8dbf.png]]

Compared to sherpa, it's pretty close.
#+NAME: 81b5ed93-0312-45dc-beec-e2ba92e22626
#+BEGIN_SRC jupyter-python :exports both :results raw drawer
sherpa = 0.05380
xs_pb - sherpa
#+END_SRC

#+RESULTS: 81b5ed93-0312-45dc-beec-e2ba92e22626
: -6.7112594623469635e-06

I had to set the runcard option ~EW_SCHEME: alpha0~ to use the pure
QED coupling constant.
** Numerical Integration Plot our nice distribution: #+begin_src jupyter-python :exports both :results raw drawer plot_points = np.linspace(*plot_interval, 1000) fig, ax = set_up_plot() ax.plot(plot_points, gev_to_pb(diff_xs(plot_points, charge=charge, esp=esp))) ax.set_xlabel(r'$\theta$') ax.set_ylabel(r'$d\sigma/d\Omega$ [pb]') ax.set_xlim([plot_points.min(), plot_points.max()]) ax.axvline(interval[0], color='gray', linestyle='--') ax.axvline(interval[1], color='gray', linestyle='--', label=rf'$|\eta|={η}$') ax.legend() save_fig(fig, 'diff_xs', 'xs', size=[2.5, 2.5]) #+end_src #+RESULTS: [[file:./.ob-jupyter/3dd905e7608b91a9d89503cb41660152f3b4b55c.png]] Define the integrand. #+begin_src jupyter-python :exports both :results raw drawer def xs_pb_int(θ): return 2*np.pi*gev_to_pb(np.sin(θ)*diff_xs(θ, charge=charge, esp=esp)) def xs_pb_int_η(η): return 2*np.pi*gev_to_pb(diff_xs_eta(η, charge, esp)) #+end_src #+RESULTS: Plot the integrand. # TODO: remove duplication #+begin_src jupyter-python :exports both :results raw drawer fig, ax = set_up_plot() ax.plot(plot_points, xs_pb_int(plot_points)) ax.set_xlabel(r'$\theta$') ax.set_ylabel(r'$2\pi\cdot d\sigma/d\theta [pb]') ax.set_xlim([plot_points.min(), plot_points.max()]) ax.axvline(interval[0], color='gray', linestyle='--') ax.axvline(interval[1], color='gray', linestyle='--', label=rf'$|\eta|={η}$') save_fig(fig, 'xs_integrand', 'xs', size=[3, 2.2]) #+end_src #+RESULTS: [[file:./.ob-jupyter/ccb6653162c81c3f3e843225cb8d759178f497e0.png]] *** Integral over θ Intergrate σ with the mc method. #+begin_src jupyter-python :exports both :results raw drawer xs_pb_res = monte_carlo.integrate(xs_pb_int, interval, epsilon=1e-3) xs_pb_res #+end_src #+RESULTS: : IntegrationResult(result=0.05409249508068646, sigma=0.0007897018499681794, N=3286) We gonna export that as tex. 
#+begin_src jupyter-python :exports both :results raw drawer
tex_value(*xs_pb_res.combined_result, unit=r'\pico\barn', prefix=r'\sigma = ', save=('results', 'xs_mc.tex'))
tex_value(xs_pb_res.N, prefix=r'N = ', save=('results', 'xs_mc_N.tex'))
#+end_src

#+RESULTS:
: \(N = 3286\)

*** Integration over η
Plot the integrand over the pseudo rapidity.
#+begin_src jupyter-python :exports both :results raw drawer
fig, ax = set_up_plot()
points = np.linspace(-4, 4, 1000)
ax.set_xlim([-4, 4])
ax.plot(points, xs_pb_int_η(points))
ax.set_xlabel(r'$\eta$')
ax.set_ylabel(r'$2\pi\cdot d\sigma/d\eta$ [pb]')
ax.axvline(interval_η[0], color='gray', linestyle='--')
ax.axvline(interval_η[1], color='gray', linestyle='--', label=rf'$|\eta|={η}$')
save_fig(fig, 'xs_integrand_eta', 'xs', size=[3, 2])
#+end_src

#+RESULTS:
[[file:./.ob-jupyter/87a932866f779a2a07abed4ca251fa98113beca7.png]]

#+begin_src jupyter-python :exports both :results raw drawer
xs_pb_η = monte_carlo.integrate(xs_pb_int_η, interval_η, epsilon=1e-3)
xs_pb_η
#+end_src

#+RESULTS:
: IntegrationResult(result=0.05514934067591144, sigma=0.0009042241764415168, N=135)

As we see, the result is a little better if we use pseudo rapidity,
because the differential cross section does not diverge anymore. But
because our η interval is covering the range where all the variance is
occurring, the improvement is rather marginal.

And yet again export that as tex.
#+begin_src jupyter-python :exports both :results raw drawer
tex_value(*xs_pb_η.combined_result, unit=r'\pico\barn', prefix=r'\sigma = ', save=('results', 'xs_mc_eta.tex'))
tex_value(xs_pb_η.N, prefix=r'N = ', save=('results', 'xs_mc_eta_N.tex'))
#+end_src

#+RESULTS:
: \(N = 135\)

*** Using =VEGAS=
Now we use =VEGAS= on the θ parametrisation and see what happens.
#+begin_src jupyter-python :exports both :results raw drawer xs_pb_vegas, xs_pb_vegas_σ, xs_θ_intervals = \ monte_carlo.integrate_vegas(xs_pb_int, interval, num_increments=20, alpha=4, point_density=1000, acumulate=True) xs_pb_vegas, xs_pb_vegas_σ #+end_src #+RESULTS: | 0.0538027817458234 | 5.484354854125085e-05 | This is pretty good, although the variance reduction may be achieved partially by accumulating the results from all runns. The uncertainty is being overestimated! And export that as tex. #+begin_src jupyter-python :exports both :results raw drawer tex_value(xs_pb_vegas, xs_pb_vegas_σ, unit=r'\pico\barn', prefix=r'\sigma = ', save=('results', 'xs_mc_θ_vegas.tex')) #+end_src #+RESULTS: : \(\sigma = \SI{0.05380\pm 0.00005}{\pico\barn}\) Surprisingly, without acumulation, the result ain't much different. This depends, of course, on the iteration count. #+begin_src jupyter-python :exports both :results raw drawer monte_carlo.integrate_vegas(xs_pb_int, interval, num_increments=20, alpha=4, point_density=1000, acumulate=False)[0:2] #+end_src #+RESULTS: | 0.05374045987757417 | 8.173017257626389e-05 | *** Testing the Statistics Let's battle test the statistics. #+begin_src jupyter-python :exports both :results raw drawer num_runs = 1000 num_within = 0 for _ in range(num_runs): val, err = \ monte_carlo.integrate(xs_pb_int, interval, epsilon=1e-3).combined_result if abs(xs_pb - val) <= err: num_within += 1 num_within/num_runs #+end_src #+RESULTS: : 0.689 So we see: the standard deviation is sound. Doing the same thing with =VEGAS= works as well. #+begin_src jupyter-python :exports both :results raw drawer num_runs = 1000 num_within = 0 for _ in range(num_runs): val, err, _ = \ monte_carlo.integrate_vegas(xs_pb_int, interval, num_increments=8, alpha=1, point_density=1000, acumulate=False) if abs(xs_pb - val) <= err: num_within += 1 num_within/num_runs #+end_src #+RESULTS: : 0.67 ** Sampling and Analysis Define the sample number. 
#+begin_src jupyter-python :exports both :results raw drawer sample_num = 1000 #+end_src #+RESULTS: Let's define shortcuts for our distributions. The 2π are just there for formal correctnes. Factors do not influecence the outcome. #+begin_src jupyter-python :exports both :results raw drawer def dist_cosθ(x): return gev_to_pb(diff_xs_cosθ(x, charge, esp))*2*np.pi def dist_η(x): return gev_to_pb(diff_xs_eta(x, charge, esp))*2*np.pi #+end_src #+RESULTS: *** Sampling the cosθ cross section Now we monte-carlo sample our distribution. We observe that the efficiency his very bad! #+begin_src jupyter-python :exports both :results raw drawer cosθ_sample, cosθ_efficiency = \ monte_carlo.sample_unweighted_array(sample_num, dist_cosθ, interval_cosθ, report_efficiency=True) cosθ_efficiency #+end_src #+RESULTS: : 0.028076221366308534 Our distribution has a lot of variance, as can be seen by plotting it. #+begin_src jupyter-python :exports both :results raw drawer pts = np.linspace(*interval_cosθ, 100) fig, ax = set_up_plot() ax.plot(pts, dist_cosθ(pts)) ax.set_xlabel(r'$\cos\theta$') ax.set_ylabel(r'$\frac{d\sigma}{d\Omega}$') #+end_src #+RESULTS: :RESULTS: : Text(0, 0.5, '$\\frac{d\\sigma}{d\\Omega}$') [[file:./.ob-jupyter/6921725d93ce91ce1e0364e6f745d46f3a76b3f2.png]] :END: We define a friendly and easy to integrate upper limit function. #+begin_src jupyter-python :exports both :results raw drawer upper_limit = dist_cosθ(interval_cosθ[0]) \ /interval_cosθ[0]**2 upper_base = dist_cosθ(0) def upper(x): return upper_base + upper_limit*x**2 def upper_int(x): return upper_base*x + upper_limit*x**3/3 ax.plot(pts, upper(pts), label='Upper bound') ax.legend() ax.set_xlabel(r'$\cos\theta$') ax.set_ylabel(r'$\frac{d\sigma}{d\Omega}$') save_fig(fig, 'upper_bound', 'xs_sampling', size=(4, 4)) fig #+end_src #+RESULTS: [[file:./.ob-jupyter/ddfcebac4157ce417e5b868a88731d554c726141.png]] To increase our efficiency, we have to specify an upper bound. That is at least a little bit better. 
The numeric inversion is horribly inefficent. #+begin_src jupyter-python :exports both :results raw drawer cosθ_sample, cosθ_efficiency = \ monte_carlo.sample_unweighted_array(sample_num, dist_cosθ, interval_cosθ, report_efficiency=True, upper_bound=[upper, upper_int]) cosθ_efficiency #+end_src #+RESULTS: : 0.07981958240421651 <> Nice! And now draw some histograms. We define an auxilliary method for convenience. #+begin_src jupyter-python :exports both :results raw drawer :tangle tangled/plot_utils.py """ Some shorthands for common plotting tasks related to the investigation of monte-carlo methods in one rimension. Author: Valentin Boettcher """ import matplotlib.pyplot as plt def draw_histo(points, xlabel, bins=20): heights, edges = np.histogram(points, bins) centers = (edges[1:] + edges[:-1])/2 deviations = np.sqrt(heights) fig, ax = set_up_plot() ax.errorbar(centers, heights, deviations, linestyle='none', color='orange') ax.step(edges, [heights[0], *heights], color='#1f77b4') ax.set_xlabel(xlabel) ax.set_xlim([points.min(), points.max()]) return fig, ax #+end_src #+RESULTS: The histogram for cosθ. #+begin_src jupyter-python :exports both :results raw drawer fig, _ = draw_histo(cosθ_sample, r'$\cos\theta$') save_fig(fig, 'histo_cos_theta', 'xs', size=(4,3)) #+end_src #+RESULTS: [[file:./.ob-jupyter/57f63adafce401329082b5362df69e3bfd651738.png]] *** Observables Now we define some utilities to draw real 4-momentum samples. #+begin_src jupyter-python :exports both :tangle tangled/xs.py def sample_momenta(sample_num, interval, charge, esp, seed=None): """Samples `sample_num` unweighted photon 4-momenta from the cross-section. 
:param sample_num: number of samples to take :param interval: cosθ interval to sample from :param charge: the charge of the quark :param esp: center of mass energy :param seed: the seed for the rng, optional, default is system time :returns: an array of 4 photon momenta :rtype: np.ndarray """ cosθ_sample = \ monte_carlo.sample_unweighted_array(sample_num, lambda x: diff_xs_cosθ(x, charge, esp), interval_cosθ) φ_sample = np.random.uniform(0, 1, sample_num) def make_momentum(esp, cosθ, φ): sinθ = np.sqrt(1-cosθ**2) return np.array([1, sinθ*np.cos(φ), sinθ*np.sin(φ), cosθ])*esp/2 momenta = np.array([make_momentum(esp, cosθ, φ) \ for cosθ, φ in np.array([cosθ_sample, φ_sample]).T]) return momenta #+end_src #+RESULTS: To generate histograms of other obeservables, we have to define them as functions on 4-impuleses. Using those to transform samples is analogous to transforming the distribution itself. #+begin_src jupyter-python :session obs :exports both :results raw drawer :tangle tangled/observables.py """This module defines some observables on arrays of 4-pulses.""" import numpy as np def p_t(p): """Transverse momentum :param p: array of 4-momenta """ return np.linalg.norm(p[:,1:3], axis=1) def η(p): """Pseudo rapidity. :param p: array of 4-momenta """ return np.arccosh(np.linalg.norm(p[:,1:], axis=1)/p_t(p))*np.sign(p[:, 3]) #+end_src #+RESULTS: And import them. #+begin_src jupyter-python :exports both :results raw drawer %aimport tangled.observables obs = tangled.observables #+end_src #+RESULTS: Lets try it out. #+begin_src jupyter-python :exports both :results raw drawer momentum_sample = sample_momenta(2000, interval_cosθ, charge, esp) momentum_sample #+end_src #+RESULTS: : array([[100. , 48.96033819, 57.4112904 , -65.62643537], : [100. , 41.96222688, 20.41198939, 88.44502363], : [100. , 58.16555642, 25.62592984, -77.20155287], : ..., : [100. , 60.81855224, 5.52673184, 79.1868609 ], : [100. , 39.11244981, 43.57566968, 81.06403198], : [100. 
, 63.55704011, 12.99669204, 76.10248779]]) Now let's make a histogram of the η distribution. #+begin_src jupyter-python :exports both :results raw drawer η_sample = obs.η(momentum_sample) draw_histo(η_sample, r'$\eta$') #+end_src #+RESULTS: :RESULTS: |
| | [[file:./.ob-jupyter/e04af38d1dcc52c09ded26b65234a071f2386835.png]] :END: And the same for the p_t (transverse momentum) distribution. #+begin_src jupyter-python :exports both :results raw drawer p_t_sample = obs.p_t(momentum_sample) draw_histo(p_t_sample, r'$p_T$ [GeV]') #+end_src #+RESULTS: :RESULTS: |
| | [[file:./.ob-jupyter/b9671fcec48bc029f5fe7e1cafb23a219045d90b.png]] :END: That looks somewhat fishy, but it isn't. #+begin_src jupyter-python :exports both :results raw drawer fig, ax = set_up_plot() points = np.linspace(interval_pt[0], interval_pt[1] - .01, 1000) ax.plot(points, gev_to_pb(diff_xs_p_t(points, charge, esp))) ax.set_xlabel(r'$p_T$') ax.set_xlim(interval_pt[0], interval_pt[1] + 1) ax.set_ylim([0, gev_to_pb(diff_xs_p_t(interval_pt[1] -.01, charge, esp))]) ax.set_ylabel(r'$\frac{d\sigma}{dp_t}$ [pb]') save_fig(fig, 'diff_xs_p_t', 'xs_sampling', size=[4, 3]) #+end_src #+RESULTS: [[file:./.ob-jupyter/e127df693158dd4194b53f0a6f66ca2fca18af41.png]] this is strongly peaked at p_t=100GeV. (The jacobian goes like 1/x there!) *** Sampling the η cross section An again we see that the efficiency is way, way! better... #+begin_src jupyter-python :exports both :results raw drawer η_sample, η_efficiency = \ monte_carlo.sample_unweighted_array(sample_num, dist_η, interval_η, report_efficiency=True) η_efficiency #+end_src #+RESULTS: : 0.40801 <<η-eff>> Let's draw a histogram to compare with the previous results. #+begin_src jupyter-python :exports both :results raw drawer draw_histo(η_sample, r'$\eta$') #+end_src #+RESULTS: :RESULTS: |
| | [[file:./.ob-jupyter/e2afd8b997280a89d7bcbf617018797c57909f79.png]] :END: Looks good to me :). *** Sampling with =VEGAS= Let's define some little helpers. #+begin_src jupyter-python :exports both :tangle tangled/plot_utils.py def plot_increments(ax, increment_borders, label=None, *args, **kwargs): """Plot the increment borders from a list. The first and last one :param ax: the axis on which to draw :param list increment_borders: the borders of the increments :param str label: the label to apply to one of the vertical lines """ ax.axvline(x=increment_borders[1], label=label, *args, **kwargs) for increment in increment_borders[2:-1]: ax.axvline(x=increment, *args, **kwargs) def plot_vegas_weighted_distribution(ax, points, dist, increment_borders, *args, **kwargs): """Plot the distribution with VEGAS weights applied. :param ax: axis :param points: points :param dist: distribution :param increment_borders: increment borders """ num_increments = increment_borders.size weighted_dist = dist.copy() for left_border, right_border in zip(increment_borders[:-1], increment_borders[1:]): length = right_border - left_border mask = (left_border <= points) & (points <= right_border) weighted_dist[mask] = dist[mask]*num_increments*length ax.plot(points, weighted_dist, *args, **kwargs) #+end_src #+RESULTS: To get the increments, we have to let =VEGAS= loose on our distribution. We throw away the integral, but keep the increments. #+begin_src jupyter-python :exports both :results raw drawer _, _, increments = monte_carlo.integrate_vegas(dist_cosθ, interval_cosθ, num_increments=10, alpha=1, epsilon=.01) increments #+end_src #+RESULTS: : array([-0.9866143 , -0.90976814, -0.80054812, -0.61044436, -0.33330198, : 0.00274503, 0.33807643, 0.61258903, 0.80033334, 0.90956851, : 0.9866143 ]) Visualizing the increment borders gives us the information we want. 
#+begin_src jupyter-python :exports both :results raw drawer pts = np.linspace(*interval_cosθ, 100) fig, ax = set_up_plot() ax.plot(pts, dist_cosθ(pts)) ax.set_xlabel(r'$\cos\theta$') ax.set_ylabel(r'$\frac{d\sigma}{d\Omega}$') ax.set_xlim(*interval_cosθ) plot_increments(ax, increments, label='Increment Borderds', color='gray', linestyle='--') ax.legend() #+end_src #+RESULTS: :RESULTS: : [[file:./.ob-jupyter/487a63dc470d0aa11e8ea6c8fedd3509fc81bb47.png]] :END: We can now plot the reweighted distribution to observe the variance reduction visually. #+begin_src jupyter-python :exports both :results raw drawer pts = np.linspace(*interval_cosθ, 1000) fig, ax = set_up_plot() plot_vegas_weighted_distribution(ax, pts, dist_cosθ(pts), increments) ax.set_xlabel(r'$\cos\theta$') ax.set_ylabel(r'$\frac{d\sigma}{d\Omega}$') ax.set_xlim(*interval_cosθ) plot_increments(ax, increments, label='Increment Borderds', color='gray', linestyle='--') ax.legend() #+end_src #+RESULTS: :RESULTS: : [[file:./.ob-jupyter/a4d7908401fba70cd0d8550f21c2151a0f3a2b8c.png]] :END: I am batman! Now, draw a sample and look at the efficiency. #+begin_src jupyter-python :exports both :results raw drawer cosθ_sample_strat, cosθ_efficiency_strat = \ monte_carlo.sample_unweighted_array(sample_num, dist_cosθ, increment_borders=increments, report_efficiency=True) cosθ_efficiency_strat #+end_src #+RESULTS: : 0.1365 If we compare that to [[cosθ-bare-eff]], we can see the improvement :P. It is even better the [[η-eff]]. The histogram looks just the same. #+begin_src jupyter-python :exports both :results raw drawer fig, _ = draw_histo(cosθ_sample_strat, r'$\cos\theta$') save_fig(fig, 'histo_cos_theta_strat', 'xs', size=(4,3)) #+end_src #+RESULTS: [[file:./.ob-jupyter/fe3cad135a6a8264071e025a7e8debdb15a79677.png]]