diff --git a/doc/source/StocProc_KLE.rst b/doc/source/StocProc_KLE.rst index ef1002b..2500c38 100644 --- a/doc/source/StocProc_KLE.rst +++ b/doc/source/StocProc_KLE.rst @@ -8,6 +8,8 @@ StocProc_KLE :inherited-members: :undoc-members: + .. automethod:: __call__ + .. py:currentmodule:: stocproc.method_kle .. autofunction:: stocproc.method_kle.solve_hom_fredholm .. autofunction:: stocproc.method_kle.get_mid_point_weights_times diff --git a/stocproc/method_kle.py b/stocproc/method_kle.py index 562480d..d58598b 100644 --- a/stocproc/method_kle.py +++ b/stocproc/method_kle.py @@ -48,7 +48,7 @@ def solve_hom_fredholm(r, w): .. note:: It has been noticed that the performance of the various weights depends on the auto correlation - function. As default one should use the simpson weights. 'four point', 'gauss legendre' and 'tanh sinh' + function. As default one should use the 'simpson weights'. 'four point', 'gauss legendre' and 'tanh sinh' might perform better for auto correlation function that decay slowly. Their advantage becomes evident for a large numbers of grid points only. So if one cares about relative differences below 1e-4 the more sophisticated weights are suitable. @@ -79,21 +79,27 @@ def _calc_corr_min_t_plus_t(s, bcf): return np.hstack((np.conj(bcf_n_plus[-1:0:-1]), bcf_n_plus)) -def _calc_corr_matrix(s, bcf): +def _calc_corr_matrix(s, bcf, is_equi=None): """calculates the matrix alpha_ij = bcf(t_i-s_j) - calls bcf only for s-s_0 and reconstructs the rest + for equidistant s: call bcf only for s-s_0 and reconstructs the rest """ - n_ = len(s) - bcf_n = _calc_corr_min_t_plus_t(s, bcf) - # we want - # r = bcf(0) bcf(-1), bcf(-2) - # bcf(1) bcf( 0), bcf(-1) - # bcf(2) bcf( 1), bcf( 0) - r = np.empty(shape=(n_, n_), dtype=np.complex128) - for i in range(n_): - idx = n_ - 1 - i - r[:, i] = bcf_n[idx:idx + n_] + if is_equi is None: + is_equi = is_axis_equidistant(s) + + if is_equi: + n_ = len(s) + bcf_n = _calc_corr_min_t_plus_t(s, bcf) + # we want + # r = bcf(0) bcf(-1), bcf(-2) + # bcf(1) bcf( 0), bcf(-1) + # bcf(2) bcf( 1), bcf( 0) + r = np.empty(shape=(n_, n_), dtype=np.complex128) + for i in range(n_): + idx = n_ - 1 - i + r[:, i] = bcf_n[idx:idx + n_] + else: + return bcf(s.reshape(-1,1) - s.reshape(1,-1)) return r @@ -105,9 +111,9 @@ def get_mid_point_weights_times(t_max, num_grid_points): :return: location of the grid points, corresponding weights Because this function is intended to provide the weights to be used in :py:func:`solve_hom_fredholm` - is stretches the homogeneous weights over the grid points starting from 0 up to t_max, so the + it stretches the homogeneous weights over the grid points starting from 0 up to t_max, so the term min_point is somewhat miss leading. This is possible because we want to simulate - stationary stochastic processes which allows :math:`\alpha(t_i+\Delta - (s_j+\Delta)) = \alpha(t_i-s_j)`. + stationary stochastic processes which allows :math:`\alpha(t_i+\Delta/2 - (s_j+\Delta/2)) = \alpha(t_i-s_j)`. The N grid points are located at @@ -115,9 +121,10 @@ def get_mid_point_weights_times(t_max, num_grid_points): and the corresponding weights are - .. math:: w_i = \Delta t = \frac{t_\mathrm{max}}{N-1} \qquad i = 0,1, ... N - 1 + .. math:: w_i = \Delta t = \frac{t_\mathrm{max}}{N} \qquad i = 0,1, ... 
N - 1 """ - t, delta_t = np.linspace(0, t_max, num_grid_points, retstep = True) + t = np.linspace(0, t_max, num_grid_points) + delta_t = t_max/num_grid_points w = np.ones(num_grid_points)*delta_t return t, w @@ -302,7 +309,8 @@ def subdevide_axis(t, ngfac): tfine[l::ngfac] = (l * t[1:] + (ngfac - l) * t[:-1]) / ngfac return tfine -def auto_ng(corr, t_max, ngfac=2, meth=get_mid_point_weights_times, tol=1e-3, diff_method='full', dm_random_samples=10**4): +def auto_ng(corr, t_max, ngfac=2, meth=get_mid_point_weights_times, tol=1e-3, diff_method='full', + dm_random_samples=10**4, ret_eigvals=False, relative_difference=False): r"""increase the number of gridpoints until the desired accuracy is met This function increases the number of grid points of the discrete Fredholm equation exponentially until @@ -325,6 +333,9 @@ def auto_ng(corr, t_max, ngfac=2, meth=get_mid_point_weights_times, tol=1e-3, di 'full': full grid in between the fine grid, such that the spline interpolation error is expected to be maximal 'random': pick a fixed number of random times t and s within the interval [0, t_max] :param dm_random_samples: the number of random times used for diff_method 'random' + :param ret_eigvals: if True, return also the eigen values + :param relative_difference: if True, use relative difference instead of absolute + :return: an array containing the necessary eigenfunctions of the Karhunen-Loève expansion for sampling the stochastic processes (shape=(num_eigen_functions, num_grid_points) @@ -332,7 +343,9 @@ def auto_ng(corr, t_max, ngfac=2, meth=get_mid_point_weights_times, tol=1e-3, di 1) Solve the discrete Fredholm equation on a grid with ng points. This gives ng eigenvalues/vectors where each ng-dimensional vector approximates the continuous eigenfunction. (:math:`t, u_i(t) \leftrightarrow t_k, u_{ik}` where the :math:`t_k` depend on the integration weights - method) + method) For performance reasons, especially when the auto correlation function evaluates slowly it + is advisable to use a method which uses equally distributed times :math;`t_k`. + 2) Approximate the eigenfunction on a finer, equidistant grid (:math:`ng_\mathrm{fine} = ng_\mathrm{fac}(ng-1)+1`) using @@ -355,7 +368,12 @@ def auto_ng(corr, t_max, ngfac=2, meth=get_mid_point_weights_times, tol=1e-3, di ng as follows :math:`ng = 2*ng-1` and start over at 1). (This update schema for ng asured that ng is odd which is needed for the 'simpson' and 'fourpoint' integration weights) + .. note:: + The scaling of the error of the various integration methods does not correspond to the scaling of + the number of eigenfunctions to use in order to reconstruct the auto correlation function within + a given tolerance. Surprisingly it turns out that in general the most trivial **mid-point method** performs + quite well. If other methods suite bettern needs to be check in every case. [1] Press, W.H., Teukolsky, S.A., Vetterling, W.T., Flannery, B.P., 2007. 
Numerical Recipes 3rd Edition: The Art of Scientific Computing, @@ -388,11 +406,11 @@ def auto_ng(corr, t_max, ngfac=2, meth=get_mid_point_weights_times, tol=1e-3, di ng = 2 ** k + 1 log.info("check {} grid points".format(ng)) t, w = meth(t_max, ng) - is_equi = is_axis_equidistant(t) + log.debug("equidistant axis: {}".format(is_equi)) t0 = time.time() # efficient way to construct the - r = _calc_corr_matrix(t, corr) # auto correlation matrix r + r = _calc_corr_matrix(t, corr, is_equi) # auto correlation matrix r time_calc_ac += (time.time() - t0) t0 = time.time() # solve the dicrete fredholm equation @@ -400,7 +418,7 @@ def auto_ng(corr, t_max, ngfac=2, meth=get_mid_point_weights_times, tol=1e-3, di time_fredholm += (time.time() - t0) tfine = subdevide_axis(t, ngfac) # setup fine - tsfine = subdevide_axis(tfine, 2)[1::2] # and super fine time grid + tsfine = subdevide_axis(tfine, 2) # and super fine time grid if is_equi: t0 = time.time() # efficient way to calculate the auto correlation @@ -419,7 +437,11 @@ def auto_ng(corr, t_max, ngfac=2, meth=get_mid_point_weights_times, tol=1e-3, di # alpha(ti+dt/2 - (tj+dt/2)) = alpha(ti - tj) diff = -alpha_ref - old_md = np.inf + if relative_difference: + abs_alpha_res = np.abs(alpha_ref) + else: + abs_alpha_res = 1 + sqrt_lambda_ui_fine_all = [] for i in range(ng): evec = _eig_vec[:, i] @@ -461,14 +483,11 @@ def auto_ng(corr, t_max, ngfac=2, meth=get_mid_point_weights_times, tol=1e-3, di elif diff_method == 'full': ui_super_fine = sqrt_lambda_ui_spl(tsfine) diff += ui_super_fine.reshape(-1, 1) * np.conj(ui_super_fine.reshape(1, -1)) - md = np.max(np.abs(diff)) / alpha_0 + md = np.max(np.abs(diff) / abs_alpha_res) time_calc_diff += (time.time() - t0) log.debug("num evec {} -> max diff {:.3e}".format(i+1, md)) - #if old_md < md: - # log.info("max diff increased -> break, use higher ng") - # break - #old_md = md + if md < tol: time_total = time_calc_diff + time_spline + time_integr_intp + time_calc_ac + time_fredholm time_overall = time.time() - time_start @@ -478,7 +497,11 @@ def auto_ng(corr, t_max, ngfac=2, meth=get_mid_point_weights_times, tol=1e-3, di time_calc_ac/time_overall, time_fredholm/time_overall, time_integr_intp/time_overall, time_spline/time_overall, time_calc_diff/time_overall, time_rest/time_overall)) log.info("auto ng SUCCESSFUL max diff {:.3e} < tol {:.3e} ng {} num evec {}".format(md, tol, ng, i+1)) - return np.asarray(sqrt_lambda_ui_fine_all) + if ret_eigvals: + return np.asarray(sqrt_lambda_ui_fine_all), tfine, _eig_val[:len(sqrt_lambda_ui_fine_all)] + else: + return np.asarray(sqrt_lambda_ui_fine_all), tfine + log.info("ng {} yields md {:.3e}".format(ng, md)) def is_axis_equidistant(ax): diff --git a/stocproc/stocproc.py b/stocproc/stocproc.py index f02b2a3..73ba4d8 100644 --- a/stocproc/stocproc.py +++ b/stocproc/stocproc.py @@ -83,9 +83,13 @@ class _absStocProc(abc.ABC): log.debug("init StocProc with t_max {} and {} grid points".format(t_max, num_grid_points)) def __call__(self, t=None): - r""" - :param t: time to evaluate the stochastic process as, float of array of floats - evaluates the stochastic process via spline interpolation between the discrete process + r"""evaluates the stochastic process via spline interpolation of the discrete process :math:`z_k` + + :param t: time to evaluate the stochastic process at, float of array of floats, if t is None + return the discrete process :math:`z_k` which corresponds to the times :math:`t_k` given by the + integration weights method + :return: a single complex value or a complex 
array of the shape of t that holds the values of + stochastic process at times t """ if self._z is None: raise RuntimeError("StocProc_FFT has NO random data, call 'new_process' to generate a new random process") @@ -116,27 +120,24 @@ class _absStocProc(abc.ABC): pass def get_time(self): - r""" - :return: time axis + r"""Returns the time :math:`t_k` corresponding to the values :math:`z_k` + + These times are determined by the integration weights method. """ return self.t def get_z(self): - r""" - use :py:func:`new_process` to generate a new process - :return: the current process - """ + r"""Returns the discrete process :math:`z_k`.""" return self._z def new_process(self, y=None, seed=None): - r""" - generate a new process by evaluating :py:func:`calc_z` - - When ``y`` is given use these random numbers as input for :py:func:`calc_z` - otherwise generate a new set of random numbers. - + r"""generate a new process by evaluating :py:func:`calc_z` with new random variables :math:`Y_i` + :param y: independent normal distributed complex valued random variables with :math:`\sigma_{ij}^2 = \langle y_i y_j^\ast \rangle = 2 \delta_{ij}` - :param seed: if not ``None`` set seed to ``seed`` before generating samples + :param seed: if not None set seed to seed before generating samples + + When y is given use these random numbers as input for :py:func:`calc_z` + otherwise generate a new set of random numbers. """ t0 = time.time() self._interpolator = None @@ -243,6 +244,9 @@ class StocProc_KLE(_absStocProc): return np.tensordot(y, self.sqrt_lambda_ui_fine, axes=([0], [0])).flatten() def get_num_y(self): + """The number of independent random variables Y is given by the number of used eigenfunction + to approximate the auto correlation kernel. + """ return self.num_ev diff --git a/tests/dev_method_kle.py b/tests/dev_method_kle.py new file mode 100644 index 0000000..f3f6355 --- /dev/null +++ b/tests/dev_method_kle.py @@ -0,0 +1,1030 @@ +import sys +import os + +import numpy as np +import math +from scipy.special import gamma as gamma_func +import scipy.integrate as sp_int +from scipy.linalg import eig +try: + import matplotlib.pyplot as plt +except ImportError: + print("matplotlib not found -> any plotting will crash") + + +import pathlib +p = pathlib.PosixPath(os.path.abspath(__file__)) +sys.path.insert(0, str(p.parent.parent)) + +from scipy.special import gamma +import stocproc as sp +from stocproc import tools +from stocproc import method_kle +from stocproc import stocproc_c +import pickle + +_S_ = 0.6 +_GAMMA_S_PLUS_1 = gamma(_S_ + 1) +_WC_ = 2 + +def oac(tau): + """ohmic bath correlation function""" + return (1 + 1j * (tau)) ** (-(_S_ + 1)) * _GAMMA_S_PLUS_1 / np.pi +def osd(omega): + return omega ** _S_ * np.exp(-omega) + +def lac(t): + """lorenzian bath correlation function""" + return np.exp(- np.abs(t) - 1j * _WC_ * t) +def lsd(w): + return 1 / (1 + (w - _WC_) ** 2) + + +def my_intp(ti, corr, w, t, u, lam): + return np.sum(corr(ti - t) * w * u) / lam + +def show_auto_ng(): + corr = lac + t_max = 15 + meth = [#method_kle.get_mid_point_weights_times, + method_kle.get_trapezoidal_weights_times, + method_kle.get_simpson_weights_times] + #method_kle.get_four_point_weights_times] + #method_kle.get_gauss_legendre_weights_times] + #method_kle.get_tanh_sinh_weights_times] + + ns = 10**4 + t_check = np.random.rand(ns)*t_max + s_check = np.random.rand(ns)*t_max + + _meth = method_kle.get_trapezoidal_weights_times + tol = 1e-2 + ng_fac = 1 + ui, t, ev = method_kle.auto_ng(corr, t_max, ngfac=ng_fac, meth=_meth, 
tol=tol, ret_eigvals=True) + tsf = method_kle.subdevide_axis(t, 4) + lef = tools.LorentzianEigenFunctions(t_max=t_max, gamma=1, w=_WC_, num=800) + c_all = corr(t_check - s_check) + + c_allsf = corr(tsf.reshape(-1,1) - tsf.reshape(1,-1)) + + s = 200 + d_num = [] + d_exa = [] + c_num = np.zeros(shape=ns, dtype=np.complex128) + c_exa = np.zeros(shape=ns, dtype=np.complex128) + + c_numsf = np.zeros(shape=(len(tsf), len(tsf)), dtype=np.complex128) + c_exasf = np.zeros(shape=(len(tsf), len(tsf)), dtype=np.complex128) + d_numsf = [] + d_exasf = [] + for i in range(s): + u = tools.ComplexInterpolatedUnivariateSpline(t, ui[i]) + c_num += u(t_check) * np.conj(u(s_check)) + d_num.append(np.max(np.abs(c_all - c_num)/np.abs(c_all))) + + usf = u(tsf) + c_numsf += usf.reshape(-1, 1) * np.conj(usf.reshape(1, -1)) + d_numsf.append(np.max(np.abs(c_allsf - c_numsf)/np.abs(c_allsf))) + + + u = lef.get_eigfunc(i) + c_exa += lef.get_eigval(i)*u(t_check) * np.conj(u(s_check)) + d_exa.append(np.max(np.abs(c_all - c_exa)/np.abs(c_all))) + + usf = u(tsf) + c_exasf += lef.get_eigval(i)*usf.reshape(-1, 1) * np.conj(usf.reshape(1, -1)) + d_exasf.append(np.max(np.abs(c_allsf - c_exasf)/np.abs(c_allsf))) + + + + fig, ax = plt.subplots(ncols=1) + + ax.plot(np.arange(s), d_num, label='num_rnd') + ax.plot(np.arange(s), d_exa, label='exa_rnd') + + ax.plot(np.arange(s), d_numsf, label='num super fine') + ax.plot(np.arange(s), d_exasf, label='exa super fine') + + ng_fac = 1 + ui, t, ev = method_kle.auto_ng(corr, t_max, ngfac=ng_fac, meth=_meth, tol=tol, ret_eigvals=True) + c_all = corr(t.reshape(-1, 1) - t.reshape((1, -1))) + + c0 = ui[0].reshape(-1, 1) * np.conj(ui[0]).reshape(1, -1) + u = lef.get_eigfunc(0) + ut = u(t) + c0_exa = lef.get_eigval(0)*ut.reshape(-1,1)*np.conj(ut.reshape(1,-1)) + d_num = [np.max(np.abs(c_all - c0))] + d_exa = [np.max(np.abs(c_all - c0_exa))] + for i in range(1, s): + c0 += ui[i].reshape(-1, 1) * np.conj(ui[i]).reshape(1, -1) + d_num.append(np.max(np.abs(c_all - c0)/np.abs(c_all))) + + u = lef.get_eigfunc(i) + ut = u(t) + c0_exa += lef.get_eigval(i) * ut.reshape(-1, 1) * np.conj(ut.reshape(1, -1)) + d_exa.append(np.max(np.abs(c_all - c0_exa)/np.abs(c_all))) + + print("re exa", np.max(np.abs(c_all.real - c0_exa.real))) + print("im exa", np.max(np.abs(c_all.imag - c0_exa.imag))) + print("re num", np.max(np.abs(c_all.real - c0.real))) + print("im num", np.max(np.abs(c_all.imag - c0.imag))) + + + ax.plot(np.arange(s), d_num, label='num') + ax.plot(np.arange(s), d_exa, label='exa') + + ax.set_yscale('log') + ax.legend() + ax.grid() + plt.show() + +def show_reconstr_ac(): + corr = lac + + t_max = 15 + meth = [method_kle.get_mid_point_weights_times, + method_kle.get_trapezoidal_weights_times, + method_kle.get_simpson_weights_times] + #method_kle.get_four_point_weights_times, + #method_kle.get_gauss_legendre_weights_times, + #method_kle.get_tanh_sinh_weights_times] + + + names = ['midp', 'trapz', 'simp', 'four', 'gl', 'ts'] + + + for _mi, _meth in enumerate(meth): + ng_fac = 1 + ng = 401 + + t, w = _meth(t_max=t_max, num_grid_points=ng) + is_equi = method_kle.is_axis_equidistant(t) + r = method_kle._calc_corr_matrix(t, corr, is_equi) + _eig_vals, _eig_vecs = method_kle.solve_hom_fredholm(r, w) + + tfine = method_kle.subdevide_axis(t, ng_fac) # setup fine + tsfine = method_kle.subdevide_axis(tfine, 2) + + if is_equi: + alpha_k = method_kle._calc_corr_min_t_plus_t(tfine, corr) # from -tmax untill tmax on the fine grid + + num_ev = ng + + csf = -corr(tsfine.reshape(-1,1) - tsfine.reshape(1,-1)) + abs_csf = 
np.abs(csf) + + cf = -corr(tfine.reshape(-1, 1) - tfine.reshape(1, -1)) + abs_cf = np.abs(cf) + + c = -corr(t.reshape(-1, 1) - t.reshape(1, -1)) + abs_c = np.abs(c) + + + dsf = [] + df = [] + d = [] + dsfa = [] + dfa = [] + da = [] + for i in range(0, num_ev): + evec = _eig_vecs[:, i] + if _eig_vals[i] < 0: + break + sqrt_eval = np.sqrt(_eig_vals[i]) + if ng_fac != 1: + if not is_equi: + sqrt_lambda_ui_fine = np.asarray([np.sum(corr(ti - t) * w * evec) / sqrt_eval for ti in tfine]) + else: + sqrt_lambda_ui_fine = stocproc_c.eig_func_interp(delta_t_fac=ng_fac, + time_axis=t, + alpha_k=alpha_k, + weights=w, + eigen_val=sqrt_eval, + eigen_vec=evec) + else: + sqrt_lambda_ui_fine = evec * sqrt_eval + + sqrt_lambda_ui_spl = tools.ComplexInterpolatedUnivariateSpline(tfine, sqrt_lambda_ui_fine) + ut = sqrt_lambda_ui_spl(tsfine) + csf += ut.reshape(-1,1)*np.conj(ut.reshape(1,-1)) + diff = np.abs(csf) / abs_csf + rmidx = np.argmax(diff) + rmidx = np.unravel_index(rmidx, diff.shape) + dsf.append(diff[rmidx]) + + diff = np.abs(csf) + amidx = np.argmax(diff) + amidx = np.unravel_index(amidx, diff.shape) + dsfa.append(diff[amidx]) + + if i == num_ev-5: + print(names[_mi], "rd max ", rmidx, "am", amidx) + print("csf", np.abs(csf[rmidx]), np.abs(csf[amidx])) + print("abs csf", abs_csf[rmidx], abs_csf[amidx]) + + ut = sqrt_lambda_ui_fine + cf += ut.reshape(-1, 1) * np.conj(ut.reshape(1, -1)) + df.append(np.max(np.abs(cf) / abs_cf)) + dfa.append(np.max(np.abs(cf))) + + ut = evec * sqrt_eval + c += ut.reshape(-1, 1) * np.conj(ut.reshape(1, -1)) + d.append(np.max(np.abs(c) / abs_c)) + da.append(np.max(np.abs(c))) + + print(names[_mi], "rd max ", rmidx, "am", amidx) + print("csf", np.abs(csf[rmidx]), np.abs(csf[amidx])) + print("abs csf", abs_csf[rmidx], abs_csf[amidx]) + + + p, = plt.plot(np.arange(len(d)), d, label=names[_mi], ls='', marker='.') + plt.plot(np.arange(len(df)), df, color=p.get_color(), ls='--') + plt.plot(np.arange(len(dsf)), dsf, color=p.get_color()) + + plt.plot(np.arange(len(d)), da, ls='', marker='.', color=p.get_color()) + plt.plot(np.arange(len(df)), dfa, color=p.get_color(), ls='--') + plt.plot(np.arange(len(dsf)), dsfa, color=p.get_color()) + + + plt.yscale('log') + plt.legend() + plt.grid() + plt.show() + + +def show_compare_weights_in_solve_fredholm_oac(): + """ + here we try to examine which integration weights perform best in order to + calculate the eigenfunctions -> well it seems to depend on the situation + + although simpson and gauss-legendre perform well + """ + t_max = 15 + corr = oac + + ng_ref = 3501 + + _meth_ref = method_kle.get_simpson_weights_times + t, w = _meth_ref(t_max, ng_ref) + + try: + with open("test_fredholm_interpolation.dump", 'rb') as f: + ref_data = pickle.load(f) + except FileNotFoundError: + ref_data = {} + key = (tuple(t), tuple(w), corr.__name__) + if key in ref_data: + eigval_ref, evec_ref = ref_data[key] + else: + r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) + eigval_ref, evec_ref = method_kle.solve_hom_fredholm(r, w) + ref_data[key] = eigval_ref, evec_ref + with open("test_fredholm_interpolation.dump", 'wb') as f: + pickle.dump(ref_data, f) + + method_kle.align_eig_vec(evec_ref) + + ks = [20, 40, 80, 160] + + fig, ax = plt.subplots(ncols=2, nrows=2, sharex=True, sharey=True, figsize=(16, 12)) + + ax = ax.flatten() + + lines = [] + labels = [] + + eigvec_ref = [] + for i in range(ng_ref): + eigvec_ref.append(tools.ComplexInterpolatedUnivariateSpline(t, evec_ref[:, i])) + + meth = [method_kle.get_mid_point_weights_times, + 
method_kle.get_trapezoidal_weights_times, + method_kle.get_simpson_weights_times, + method_kle.get_four_point_weights_times, + method_kle.get_gauss_legendre_weights_times, + method_kle.get_tanh_sinh_weights_times] + cols = ['r', 'b', 'g', 'm', 'c', 'lime'] + for j, k in enumerate(ks): + axc = ax[j] + ng = 4 * k + 1 + + for i, _meth in enumerate(meth): + t, w = _meth(t_max, ng) + r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) + _eig_val, _eig_vec = method_kle.solve_hom_fredholm(r, w) + method_kle.align_eig_vec(_eig_vec) + + dx = [] + dy = [] + dy2 = [] + + for l in range(len(_eig_val)): + evr = eigvec_ref[l](t) + diff = np.abs(_eig_vec[:, l] - evr) + dx.append(l) + dy.append(np.max(diff)) + dy2.append(abs(_eig_val[l] - eigval_ref[l])) + + p, = axc.plot(dx, dy, color=cols[i]) + axc.plot(dx, dy2, color=cols[i], ls='--') + if j == 0: + lines.append(p) + labels.append(_meth.__name__) + + t, w = method_kle.get_simpson_weights_times(t_max, ng) + r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) + _eig_val, _eig_vec = method_kle.solve_hom_fredholm(r, w) + method_kle.align_eig_vec(_eig_vec) + _eig_vec = _eig_vec[1::2, :] + t = t[1::2] + + dx = [] + dy = [] + dy2 = [] + + for l in range(len(_eig_val)): + evr = eigvec_ref[l](t) + diff = np.abs(_eig_vec[:, l] - evr) + dx.append(l) + dy.append(np.max(diff)) + dy2.append(abs(_eig_val[l] - eigval_ref[l])) + + p, = axc.plot(dx, dy, color='orange') + p_lam, = axc.plot(list(range(ng)), eigval_ref[:ng], color='k') + if j == 0: + lines.append(p) + labels.append('simp 2nd') + lines.append(p_lam) + labels.append('abs(lamnda_i)') + + axc.grid() + axc.set_yscale('log') + axc.set_xlim([0, 100]) + axc.set_ylim([1e-15, 10]) + axc.set_title("ng {}".format(ng)) + + fig.suptitle("use ref with ng_ref {} and method '{}'".format(ng_ref, _meth_ref.__name__)) + fig.legend(handles=lines, labels=labels, ncol=3, loc='lower center') + fig.subplots_adjust(bottom=0.15) + plt.show() + +def show_solve_fredholm_error_scaling_oac(): + """ + """ + t_max = 15 + corr = oac + + ng_ref = 3501 + + _meth_ref = method_kle.get_simpson_weights_times + t, w = _meth_ref(t_max, ng_ref) + + t_3501 = t + + try: + with open("test_fredholm_interpolation.dump", 'rb') as f: + ref_data = pickle.load(f) + except FileNotFoundError: + ref_data = {} + key = (tuple(t), tuple(w), corr.__name__) + if key in ref_data: + eigval_ref, evec_ref = ref_data[key] + else: + r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) + eigval_ref, evec_ref = method_kle.solve_hom_fredholm(r, w) + ref_data[key] = eigval_ref, evec_ref + with open("test_fredholm_interpolation.dump", 'wb') as f: + pickle.dump(ref_data, f) + + method_kle.align_eig_vec(evec_ref) + + ks = np.logspace(0.7, 2.3, 15, dtype=np.int) + + meth = [method_kle.get_mid_point_weights_times, + method_kle.get_trapezoidal_weights_times, + method_kle.get_simpson_weights_times, + method_kle.get_four_point_weights_times, + method_kle.get_gauss_legendre_weights_times, + method_kle.get_tanh_sinh_weights_times] + + names = ['midp', 'trapz', 'simp', 'fp', 'gl', 'ts'] + idxs = [0,10,20] + + eigvec_ref = [] + for idx in idxs: + eigvec_ref.append(tools.ComplexInterpolatedUnivariateSpline(t, evec_ref[:, idx])) + + data = np.empty(shape= (len(meth), len(ks), len(idxs))) + data_spline = np.empty(shape=(len(meth), len(ks), len(idxs))) + data_int = np.empty(shape=(len(meth), len(ks), len(idxs))) + for j, k in enumerate(ks): + print(j, len(ks)) + ng = 4 * k + 1 + for i, _meth in enumerate(meth): + t, w = _meth(t_max, ng) + r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) + _eig_val, _eig_vec = 
method_kle.solve_hom_fredholm(r, w) + method_kle.align_eig_vec(_eig_vec) + for k, idx in enumerate(idxs): + d = np.max(np.abs(_eig_vec[:,idx]-eigvec_ref[k](t))) + data[i, j, k] = d + + uip = tools.ComplexInterpolatedUnivariateSpline(t, _eig_vec[:,idx]) + d = np.max(np.abs(uip(t_3501) - evec_ref[:, idx])) + data_spline[i, j, k] = d + + uip = np.asarray([my_intp(ti, corr, w, t, _eig_vec[:, idx], _eig_val[idx]) for ti in t_3501]) + d = np.max(np.abs(uip - evec_ref[:, idx])) + data_int[i, j, k] = d + + ng = 4*ks + 1 + for i in range(len(meth)): + p, = plt.plot(ng, data[i, :, 0], marker='o', label="no intp {}".format(names[i])) + c = p.get_color() + plt.plot(ng, data_spline[i, :, 0], marker='.', color=c, label="spline {}".format(names[i])) + plt.plot(ng, data_int[i, :, 0], marker='^', color=c, label="intp {}".format(names[i])) + + plt.yscale('log') + plt.xscale('log') + plt.legend() + plt.grid() + + plt.show() + +def show_compare_weights_in_solve_fredholm_lac(): + """ + here we try to examine which integration weights perform best in order to + calculate the eigenfunctions -> well it seems to depend on the situation + + although simpson and gauss-legendre perform well + """ + t_max = 15 + corr = lac + + ng_ref = 3501 + + _meth_ref = method_kle.get_simpson_weights_times + t, w = _meth_ref(t_max, ng_ref) + + try: + with open("test_fredholm_interpolation.dump", 'rb') as f: + ref_data = pickle.load(f) + except FileNotFoundError: + ref_data = {} + key = (tuple(t), tuple(w), corr.__name__) + if key in ref_data: + eigval_ref, evec_ref = ref_data[key] + else: + r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) + eigval_ref, evec_ref = method_kle.solve_hom_fredholm(r, w) + ref_data[key] = eigval_ref, evec_ref + with open("test_fredholm_interpolation.dump", 'wb') as f: + pickle.dump(ref_data, f) + + + method_kle.align_eig_vec(evec_ref) + + ks = [20,40,80,160] + + fig, ax = plt.subplots(ncols=2, nrows=2, sharex=True, sharey=True, figsize=(16,12)) + + ax = ax.flatten() + + lines = [] + labels = [] + + eigvec_ref = [] + for i in range(ng_ref): + eigvec_ref.append(tools.ComplexInterpolatedUnivariateSpline(t, evec_ref[:, i])) + + meth = [method_kle.get_mid_point_weights_times, + method_kle.get_trapezoidal_weights_times, + method_kle.get_simpson_weights_times, + method_kle.get_four_point_weights_times, + method_kle.get_gauss_legendre_weights_times, + method_kle.get_sinh_tanh_weights_times] + cols = ['r', 'b', 'g', 'm', 'c', 'lime'] + for j, k in enumerate(ks): + axc = ax[j] + ng = 4*k+1 + + for i, _meth in enumerate(meth): + t, w = _meth(t_max, ng) + r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) + _eig_val, _eig_vec = method_kle.solve_hom_fredholm(r, w, eig_val_min=0) + method_kle.align_eig_vec(_eig_vec) + + dx = [] + dy = [] + dy2 = [] + + for l in range(len(_eig_val)): + evr = eigvec_ref[l](t) + diff = np.abs(_eig_vec[:,l] - evr) + dx.append(l) + dy.append(np.max(diff)) + dy2.append(abs(_eig_val[l] - eigval_ref[l])) + + p, = axc.plot(dx, dy, color=cols[i]) + axc.plot(dx, dy2, color=cols[i], ls='--') + if j == 0: + lines.append(p) + labels.append(_meth.__name__) + + + t, w = method_kle.get_simpson_weights_times(t_max, ng) + r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) + _eig_val, _eig_vec = method_kle.solve_hom_fredholm(r, w, eig_val_min=0) + method_kle.align_eig_vec(_eig_vec) + _eig_vec = _eig_vec[1::2, :] + t = t[1::2] + + dx = [] + dy = [] + dy2 = [] + + for l in range(len(_eig_val)): + evr = eigvec_ref[l](t) + diff = np.abs(_eig_vec[:, l] - evr) + dx.append(l) + dy.append(np.max(diff)) + 
dy2.append(abs(_eig_val[l] - eigval_ref[l])) + + p, = axc.plot(dx, dy, color='lime') + p_lam, = axc.plot(list(range(ng)), eigval_ref[:ng], color='k') + if j == 0: + lines.append(p) + labels.append('simp 2nd') + lines.append(p_lam) + labels.append('abs(lamnda_i)') + + axc.grid() + axc.set_yscale('log') + axc.set_xlim([0,100]) + axc.set_ylim([1e-5, 10]) + axc.set_title("ng {}".format(ng)) + + + fig.suptitle("use ref with ng_ref {} and method '{}'".format(ng_ref, _meth_ref.__name__)) + fig.legend(handles=lines, labels=labels, ncol=3, loc='lower center') + fig.subplots_adjust(bottom=0.15) + plt.show() + +def show_solve_fredholm_interp_eigenfunc(): + """ + here we take the discrete eigenfunctions of the Fredholm problem + and use qubic interpolation to check the integral equality. + + the difference between the midpoint weights and simpson weights become + visible. Although the simpson integration yields on average a better performance + there are high fluctuation in the error. + """ + _WC_ = 2 + def lac(t): + return np.exp(- np.abs(t) - 1j*_WC_*t) + + t_max = 10 + ng = 81 + ngfac = 2 + tfine = np.linspace(0, t_max, (ng-1)*ngfac+1) + + lef = tools.LorentzianEigenFunctions(t_max=t_max, gamma=1, w=_WC_, num=5) + + + fig, ax = plt.subplots(nrows=2, ncols=2, sharey=True, sharex=True) + ax = ax.flatten() + + for idx in range(4): + u_exact = lef.get_eigfunc(idx)(tfine) + method_kle.align_eig_vec(u_exact.reshape(-1,1)) + + t, w = sp.method_kle.get_mid_point_weights_times(t_max, ng) + r = lac(t.reshape(-1, 1) - t.reshape(1, -1)) + _eig_val, _eig_vec = sp.method_kle.solve_hom_fredholm(r, w) + method_kle.align_eig_vec(_eig_vec) + u0 = tools.ComplexInterpolatedUnivariateSpline(t, _eig_vec[:,idx]) + + err = np.abs(u0(tfine) - u_exact) + axc = ax[idx] + axc.plot(tfine, err, color='r', label='midp') + + t, w = sp.method_kle.get_trapezoidal_weights_times(t_max, ng) + r = lac(t.reshape(-1, 1) - t.reshape(1, -1)) + _eig_val, _eig_vec = sp.method_kle.solve_hom_fredholm(r, w) + method_kle.align_eig_vec(_eig_vec) + u0 = tools.ComplexInterpolatedUnivariateSpline(t, _eig_vec[:, idx]) + err = np.abs(u0(tfine) - u_exact) + axc.plot(tfine, err, color='b', label='trapz') + axc.plot(tfine[::ngfac], err[::ngfac], ls='', marker='x', color='b') + + t, w = sp.method_kle.get_simpson_weights_times(t_max, ng) + r = lac(t.reshape(-1, 1) - t.reshape(1, -1)) + _eig_val, _eig_vec = sp.method_kle.solve_hom_fredholm(r, w) + method_kle.align_eig_vec(_eig_vec) + u0 = tools.ComplexInterpolatedUnivariateSpline(t, _eig_vec[:, idx]) + err = np.abs(u0(tfine) - u_exact) + axc.plot(tfine, err, color='k', label='simp') + axc.plot(tfine[::ngfac], err[::ngfac], ls='', marker='x', color='k') + + axc.set_yscale('log') + axc.set_title("eigen function # {}".format(idx)) + axc.grid() + + + axc.set_yscale('log') + fig.suptitle("np.abs(int R(t-s)u_i(s) - lam_i * u_i(t))") + plt.show() + +def show_fredholm_eigvec_interpolation(): + """ + for ohmic sd : use 4 point and integral interpolation + for lorentzian : use simpson and spline interpolation + """ + t_max = 15 + corr = lac + corr = oac + + ng_ref = 3501 + + _meth_ref = method_kle.get_simpson_weights_times + _meth_ref = method_kle.get_trapezoidal_weights_times + _meth_ref = method_kle.get_four_point_weights_times + + t, w = _meth_ref(t_max, ng_ref) + + try: + with open("test_fredholm_interpolation.dump", 'rb') as f: + ref_data = pickle.load(f) + except FileNotFoundError: + ref_data = {} + key = (tuple(t), tuple(w), corr.__name__) + if key in ref_data: + eigval_ref, evec_ref = ref_data[key] + else: + 
r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) + eigval_ref, evec_ref = method_kle.solve_hom_fredholm(r, w) + ref_data[key] = eigval_ref, evec_ref + with open("test_fredholm_interpolation.dump", 'wb') as f: + pickle.dump(ref_data, f) + + method_kle.align_eig_vec(evec_ref) + t_ref = t + + eigvec_ref = [] + for l in range(ng_ref): + eigvec_ref.append(tools.ComplexInterpolatedUnivariateSpline(t, evec_ref[:, l])) + + meth = [method_kle.get_mid_point_weights_times, + method_kle.get_trapezoidal_weights_times, + method_kle.get_simpson_weights_times, + method_kle.get_four_point_weights_times, + method_kle.get_gauss_legendre_weights_times, + method_kle.get_tanh_sinh_weights_times] + cols = ['r', 'b', 'g', 'm', 'c', 'lime'] + + + + fig, ax = plt.subplots(ncols=2, nrows=2, sharex=True, sharey=True, figsize=(16,12)) + ax = ax.flatten() + + ks = [10,14,18,26] + + lns, lbs = [], [] + + for ik, k in enumerate(ks): + axc = ax[ik] + + ng = 4*k+1 + for i, _meth in enumerate(meth): + print(ik, i) + t, w = _meth(t_max, ng) + r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) + _eig_val, _eig_vec = method_kle.solve_hom_fredholm(r, w) + method_kle.align_eig_vec(_eig_vec) + + eigvec_intp = [] + for l in range(ng): + eigvec_intp.append(tools.ComplexInterpolatedUnivariateSpline(t, _eig_vec[:, l])) + + ydata_fixed = [] + ydata_spline = [] + ydata_integr_intp = [] + xdata = np.arange(min(ng, 100)) + + for idx in xdata: + evr = eigvec_ref[idx](t) + ydata_fixed.append(np.max(np.abs(_eig_vec[:,idx] - evr))) + ydata_spline.append(np.max(np.abs(eigvec_intp[idx](t_ref) - evec_ref[:,idx]))) + uip = np.asarray([my_intp(ti, corr, w, t, _eig_vec[:,idx], _eig_val[idx]) for ti in t_ref]) + ydata_integr_intp.append(np.max(np.abs(uip - evec_ref[:,idx]))) + + p1, = axc.plot(xdata, ydata_fixed, color=cols[i], label=_meth.__name__) + p2, = axc.plot(xdata, ydata_spline, color=cols[i], ls='--') + p3, = axc.plot(xdata, ydata_integr_intp, color=cols[i], alpha = 0.5) + if ik == 0: + lns.append(p1) + lbs.append(_meth.__name__) + + if ik == 0: + lines = [p1,p2,p3] + labels = ['fixed', 'spline', 'integral interp'] + axc.set_yscale('log') + axc.set_title("ng {}".format(ng)) + axc.set_xlim([0,100]) + axc.grid() + axc.legend() + + + fig.legend(lines, labels, loc = "lower right", ncol=3) + fig.legend(lns, lbs, loc="lower left", ncol=2) + plt.subplots_adjust(bottom = 0.15) + + plt.savefig("test_fredholm_eigvec_interpolation_{}_.pdf".format(corr.__name__)) + plt.show() + + + +def show_reconstr_ac_interp(): + t_max = 25 + corr = lac + #corr = oac + + meth = [method_kle.get_mid_point_weights_times, + method_kle.get_trapezoidal_weights_times, + method_kle.get_simpson_weights_times, + method_kle.get_four_point_weights_times, + method_kle.get_gauss_legendre_weights_times, + method_kle.get_tanh_sinh_weights_times] + + cols = ['r', 'b', 'g', 'm', 'c'] + + def my_intp(ti, corr, w, t, u, lam): + return np.sum(corr(ti - t) * w * u) / lam + + fig, ax = plt.subplots(figsize=(16, 12)) + + ks = [40] + for i, k in enumerate(ks): + axc = ax + ng = 4 * k + 1 + for i, _meth in enumerate(meth): + t, w = _meth(t_max, ng) + r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) + _eig_val, _eig_vec = method_kle.solve_hom_fredholm(r, w) + + tf = method_kle.subdevide_axis(t, ngfac=3) + tsf = method_kle.subdevide_axis(tf, ngfac=2) + + diff1 = - corr(t.reshape(-1,1) - t.reshape(1,-1)) + diff2 = - corr(tf.reshape(-1, 1) - tf.reshape(1, -1)) + diff3 = - corr(tsf.reshape(-1, 1) - tsf.reshape(1, -1)) + + xdata = np.arange(ng) + ydata1 = np.ones(ng) + ydata2 = np.ones(ng) + ydata3 = 
np.ones(ng) + + for idx in xdata: + evec = _eig_vec[:, idx] + if _eig_val[idx] < 0: + break + sqrt_eval = np.sqrt(_eig_val[idx]) + + uip = np.asarray([my_intp(ti, corr, w, t, evec, sqrt_eval) for ti in tf]) + uip_spl = tools.ComplexInterpolatedUnivariateSpline(tf, uip) + uip_sf = uip_spl(tsf) + diff1 += _eig_val[idx] * evec.reshape(-1, 1) * np.conj(evec.reshape(1, -1)) + diff2 += uip.reshape(-1, 1) * np.conj(uip.reshape(1, -1)) + diff3 += uip_sf.reshape(-1,1) * np.conj(uip_sf.reshape(1,-1)) + ydata1[idx] = np.max(np.abs(diff1)) + ydata2[idx] = np.max(np.abs(diff2)) + ydata3[idx] = np.max(np.abs(diff3)) + + p, = axc.plot(xdata, ydata1, label=_meth.__name__, alpha = 0.5) + axc.plot(xdata, ydata2, color=p.get_color(), ls='--') + axc.plot(xdata, ydata3, color=p.get_color(), lw=2) + + axc.set_yscale('log') + axc.set_title("ng: {}".format(ng)) + axc.grid() + axc.legend() + plt.show() + + + + + + + +def show_lac_error_scaling(): + """ + """ + t_max = 15 + corr = lac + + idx = 0 + lef = tools.LorentzianEigenFunctions(t_max=t_max, gamma=1, w=_WC_, num=10) + u = lef.get_eigfunc(idx) + + ngs = np.logspace(1, 2.5, 60, dtype=np.int) + + _meth = method_kle.get_mid_point_weights_times + d = [] + for ng in ngs: + t, w = _meth(t_max, ng) + r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) + _eig_val, _eig_vec = method_kle.solve_hom_fredholm(r, w) + method_kle.align_eig_vec(_eig_vec) + ut = u(t) + method_kle.align_eig_vec(ut.reshape(-1,1)) + _d = np.abs(_eig_vec[:, idx]-ut) + d.append(np.max(_d)) + plt.plot(ngs, d, label='midp', marker='o', ls='') + + + _meth = method_kle.get_trapezoidal_weights_times + d = [] + for ng in ngs: + t, w = _meth(t_max, ng) + r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) + _eig_val, _eig_vec = method_kle.solve_hom_fredholm(r, w) + method_kle.align_eig_vec(_eig_vec) + ut = u(t) + method_kle.align_eig_vec(ut.reshape(-1, 1)) + _d = np.abs(_eig_vec[:, idx] - ut) + d.append(np.max(_d)) + plt.plot(ngs, d, label='trapz', marker='o', ls='') + + + _meth = method_kle.get_simpson_weights_times + d = [] + ng_used = [] + for ng in ngs: + ng = 2*(ng//2)+1 + if ng in ng_used: + continue + ng_used.append(ng) + t, w = _meth(t_max, ng) + r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) + _eig_val, _eig_vec = method_kle.solve_hom_fredholm(r, w) + method_kle.align_eig_vec(_eig_vec) + ut = u(t) + method_kle.align_eig_vec(ut.reshape(-1, 1)) + _d = np.abs(_eig_vec[:, idx] - ut) + d.append(np.max(_d)) + plt.plot(ng_used, d, label='simp', marker='o', ls='') + + x = np.logspace(1, 3, 50) + plt.plot(x, 0.16 / x, color='0.5') + plt.plot(x, 1 / x ** 2, color='0.5') + plt.plot(x, 200 / x ** 4, color='0.5') + plt.plot(x, 200000 / x ** 6, color='0.5') + + + + + + plt.yscale('log') + plt.xscale('log') + + plt.legend() + plt.grid() + plt.show() + +def show_oac_error_scaling(): + """ + """ + t_max = 15 + corr = lac + + idx = 0 + lef = tools.LorentzianEigenFunctions(t_max=t_max, gamma=1, w=_WC_, num=10) + u = lef.get_eigfunc(idx) + + ngs = np.logspace(1, 2.5, 60, dtype=np.int) + + _meth = method_kle.get_mid_point_weights_times + d = [] + for ng in ngs: + t, w = _meth(t_max, ng) + r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) + _eig_val, _eig_vec = method_kle.solve_hom_fredholm(r, w) + method_kle.align_eig_vec(_eig_vec) + ut = u(t) + method_kle.align_eig_vec(ut.reshape(-1,1)) + _d = np.abs(_eig_vec[:, idx]-ut) + d.append(np.max(_d)) + plt.plot(ngs, d, label='midp', marker='o', ls='') + + + _meth = method_kle.get_trapezoidal_weights_times + d = [] + for ng in ngs: + t, w = _meth(t_max, ng) + r = corr(t.reshape(-1, 1) - 
t.reshape(1, -1)) + _eig_val, _eig_vec = method_kle.solve_hom_fredholm(r, w) + method_kle.align_eig_vec(_eig_vec) + ut = u(t) + method_kle.align_eig_vec(ut.reshape(-1, 1)) + _d = np.abs(_eig_vec[:, idx] - ut) + d.append(np.max(_d)) + plt.plot(ngs, d, label='trapz', marker='o', ls='') + + + _meth = method_kle.get_simpson_weights_times + d = [] + ng_used = [] + for ng in ngs: + ng = 2*(ng//2)+1 + if ng in ng_used: + continue + ng_used.append(ng) + t, w = _meth(t_max, ng) + r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) + _eig_val, _eig_vec = method_kle.solve_hom_fredholm(r, w) + method_kle.align_eig_vec(_eig_vec) + ut = u(t) + method_kle.align_eig_vec(ut.reshape(-1, 1)) + _d = np.abs(_eig_vec[:, idx] - ut) + d.append(np.max(_d)) + plt.plot(ng_used, d, label='simp', marker='o', ls='') + + x = np.logspace(1, 3, 50) + plt.plot(x, 0.16 / x, color='0.5') + plt.plot(x, 1 / x ** 2, color='0.5') + plt.plot(x, 200 / x ** 4, color='0.5') + plt.plot(x, 200000 / x ** 6, color='0.5') + + + + + + plt.yscale('log') + plt.xscale('log') + + plt.legend() + plt.grid() + plt.show() + +def show_lac_simp_scaling(): + """ + even using the exact eigen functions yields bad error scaling due to the + non smoothness of the correlation function at tau=0 + + the problem is cured if the integral over s runs from 0 to t and then from + t to t_max, this is only ensured for midpoint and trapezoidal weights. + """ + t_max = 15 + corr = lac + + idx = 0 + lef = tools.LorentzianEigenFunctions(t_max=t_max, gamma=1, w=_WC_, num=10) + u = lef.get_eigfunc(idx) + c = 1/lef.get_eigval(idx) + + ngs = np.logspace(1, 2.5, 60, dtype=np.int) + + _meth = method_kle.get_simpson_weights_times + d0 = [] + d1 = [] + ng_used = [] + for ng in ngs: + ng = 2*(ng//2)+1 + if ng in ng_used: + continue + ng_used.append(ng) + t, w = _meth(t_max, ng) + ti = 0 + di = abs(u(ti) - c*np.sum(w*corr(ti-t)*u(t))) + d0.append(di) + ti = 1 + di = abs(u(ti) - c * np.sum(w * corr(ti - t) * u(t))) + d1.append(di) + + plt.plot(ng_used, d0, marker='o', color='k', label="simps int corr(0-s) u(s)") + plt.plot(ng_used, d1, marker='o', color='r', label="simps int corr(1-s) u(s)") + + ng_used = np.asanyarray(ng_used) + plt.plot(ng_used, 1 / ng_used ** 2, label='1/ng**2') + plt.plot(ng_used, 1 / ng_used ** 4, label='1/ng**4') + + plt.yscale('log') + plt.xscale('log') + + + plt.legend() + plt.grid() + plt.show() + +if __name__ == "__main__": + # show_auto_ng() + show_reconstr_ac() + # show_compare_weights_in_solve_fredholm_oac() + # show_compare_weights_in_solve_fredholm_lac() + # show_solve_fredholm_error_scaling_oac() + # show_fredholm_eigvec_interpolation() + # show_solve_fredholm_interp_eigenfunc() + # show_reconstr_ac_interp() + # show_lac_error_scaling() + # show_lac_simp_scaling() + pass \ No newline at end of file diff --git a/tests/test_method_kle.py b/tests/test_method_kle.py index 2831cc2..9aa260b 100644 --- a/tests/test_method_kle.py +++ b/tests/test_method_kle.py @@ -2,9 +2,6 @@ import sys import os import numpy as np -import math -from scipy.special import gamma as gamma_func -import scipy.integrate as sp_int from scipy.linalg import eig try: import matplotlib.pyplot as plt @@ -22,7 +19,6 @@ import stocproc as sp from stocproc import tools from stocproc import method_kle from stocproc import stocproc_c -import pickle import logging _S_ = 0.6 @@ -287,809 +283,6 @@ def test_auto_ng(): ui = method_kle.auto_ng(corr, t_max, ngfac=ng_fac, meth = _meth) print(_meth.__name__, ui.shape) -def show_auto_ng(): - corr = lac - t_max = 15 - meth = 
[method_kle.get_mid_point_weights_times, - method_kle.get_trapezoidal_weights_times, - method_kle.get_simpson_weights_times, - method_kle.get_four_point_weights_times] - #method_kle.get_gauss_legendre_weights_times] - #method_kle.get_tanh_sinh_weights_times] - - ng_fac = 1 - tols = np.logspace(-2,-1, 10) - for _meth in meth: - d = [] - for tol in tols: - ui = method_kle.auto_ng(corr, t_max, ngfac=ng_fac, meth = _meth, tol=tol) - d.append(ui.shape[0]) - plt.plot(tols, d, label=_meth.__name__) - - t = np.linspace(0, t_max, 500) - - d = [] - for tol in tols: - lef = tools.LorentzianEigenFunctions(t_max=t_max, gamma=1, w=_WC_, num=800) - diff = -corr(t.reshape(-1,1) - t.reshape(1,-1)) - for i in range(800): - u = lef.get_eigfunc(i)(t) - diff += lef.get_eigval(i)*u.reshape(-1,1)*np.conj(u.reshape(1,-1)) - if np.max(np.abs(diff)) < tol: - d.append(i+1) - break - plt.plot(tols, d, label='exact') - plt.legend() - plt.xscale('log') - plt.yscale('log') - plt.grid() - plt.show() - -def show_compare_weights_in_solve_fredholm_oac(): - """ - here we try to examine which integration weights perform best in order to - calculate the eigenfunctions -> well it seems to depend on the situation - - although simpson and gauss-legendre perform well - """ - t_max = 15 - corr = oac - - ng_ref = 3501 - - _meth_ref = method_kle.get_simpson_weights_times - t, w = _meth_ref(t_max, ng_ref) - - try: - with open("test_fredholm_interpolation.dump", 'rb') as f: - ref_data = pickle.load(f) - except FileNotFoundError: - ref_data = {} - key = (tuple(t), tuple(w), corr.__name__) - if key in ref_data: - eigval_ref, evec_ref = ref_data[key] - else: - r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) - eigval_ref, evec_ref = method_kle.solve_hom_fredholm(r, w) - ref_data[key] = eigval_ref, evec_ref - with open("test_fredholm_interpolation.dump", 'wb') as f: - pickle.dump(ref_data, f) - - method_kle.align_eig_vec(evec_ref) - - ks = [20, 40, 80, 160] - - fig, ax = plt.subplots(ncols=2, nrows=2, sharex=True, sharey=True, figsize=(16, 12)) - - ax = ax.flatten() - - lines = [] - labels = [] - - eigvec_ref = [] - for i in range(ng_ref): - eigvec_ref.append(tools.ComplexInterpolatedUnivariateSpline(t, evec_ref[:, i])) - - meth = [method_kle.get_mid_point_weights_times, - method_kle.get_trapezoidal_weights_times, - method_kle.get_simpson_weights_times, - method_kle.get_four_point_weights_times, - method_kle.get_gauss_legendre_weights_times, - method_kle.get_tanh_sinh_weights_times] - cols = ['r', 'b', 'g', 'm', 'c', 'lime'] - for j, k in enumerate(ks): - axc = ax[j] - ng = 4 * k + 1 - - for i, _meth in enumerate(meth): - t, w = _meth(t_max, ng) - r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) - _eig_val, _eig_vec = method_kle.solve_hom_fredholm(r, w) - method_kle.align_eig_vec(_eig_vec) - - dx = [] - dy = [] - dy2 = [] - - for l in range(len(_eig_val)): - evr = eigvec_ref[l](t) - diff = np.abs(_eig_vec[:, l] - evr) - dx.append(l) - dy.append(np.max(diff)) - dy2.append(abs(_eig_val[l] - eigval_ref[l])) - - p, = axc.plot(dx, dy, color=cols[i]) - axc.plot(dx, dy2, color=cols[i], ls='--') - if j == 0: - lines.append(p) - labels.append(_meth.__name__) - - t, w = method_kle.get_simpson_weights_times(t_max, ng) - r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) - _eig_val, _eig_vec = method_kle.solve_hom_fredholm(r, w) - method_kle.align_eig_vec(_eig_vec) - _eig_vec = _eig_vec[1::2, :] - t = t[1::2] - - dx = [] - dy = [] - dy2 = [] - - for l in range(len(_eig_val)): - evr = eigvec_ref[l](t) - diff = np.abs(_eig_vec[:, l] - evr) - dx.append(l) - 
dy.append(np.max(diff)) - dy2.append(abs(_eig_val[l] - eigval_ref[l])) - - p, = axc.plot(dx, dy, color='orange') - p_lam, = axc.plot(list(range(ng)), eigval_ref[:ng], color='k') - if j == 0: - lines.append(p) - labels.append('simp 2nd') - lines.append(p_lam) - labels.append('abs(lamnda_i)') - - axc.grid() - axc.set_yscale('log') - axc.set_xlim([0, 100]) - axc.set_ylim([1e-15, 10]) - axc.set_title("ng {}".format(ng)) - - fig.suptitle("use ref with ng_ref {} and method '{}'".format(ng_ref, _meth_ref.__name__)) - fig.legend(handles=lines, labels=labels, ncol=3, loc='lower center') - fig.subplots_adjust(bottom=0.15) - plt.show() - -def show_solve_fredholm_error_scaling_oac(): - """ - """ - t_max = 15 - corr = oac - - ng_ref = 3501 - - _meth_ref = method_kle.get_simpson_weights_times - t, w = _meth_ref(t_max, ng_ref) - - t_3501 = t - - try: - with open("test_fredholm_interpolation.dump", 'rb') as f: - ref_data = pickle.load(f) - except FileNotFoundError: - ref_data = {} - key = (tuple(t), tuple(w), corr.__name__) - if key in ref_data: - eigval_ref, evec_ref = ref_data[key] - else: - r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) - eigval_ref, evec_ref = method_kle.solve_hom_fredholm(r, w) - ref_data[key] = eigval_ref, evec_ref - with open("test_fredholm_interpolation.dump", 'wb') as f: - pickle.dump(ref_data, f) - - method_kle.align_eig_vec(evec_ref) - - ks = np.logspace(0.7, 2.3, 15, dtype=np.int) - - meth = [method_kle.get_mid_point_weights_times, - method_kle.get_trapezoidal_weights_times, - method_kle.get_simpson_weights_times, - method_kle.get_four_point_weights_times, - method_kle.get_gauss_legendre_weights_times, - method_kle.get_tanh_sinh_weights_times] - - names = ['midp', 'trapz', 'simp', 'fp', 'gl', 'ts'] - idxs = [0,10,20] - - eigvec_ref = [] - for idx in idxs: - eigvec_ref.append(tools.ComplexInterpolatedUnivariateSpline(t, evec_ref[:, idx])) - - data = np.empty(shape= (len(meth), len(ks), len(idxs))) - data_spline = np.empty(shape=(len(meth), len(ks), len(idxs))) - data_int = np.empty(shape=(len(meth), len(ks), len(idxs))) - for j, k in enumerate(ks): - print(j, len(ks)) - ng = 4 * k + 1 - for i, _meth in enumerate(meth): - t, w = _meth(t_max, ng) - r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) - _eig_val, _eig_vec = method_kle.solve_hom_fredholm(r, w) - method_kle.align_eig_vec(_eig_vec) - for k, idx in enumerate(idxs): - d = np.max(np.abs(_eig_vec[:,idx]-eigvec_ref[k](t))) - data[i, j, k] = d - - uip = tools.ComplexInterpolatedUnivariateSpline(t, _eig_vec[:,idx]) - d = np.max(np.abs(uip(t_3501) - evec_ref[:, idx])) - data_spline[i, j, k] = d - - uip = np.asarray([my_intp(ti, corr, w, t, _eig_vec[:, idx], _eig_val[idx]) for ti in t_3501]) - d = np.max(np.abs(uip - evec_ref[:, idx])) - data_int[i, j, k] = d - - ng = 4*ks + 1 - for i in range(len(meth)): - p, = plt.plot(ng, data[i, :, 0], marker='o', label="no intp {}".format(names[i])) - c = p.get_color() - plt.plot(ng, data_spline[i, :, 0], marker='.', color=c, label="spline {}".format(names[i])) - plt.plot(ng, data_int[i, :, 0], marker='^', color=c, label="intp {}".format(names[i])) - - plt.yscale('log') - plt.xscale('log') - plt.legend() - plt.grid() - - plt.show() - -def show_compare_weights_in_solve_fredholm_lac(): - """ - here we try to examine which integration weights perform best in order to - calculate the eigenfunctions -> well it seems to depend on the situation - - although simpson and gauss-legendre perform well - """ - t_max = 15 - corr = lac - - ng_ref = 3501 - - _meth_ref = method_kle.get_simpson_weights_times - t, 
w = _meth_ref(t_max, ng_ref) - - try: - with open("test_fredholm_interpolation.dump", 'rb') as f: - ref_data = pickle.load(f) - except FileNotFoundError: - ref_data = {} - key = (tuple(t), tuple(w), corr.__name__) - if key in ref_data: - eigval_ref, evec_ref = ref_data[key] - else: - r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) - eigval_ref, evec_ref = method_kle.solve_hom_fredholm(r, w) - ref_data[key] = eigval_ref, evec_ref - with open("test_fredholm_interpolation.dump", 'wb') as f: - pickle.dump(ref_data, f) - - - method_kle.align_eig_vec(evec_ref) - - ks = [20,40,80,160] - - fig, ax = plt.subplots(ncols=2, nrows=2, sharex=True, sharey=True, figsize=(16,12)) - - ax = ax.flatten() - - lines = [] - labels = [] - - eigvec_ref = [] - for i in range(ng_ref): - eigvec_ref.append(tools.ComplexInterpolatedUnivariateSpline(t, evec_ref[:, i])) - - meth = [method_kle.get_mid_point_weights_times, - method_kle.get_trapezoidal_weights_times, - method_kle.get_simpson_weights_times, - method_kle.get_four_point_weights_times, - method_kle.get_gauss_legendre_weights_times, - method_kle.get_sinh_tanh_weights_times] - cols = ['r', 'b', 'g', 'm', 'c', 'lime'] - for j, k in enumerate(ks): - axc = ax[j] - ng = 4*k+1 - - for i, _meth in enumerate(meth): - t, w = _meth(t_max, ng) - r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) - _eig_val, _eig_vec = method_kle.solve_hom_fredholm(r, w, eig_val_min=0) - method_kle.align_eig_vec(_eig_vec) - - dx = [] - dy = [] - dy2 = [] - - for l in range(len(_eig_val)): - evr = eigvec_ref[l](t) - diff = np.abs(_eig_vec[:,l] - evr) - dx.append(l) - dy.append(np.max(diff)) - dy2.append(abs(_eig_val[l] - eigval_ref[l])) - - p, = axc.plot(dx, dy, color=cols[i]) - axc.plot(dx, dy2, color=cols[i], ls='--') - if j == 0: - lines.append(p) - labels.append(_meth.__name__) - - - t, w = method_kle.get_simpson_weights_times(t_max, ng) - r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) - _eig_val, _eig_vec = method_kle.solve_hom_fredholm(r, w, eig_val_min=0) - method_kle.align_eig_vec(_eig_vec) - _eig_vec = _eig_vec[1::2, :] - t = t[1::2] - - dx = [] - dy = [] - dy2 = [] - - for l in range(len(_eig_val)): - evr = eigvec_ref[l](t) - diff = np.abs(_eig_vec[:, l] - evr) - dx.append(l) - dy.append(np.max(diff)) - dy2.append(abs(_eig_val[l] - eigval_ref[l])) - - p, = axc.plot(dx, dy, color='lime') - p_lam, = axc.plot(list(range(ng)), eigval_ref[:ng], color='k') - if j == 0: - lines.append(p) - labels.append('simp 2nd') - lines.append(p_lam) - labels.append('abs(lamnda_i)') - - axc.grid() - axc.set_yscale('log') - axc.set_xlim([0,100]) - axc.set_ylim([1e-5, 10]) - axc.set_title("ng {}".format(ng)) - - - fig.suptitle("use ref with ng_ref {} and method '{}'".format(ng_ref, _meth_ref.__name__)) - fig.legend(handles=lines, labels=labels, ncol=3, loc='lower center') - fig.subplots_adjust(bottom=0.15) - plt.show() - -def show_solve_fredholm_interp_eigenfunc(): - """ - here we take the discrete eigenfunctions of the Fredholm problem - and use qubic interpolation to check the integral equality. - - the difference between the midpoint weights and simpson weights become - visible. Although the simpson integration yields on average a better performance - there are high fluctuation in the error. 
- """ - _WC_ = 2 - def lac(t): - return np.exp(- np.abs(t) - 1j*_WC_*t) - - t_max = 10 - ng = 81 - ngfac = 2 - tfine = np.linspace(0, t_max, (ng-1)*ngfac+1) - - lef = tools.LorentzianEigenFunctions(t_max=t_max, gamma=1, w=_WC_, num=5) - - - fig, ax = plt.subplots(nrows=2, ncols=2, sharey=True, sharex=True) - ax = ax.flatten() - - for idx in range(4): - u_exact = lef.get_eigfunc(idx)(tfine) - method_kle.align_eig_vec(u_exact.reshape(-1,1)) - - t, w = sp.method_kle.get_mid_point_weights_times(t_max, ng) - r = lac(t.reshape(-1, 1) - t.reshape(1, -1)) - _eig_val, _eig_vec = sp.method_kle.solve_hom_fredholm(r, w) - method_kle.align_eig_vec(_eig_vec) - u0 = tools.ComplexInterpolatedUnivariateSpline(t, _eig_vec[:,idx]) - - err = np.abs(u0(tfine) - u_exact) - axc = ax[idx] - axc.plot(tfine, err, color='r', label='midp') - - t, w = sp.method_kle.get_trapezoidal_weights_times(t_max, ng) - r = lac(t.reshape(-1, 1) - t.reshape(1, -1)) - _eig_val, _eig_vec = sp.method_kle.solve_hom_fredholm(r, w) - method_kle.align_eig_vec(_eig_vec) - u0 = tools.ComplexInterpolatedUnivariateSpline(t, _eig_vec[:, idx]) - err = np.abs(u0(tfine) - u_exact) - axc.plot(tfine, err, color='b', label='trapz') - axc.plot(tfine[::ngfac], err[::ngfac], ls='', marker='x', color='b') - - t, w = sp.method_kle.get_simpson_weights_times(t_max, ng) - r = lac(t.reshape(-1, 1) - t.reshape(1, -1)) - _eig_val, _eig_vec = sp.method_kle.solve_hom_fredholm(r, w) - method_kle.align_eig_vec(_eig_vec) - u0 = tools.ComplexInterpolatedUnivariateSpline(t, _eig_vec[:, idx]) - err = np.abs(u0(tfine) - u_exact) - axc.plot(tfine, err, color='k', label='simp') - axc.plot(tfine[::ngfac], err[::ngfac], ls='', marker='x', color='k') - - axc.set_yscale('log') - axc.set_title("eigen function # {}".format(idx)) - axc.grid() - - - axc.set_yscale('log') - fig.suptitle("np.abs(int R(t-s)u_i(s) - lam_i * u_i(t))") - plt.show() - -def show_fredholm_eigvec_interpolation(): - """ - for ohmic sd : use 4 point and integral interpolation - for lorentzian : use simpson and spline interpolation - """ - t_max = 15 - corr = lac - corr = oac - - ng_ref = 3501 - - _meth_ref = method_kle.get_simpson_weights_times - _meth_ref = method_kle.get_trapezoidal_weights_times - _meth_ref = method_kle.get_four_point_weights_times - - t, w = _meth_ref(t_max, ng_ref) - - try: - with open("test_fredholm_interpolation.dump", 'rb') as f: - ref_data = pickle.load(f) - except FileNotFoundError: - ref_data = {} - key = (tuple(t), tuple(w), corr.__name__) - if key in ref_data: - eigval_ref, evec_ref = ref_data[key] - else: - r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) - eigval_ref, evec_ref = method_kle.solve_hom_fredholm(r, w) - ref_data[key] = eigval_ref, evec_ref - with open("test_fredholm_interpolation.dump", 'wb') as f: - pickle.dump(ref_data, f) - - method_kle.align_eig_vec(evec_ref) - t_ref = t - - eigvec_ref = [] - for l in range(ng_ref): - eigvec_ref.append(tools.ComplexInterpolatedUnivariateSpline(t, evec_ref[:, l])) - - meth = [method_kle.get_mid_point_weights_times, - method_kle.get_trapezoidal_weights_times, - method_kle.get_simpson_weights_times, - method_kle.get_four_point_weights_times, - method_kle.get_gauss_legendre_weights_times, - method_kle.get_tanh_sinh_weights_times] - cols = ['r', 'b', 'g', 'm', 'c', 'lime'] - - - - fig, ax = plt.subplots(ncols=2, nrows=2, sharex=True, sharey=True, figsize=(16,12)) - ax = ax.flatten() - - ks = [10,14,18,26] - - lns, lbs = [], [] - - for ik, k in enumerate(ks): - axc = ax[ik] - - ng = 4*k+1 - for i, _meth in enumerate(meth): - print(ik, 
i) - t, w = _meth(t_max, ng) - r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) - _eig_val, _eig_vec = method_kle.solve_hom_fredholm(r, w) - method_kle.align_eig_vec(_eig_vec) - - eigvec_intp = [] - for l in range(ng): - eigvec_intp.append(tools.ComplexInterpolatedUnivariateSpline(t, _eig_vec[:, l])) - - ydata_fixed = [] - ydata_spline = [] - ydata_integr_intp = [] - xdata = np.arange(min(ng, 100)) - - for idx in xdata: - evr = eigvec_ref[idx](t) - ydata_fixed.append(np.max(np.abs(_eig_vec[:,idx] - evr))) - ydata_spline.append(np.max(np.abs(eigvec_intp[idx](t_ref) - evec_ref[:,idx]))) - uip = np.asarray([my_intp(ti, corr, w, t, _eig_vec[:,idx], _eig_val[idx]) for ti in t_ref]) - ydata_integr_intp.append(np.max(np.abs(uip - evec_ref[:,idx]))) - - p1, = axc.plot(xdata, ydata_fixed, color=cols[i], label=_meth.__name__) - p2, = axc.plot(xdata, ydata_spline, color=cols[i], ls='--') - p3, = axc.plot(xdata, ydata_integr_intp, color=cols[i], alpha = 0.5) - if ik == 0: - lns.append(p1) - lbs.append(_meth.__name__) - - if ik == 0: - lines = [p1,p2,p3] - labels = ['fixed', 'spline', 'integral interp'] - axc.set_yscale('log') - axc.set_title("ng {}".format(ng)) - axc.set_xlim([0,100]) - axc.grid() - axc.legend() - - - fig.legend(lines, labels, loc = "lower right", ncol=3) - fig.legend(lns, lbs, loc="lower left", ncol=2) - plt.subplots_adjust(bottom = 0.15) - - plt.savefig("test_fredholm_eigvec_interpolation_{}_.pdf".format(corr.__name__)) - plt.show() - - - -def show_reconstr_ac_interp(): - t_max = 25 - corr = lac - #corr = oac - - meth = [method_kle.get_mid_point_weights_times, - method_kle.get_trapezoidal_weights_times, - method_kle.get_simpson_weights_times, - method_kle.get_four_point_weights_times, - method_kle.get_gauss_legendre_weights_times, - method_kle.get_tanh_sinh_weights_times] - - cols = ['r', 'b', 'g', 'm', 'c'] - - def my_intp(ti, corr, w, t, u, lam): - return np.sum(corr(ti - t) * w * u) / lam - - fig, ax = plt.subplots(figsize=(16, 12)) - - ks = [40] - for i, k in enumerate(ks): - axc = ax - ng = 4 * k + 1 - for i, _meth in enumerate(meth): - t, w = _meth(t_max, ng) - r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) - _eig_val, _eig_vec = method_kle.solve_hom_fredholm(r, w) - - tf = method_kle.subdevide_axis(t, ngfac=3) - tsf = method_kle.subdevide_axis(tf, ngfac=2) - - diff1 = - corr(t.reshape(-1,1) - t.reshape(1,-1)) - diff2 = - corr(tf.reshape(-1, 1) - tf.reshape(1, -1)) - diff3 = - corr(tsf.reshape(-1, 1) - tsf.reshape(1, -1)) - - xdata = np.arange(ng) - ydata1 = np.ones(ng) - ydata2 = np.ones(ng) - ydata3 = np.ones(ng) - - for idx in xdata: - evec = _eig_vec[:, idx] - if _eig_val[idx] < 0: - break - sqrt_eval = np.sqrt(_eig_val[idx]) - - uip = np.asarray([my_intp(ti, corr, w, t, evec, sqrt_eval) for ti in tf]) - uip_spl = tools.ComplexInterpolatedUnivariateSpline(tf, uip) - uip_sf = uip_spl(tsf) - diff1 += _eig_val[idx] * evec.reshape(-1, 1) * np.conj(evec.reshape(1, -1)) - diff2 += uip.reshape(-1, 1) * np.conj(uip.reshape(1, -1)) - diff3 += uip_sf.reshape(-1,1) * np.conj(uip_sf.reshape(1,-1)) - ydata1[idx] = np.max(np.abs(diff1)) - ydata2[idx] = np.max(np.abs(diff2)) - ydata3[idx] = np.max(np.abs(diff3)) - - p, = axc.plot(xdata, ydata1, label=_meth.__name__, alpha = 0.5) - axc.plot(xdata, ydata2, color=p.get_color(), ls='--') - axc.plot(xdata, ydata3, color=p.get_color(), lw=2) - - axc.set_yscale('log') - axc.set_title("ng: {}".format(ng)) - axc.grid() - axc.legend() - plt.show() - - - - - - - -def show_lac_error_scaling(): - """ - """ - t_max = 15 - corr = lac - - idx = 0 - lef = 
tools.LorentzianEigenFunctions(t_max=t_max, gamma=1, w=_WC_, num=10) - u = lef.get_eigfunc(idx) - - ngs = np.logspace(1, 2.5, 60, dtype=np.int) - - _meth = method_kle.get_mid_point_weights_times - d = [] - for ng in ngs: - t, w = _meth(t_max, ng) - r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) - _eig_val, _eig_vec = method_kle.solve_hom_fredholm(r, w) - method_kle.align_eig_vec(_eig_vec) - ut = u(t) - method_kle.align_eig_vec(ut.reshape(-1,1)) - _d = np.abs(_eig_vec[:, idx]-ut) - d.append(np.max(_d)) - plt.plot(ngs, d, label='midp', marker='o', ls='') - - - _meth = method_kle.get_trapezoidal_weights_times - d = [] - for ng in ngs: - t, w = _meth(t_max, ng) - r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) - _eig_val, _eig_vec = method_kle.solve_hom_fredholm(r, w) - method_kle.align_eig_vec(_eig_vec) - ut = u(t) - method_kle.align_eig_vec(ut.reshape(-1, 1)) - _d = np.abs(_eig_vec[:, idx] - ut) - d.append(np.max(_d)) - plt.plot(ngs, d, label='trapz', marker='o', ls='') - - - _meth = method_kle.get_simpson_weights_times - d = [] - ng_used = [] - for ng in ngs: - ng = 2*(ng//2)+1 - if ng in ng_used: - continue - ng_used.append(ng) - t, w = _meth(t_max, ng) - r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) - _eig_val, _eig_vec = method_kle.solve_hom_fredholm(r, w) - method_kle.align_eig_vec(_eig_vec) - ut = u(t) - method_kle.align_eig_vec(ut.reshape(-1, 1)) - _d = np.abs(_eig_vec[:, idx] - ut) - d.append(np.max(_d)) - plt.plot(ng_used, d, label='simp', marker='o', ls='') - - x = np.logspace(1, 3, 50) - plt.plot(x, 0.16 / x, color='0.5') - plt.plot(x, 1 / x ** 2, color='0.5') - plt.plot(x, 200 / x ** 4, color='0.5') - plt.plot(x, 200000 / x ** 6, color='0.5') - - - - - - plt.yscale('log') - plt.xscale('log') - - plt.legend() - plt.grid() - plt.show() - -def show_oac_error_scaling(): - """ - """ - t_max = 15 - corr = lac - - idx = 0 - lef = tools.LorentzianEigenFunctions(t_max=t_max, gamma=1, w=_WC_, num=10) - u = lef.get_eigfunc(idx) - - ngs = np.logspace(1, 2.5, 60, dtype=np.int) - - _meth = method_kle.get_mid_point_weights_times - d = [] - for ng in ngs: - t, w = _meth(t_max, ng) - r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) - _eig_val, _eig_vec = method_kle.solve_hom_fredholm(r, w) - method_kle.align_eig_vec(_eig_vec) - ut = u(t) - method_kle.align_eig_vec(ut.reshape(-1,1)) - _d = np.abs(_eig_vec[:, idx]-ut) - d.append(np.max(_d)) - plt.plot(ngs, d, label='midp', marker='o', ls='') - - - _meth = method_kle.get_trapezoidal_weights_times - d = [] - for ng in ngs: - t, w = _meth(t_max, ng) - r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) - _eig_val, _eig_vec = method_kle.solve_hom_fredholm(r, w) - method_kle.align_eig_vec(_eig_vec) - ut = u(t) - method_kle.align_eig_vec(ut.reshape(-1, 1)) - _d = np.abs(_eig_vec[:, idx] - ut) - d.append(np.max(_d)) - plt.plot(ngs, d, label='trapz', marker='o', ls='') - - - _meth = method_kle.get_simpson_weights_times - d = [] - ng_used = [] - for ng in ngs: - ng = 2*(ng//2)+1 - if ng in ng_used: - continue - ng_used.append(ng) - t, w = _meth(t_max, ng) - r = corr(t.reshape(-1, 1) - t.reshape(1, -1)) - _eig_val, _eig_vec = method_kle.solve_hom_fredholm(r, w) - method_kle.align_eig_vec(_eig_vec) - ut = u(t) - method_kle.align_eig_vec(ut.reshape(-1, 1)) - _d = np.abs(_eig_vec[:, idx] - ut) - d.append(np.max(_d)) - plt.plot(ng_used, d, label='simp', marker='o', ls='') - - x = np.logspace(1, 3, 50) - plt.plot(x, 0.16 / x, color='0.5') - plt.plot(x, 1 / x ** 2, color='0.5') - plt.plot(x, 200 / x ** 4, color='0.5') - plt.plot(x, 200000 / x ** 6, color='0.5') - - - - - - 
plt.yscale('log') - plt.xscale('log') - - plt.legend() - plt.grid() - plt.show() - -def show_lac_simp_scaling(): - """ - even using the exact eigen functions yields bad error scaling due to the - non smoothness of the correlation function at tau=0 - - the problem is cured if the integral over s runs from 0 to t and then from - t to t_max, this is only ensured for midpoint and trapezoidal weights. - """ - t_max = 15 - corr = lac - - idx = 0 - lef = tools.LorentzianEigenFunctions(t_max=t_max, gamma=1, w=_WC_, num=10) - u = lef.get_eigfunc(idx) - c = 1/lef.get_eigval(idx) - - ngs = np.logspace(1, 2.5, 60, dtype=np.int) - - _meth = method_kle.get_simpson_weights_times - d0 = [] - d1 = [] - ng_used = [] - for ng in ngs: - ng = 2*(ng//2)+1 - if ng in ng_used: - continue - ng_used.append(ng) - t, w = _meth(t_max, ng) - ti = 0 - di = abs(u(ti) - c*np.sum(w*corr(ti-t)*u(t))) - d0.append(di) - ti = 1 - di = abs(u(ti) - c * np.sum(w * corr(ti - t) * u(t))) - d1.append(di) - - plt.plot(ng_used, d0, marker='o', color='k', label="simps int corr(0-s) u(s)") - plt.plot(ng_used, d1, marker='o', color='r', label="simps int corr(1-s) u(s)") - - ng_used = np.asanyarray(ng_used) - plt.plot(ng_used, 1 / ng_used ** 2, label='1/ng**2') - plt.plot(ng_used, 1 / ng_used ** 4, label='1/ng**4') - - plt.yscale('log') - plt.xscale('log') - - - plt.legend() - plt.grid() - plt.show() - - if __name__ == "__main__": logging.basicConfig(level=logging.INFO) @@ -1103,14 +296,4 @@ if __name__ == "__main__": # test_solve_fredholm() # test_solve_fredholm_reconstr_ac() # test_auto_ng() - - show_auto_ng() - # show_compare_weights_in_solve_fredholm_oac() - # show_compare_weights_in_solve_fredholm_lac() - # show_solve_fredholm_error_scaling_oac() - # show_fredholm_eigvec_interpolation() - # show_solve_fredholm_interp_eigenfunc() - # show_reconstr_ac_interp() - # show_lac_error_scaling() - # show_lac_simp_scaling() pass
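
For reference, the integral ("Nyström") interpolation that the deleted demos perform via my_intp can be written down in a few self-contained lines: a discrete eigenvector of the Fredholm equation is lifted from the grid to arbitrary times via u(t) = (1/lam) * sum_k w_k alpha(t - t_k) u_k. The sketch below is a minimal illustration, assuming an exponential kernel, an equidistant grid, and trapezoidal weights; alpha, u_interp, and the grid parameters are assumptions of the example, not code from the module.

import numpy as np

def alpha(tau):
    # illustrative exponential kernel of the same shape as the lac demo kernel
    return np.exp(-np.abs(tau) - 2j * tau)

t_max, ng = 10.0, 101
t, dt = np.linspace(0.0, t_max, ng, retstep=True)
w = np.full(ng, dt)            # trapezoidal weights on the equidistant grid
w[0] = w[-1] = dt / 2

# symmetrize with sqrt(w) so numpy's Hermitian eigensolver applies, then undo
r = alpha(t.reshape(-1, 1) - t.reshape(1, -1))
sw = np.sqrt(w)
lam, phi = np.linalg.eigh(sw[:, None] * r * sw[None, :])
lam, u = lam[::-1], phi[:, ::-1] / sw[:, None]   # largest eigenvalue first

def u_interp(ti, k):
    # Nystroem interpolation of the k-th discrete eigenfunction at time ti
    return np.sum(alpha(ti - t) * w * u[:, k]) / lam[k]

# at the grid points the interpolant reproduces the discrete eigenvector
err = np.max(np.abs(np.array([u_interp(ti, 0) for ti in t]) - u[:, 0]))
print(err)   # ~ machine precision

At the grid points the Nyström interpolant reproduces the discrete eigenvector up to round-off, which is why the demos above use it as the reference "integral interpolation" between grid points.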
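
The reasoning recorded in the show_lac_simp_scaling docstring — Simpson weights scale badly because the auto correlation function is not smooth at tau = 0, and splitting the integral over s at s = t cures it — can be checked directly with a toy integrand. A minimal sketch, assuming the illustrative kernel exp(-|t-s|) on the interval [0, 1] (neither is taken from the module):

import numpy as np
from scipy.integrate import simpson   # named simps in very old SciPy versions

# exp(-|t - s|) has a kink at s = t, so composite Simpson quadrature over
# [0, 1] degrades to roughly 1/n**2; integrating 0..t and t..1 separately
# keeps each piece smooth and restores roughly 1/n**4 convergence.
t = 1.0 / 3.0                                    # kink strictly inside [0, 1]
exact = (1 - np.exp(-t)) + (1 - np.exp(-(1 - t)))

for n in (17, 33, 65, 129):
    s = np.linspace(0.0, 1.0, n)
    err_kink = abs(simpson(np.exp(-np.abs(t - s)), x=s) - exact)

    sl = np.linspace(0.0, t, n)                  # split the domain at the kink
    sr = np.linspace(t, 1.0, n)
    err_split = abs(simpson(np.exp(sl - t), x=sl)
                    + simpson(np.exp(t - sr), x=sr) - exact)
    print(n, err_kink, err_split)                # ~1/n**2 vs. ~1/n**4

This mirrors the behavior plotted by show_lac_simp_scaling: with ti = 0 the kink sits at the boundary and Simpson behaves well, while for an interior ti the single non-smooth panel dominates the error.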