This commit is contained in:
Richard Hartmann 2016-11-10 16:28:28 +01:00
parent aceccd44e2
commit f372b346e7
2 changed files with 65 additions and 9 deletions

View file

@ -400,10 +400,23 @@ def auto_ng(corr, t_max, ngfac=2, meth=get_mid_point_weights_times, tol=1e-3, di
time_fredholm += (time.time() - t0)
tfine = subdevide_axis(t, ngfac) # setup fine
tsfine = subdevide_axis(tfine, 2) # and super fine time grid
tsfine = subdevide_axis(tfine, 2)[1::2] # and super fine time grid
if is_equi:
t0 = time.time() # efficient way to calculate the auto correlation
alpha_k = _calc_corr_min_t_plus_t(tfine, corr) # from -tmax untill tmax on the fine grid
time_calc_ac += (time.time() - t0) # needed for integral interpolation
if diff_method == 'full':
alpha_ref = corr(tsfine.reshape(-1,1) - tsfine.reshape(1,-1))
if not is_equi:
alpha_ref = corr(tsfine.reshape(-1,1) - tsfine.reshape(1,-1))
else:
ng_sfine = len(tsfine)
alpha_ref = np.empty(shape=(ng_sfine, ng_sfine), dtype=np.complex128)
for i in range(ng_sfine):
idx = ng_sfine - i
alpha_ref[:, i] = alpha_k[idx:idx + ng_sfine] # note we can use alpha_k as
# alpha(ti+dt/2 - (tj+dt/2)) = alpha(ti - tj)
diff = -alpha_ref
old_md = np.inf
@ -417,12 +430,17 @@ def auto_ng(corr, t_max, ngfac=2, meth=get_mid_point_weights_times, tol=1e-3, di
if ngfac != 1:
t0 = time.time()
# when using sqrt_lambda instead of lambda we get sqrt_lamda time u
# which is the quantity needed for the stochastic process
# generation
# which is the quantity needed for the stochastic process generation
if not is_equi:
sqrt_lambda_ui_fine = np.asarray([np.sum(corr(ti - t) * w * evec) / sqrt_eval for ti in tfine])
else:
raise NotImplementedError
sqrt_lambda_ui_fine = stocproc_c.eig_func_interp(delta_t_fac=ngfac,
time_axis=t,
alpha_k=alpha_k,
weights=w,
eigen_val=sqrt_eval,
eigen_vec=evec)
time_integr_intp += (time.time() - t0)
else:
sqrt_lambda_ui_fine = evec*sqrt_eval

View file

@ -278,8 +278,8 @@ def test_auto_ng():
meth = [method_kle.get_mid_point_weights_times,
method_kle.get_trapezoidal_weights_times,
method_kle.get_simpson_weights_times,
method_kle.get_four_point_weights_times,
method_kle.get_gauss_legendre_weights_times]
method_kle.get_four_point_weights_times]
#method_kle.get_gauss_legendre_weights_times]
#method_kle.get_tanh_sinh_weights_times]
@ -287,6 +287,44 @@ def test_auto_ng():
ui = method_kle.auto_ng(corr, t_max, ngfac=ng_fac, meth = _meth)
print(_meth.__name__, ui.shape)
def show_auto_ng():
    """Plot how many KLE terms ``method_kle.auto_ng`` needs versus tolerance.

    For each integration-weight scheme in ``meth`` the number of eigenfunctions
    returned by ``auto_ng`` is plotted against the tolerance, and compared with
    the 'exact' count obtained by summing the analytic Lorentzian eigenfunction
    expansion until the reconstruction error of the correlation matrix drops
    below the same tolerance.  Shows the figure via ``plt.show()``.
    """
    corr = lac
    t_max = 15
    meth = [method_kle.get_mid_point_weights_times,
            method_kle.get_trapezoidal_weights_times,
            method_kle.get_simpson_weights_times,
            method_kle.get_four_point_weights_times]
            #method_kle.get_gauss_legendre_weights_times]
            #method_kle.get_tanh_sinh_weights_times]
    ng_fac = 1
    tols = np.logspace(-2, -1, 10)

    # number of eigenfunctions auto_ng keeps, per weight scheme, per tolerance
    for _meth in meth:
        d = []
        for tol in tols:
            ui = method_kle.auto_ng(corr, t_max, ngfac=ng_fac, meth=_meth, tol=tol)
            d.append(ui.shape[0])
        plt.plot(tols, d, label=_meth.__name__)

    # reference curve from the analytic eigenfunctions:
    # accumulate lambda_i * u_i(t) u_i*(t') until it reproduces corr within tol
    t = np.linspace(0, t_max, 500)
    # hoisted out of the tol loop: the eigenfunction set does not depend on tol,
    # and constructing 800 analytic eigenfunctions is the expensive part
    lef = tools.LorentzianEigenFunctions(t_max=t_max, gamma=1, w=_WC_, num=800)
    d = []
    for tol in tols:
        diff = -corr(t.reshape(-1, 1) - t.reshape(1, -1))
        for i in range(800):
            u = lef.get_eigfunc(i)(t)
            diff += lef.get_eigval(i) * u.reshape(-1, 1) * np.conj(u.reshape(1, -1))
            if np.max(np.abs(diff)) < tol:
                d.append(i + 1)
                break
        # NOTE(review): if 800 terms never reach tol, nothing is appended here
        # and plt.plot(tols, d) below fails on a length mismatch — confirm 800
        # is always sufficient for the chosen tolerance range.
    plt.plot(tols, d, label='exact')
    plt.legend()
    plt.xscale('log')
    plt.yscale('log')
    plt.grid()
    plt.show()
def show_compare_weights_in_solve_fredholm_oac():
"""
here we try to examine which integration weights perform best in order to
@ -1056,7 +1094,7 @@ def show_lac_simp_scaling():
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
# test_weights(plot=True)
test_is_axis_equidistant()
# test_is_axis_equidistant()
# test_subdevide_axis()
# test_analytic_lorentzian_eigenfunctions()
# test_solve_fredholm()
@ -1066,7 +1104,7 @@ if __name__ == "__main__":
# test_solve_fredholm_reconstr_ac()
# test_auto_ng()
show_auto_ng()
# show_compare_weights_in_solve_fredholm_oac()
# show_compare_weights_in_solve_fredholm_lac()
# show_solve_fredholm_error_scaling_oac()