This commit is contained in:
Richard Hartmann 2016-11-08 18:02:09 +01:00
parent d11cbf6631
commit dc7fa8de99
2 changed files with 124 additions and 23 deletions

View file

@ -292,6 +292,16 @@ def get_rel_diff(corr, t_max, ng, weights_times, ng_fac):
rd = np.abs(recs_bcf - refc_bcf) / np.abs(refc_bcf)
return tfine, rd
def subdevide_axis(t, ngfac):
    """Refine the grid ``t`` by splitting every interval into ``ngfac`` parts.

    The original points are kept; between each pair of neighbouring points
    ``ngfac - 1`` linearly interpolated points are inserted, so the result
    has ``(len(t) - 1) * ngfac + 1`` entries.  ``t`` need not be equidistant.

    :param t: 1D array-like of grid points
    :param ngfac: subdivision factor (``ngfac = 1`` returns a copy of ``t``)
    :return: the refined grid as a numpy array
    """
    t = np.asarray(t)
    n = len(t)
    tfine = np.empty(shape=((n - 1) * ngfac + 1))
    # the coarse points keep their values on the fine grid
    tfine[::ngfac] = t
    # the k-th intermediate point of each interval is the convex
    # combination of that interval's end points
    for k in range(1, ngfac):
        tfine[k::ngfac] = ((ngfac - k) * t[:-1] + k * t[1:]) / ngfac
    return tfine
def auto_ng(corr, t_max, ngfac=2, meth=get_mid_point_weights_times, tol=1e-3, diff_method='full', dm_random_samples=10**4):
r"""increase the number of gridpoints until the desired accuracy is met
@ -387,37 +397,27 @@ def auto_ng(corr, t_max, ngfac=2, meth=get_mid_point_weights_times, tol=1e-3, di
_eig_val, _eig_vec = solve_hom_fredholm(r, w) # using integration weights w
time_fredholm += (time.time() - t0)
tfine, dt = np.linspace(0, t_max, (ng - 1) * ngfac + 1, retstep=True) # setup fine
tsfine = tfine[:-1] + dt/2 # and super fine time grid
t0 = time.time() # efficient way to calculate the auto correlation
alpha_k = _calc_corr_min_t_plus_t(tfine, corr) # from -tmax untill tmax on the fine grid
time_calc_ac += (time.time() - t0) # needed for integral interpolation
tfine = subdevide_axis(t, ngfac) # setup fine
tsfine = subdevide_axis(tfine, 2) # and super fine time grid
if diff_method == 'full':
ng_sfine = len(tsfine)
alpha_ref = np.empty(shape=(ng_sfine, ng_sfine), dtype=np.complex128)
for i in range(ng_sfine):
idx = ng_sfine - i
alpha_ref[:, i] = alpha_k[idx:idx + ng_sfine] # note we can use alpha_k as
# alpha(ti+dt/2 - (tj+dt/2)) = alpha(ti - tj)
alpha_ref = corr(tsfine.reshape(-1,1) - tsfine.reshape(1,-1))
diff = -alpha_ref
old_md = np.inf
sqrt_lambda_ui_fine_all = []
for i in range(ng):
evec = _eig_vec[:, i]
if _eig_val[i] < 0:
print(ng, i)
break
sqrt_eval = np.sqrt(_eig_val[i])
if ngfac != 1:
t0 = time.time()
# when using sqrt_lambda instead of lambda we get sqrt_lamda time u
# which is the quantity needed for the stochastic process
# generation
sqrt_lambda_ui_fine = stocproc_c.eig_func_interp(delta_t_fac=ngfac,
time_axis=t,
alpha_k=alpha_k,
weights=w,
eigen_val=sqrt_eval,
eigen_vec=evec)
sqrt_lambda_ui_fine = np.asarray([np.sum(corr(ti - t) * w * evec) / sqrt_eval for ti in tfine])
time_integr_intp += (time.time() - t0)
else:
sqrt_lambda_ui_fine = evec*sqrt_eval

View file

@ -52,7 +52,7 @@ def test_weights(plot=False):
method_kle.get_simpson_weights_times,
method_kle.get_four_point_weights_times,
method_kle.get_gauss_legendre_weights_times,
method_kle.get_sinh_tanh_weights_times]
method_kle.get_tanh_sinh_weights_times]
cols = ['r', 'b', 'g', 'm', 'c']
errs = [2e-3, 2e-8, 7e-11, 5e-16, 8e-15]
I_exact = np.log(tm**2 + 1)/2
@ -370,6 +370,7 @@ def test_fredholm_eigvec_interpolation():
ng_ref = 3501
_meth_ref = method_kle.get_simpson_weights_times
_meth_ref = method_kle.get_mid_point_weights_times
t, w = _meth_ref(t_max, ng_ref)
try:
@ -395,10 +396,12 @@ def test_fredholm_eigvec_interpolation():
eigvec_ref.append(tools.ComplexInterpolatedUnivariateSpline(t, evec_ref[:, l]))
meth = [method_kle.get_mid_point_weights_times,
method_kle.get_trapezoidal_weights_times,
method_kle.get_simpson_weights_times,
method_kle.get_four_point_weights_times,
method_kle.get_gauss_legendre_weights_times]
cols = ['r', 'b', 'g', 'm', 'c']
method_kle.get_gauss_legendre_weights_times,
method_kle.get_tanh_sinh_weights_times]
cols = ['r', 'b', 'g', 'm', 'c', 'lime']
def my_intp(ti, corr, w, t, u, lam):
    # Eigenfunction value at ti from the Fredholm integral relation:
    # u(ti) = (1/lam) * sum_j w_j * R(ti - t_j) * u_j
    integrand = corr(ti - t) * w * u
    return integrand.sum() / lam
@ -406,7 +409,7 @@ def test_fredholm_eigvec_interpolation():
fig, ax = plt.subplots(ncols=2, nrows=2, sharex=True, sharey=True, figsize=(16,12))
ax = ax.flatten()
ks = [20,40,80,160]
ks = [10,20,40,80]
lns, lbs = [], []
@ -451,6 +454,7 @@ def test_fredholm_eigvec_interpolation():
axc.set_title("ng {}".format(ng))
axc.set_xlim([0,100])
axc.grid()
axc.legend()
fig.legend(lines, labels, loc = "lower right", ncol=3)
@ -497,6 +501,72 @@ def test_cython_interpolation():
eigen_vec = evec)
assert np.max(np.abs(ui_fine - ui_fine2)) < 2e-11
def test_reconstr_ac_interp():
    """Visually compare how well each quadrature scheme reconstructs the
    autocorrelation function from the KLE eigensystem.

    For every weight scheme the homogeneous Fredholm problem is solved on
    the coarse grid.  The reconstruction error
    ``max |sum_i lam_i u_i(t) conj(u_i(s)) - R(t - s)|`` is plotted against
    the number of eigenfunctions used, evaluated on three grids:
    the coarse grid itself (thin line), a 3x refined grid via integral
    interpolation (dashed), and a further 2x refined grid via spline
    interpolation of the fine values (thick).

    NOTE(review): this is a plotting-only check (``plt.show``); it makes
    no assertions.

    Fixes relative to the previous version: the inner loop no longer
    shadows the outer loop index ``i``, and the unused ``cols`` list was
    removed (line colors come from ``p.get_color()``).
    """
    t_max = 25
    corr = lac
    #corr = oac

    meth = [method_kle.get_mid_point_weights_times,
            method_kle.get_trapezoidal_weights_times,
            method_kle.get_simpson_weights_times,
            method_kle.get_four_point_weights_times,
            method_kle.get_gauss_legendre_weights_times,
            method_kle.get_tanh_sinh_weights_times]

    def my_intp(ti, corr, w, t, u, lam):
        # eigenfunction interpolation via the Fredholm integral relation
        return np.sum(corr(ti - t) * w * u) / lam

    fig, ax = plt.subplots(figsize=(16, 12))
    ks = [40]
    for k in ks:
        axc = ax
        ng = 4 * k + 1
        for _meth in meth:
            t, w = _meth(t_max, ng)
            r = corr(t.reshape(-1, 1) - t.reshape(1, -1))
            _eig_val, _eig_vec = method_kle.solve_hom_fredholm(r, w)

            # fine (3x) and super fine (further 2x) time grids
            tf = method_kle.subdevide_axis(t, ngfac=3)
            tsf = method_kle.subdevide_axis(tf, ngfac=2)

            # running reconstruction errors, initialised to -R on each grid
            diff1 = - corr(t.reshape(-1, 1) - t.reshape(1, -1))
            diff2 = - corr(tf.reshape(-1, 1) - tf.reshape(1, -1))
            diff3 = - corr(tsf.reshape(-1, 1) - tsf.reshape(1, -1))

            xdata = np.arange(ng)
            ydata1 = np.ones(ng)
            ydata2 = np.ones(ng)
            ydata3 = np.ones(ng)
            for idx in xdata:
                evec = _eig_vec[:, idx]
                # a negative eigenvalue signals numerical breakdown -- stop
                if _eig_val[idx] < 0:
                    break
                sqrt_eval = np.sqrt(_eig_val[idx])
                # sqrt(lam)*u on the fine grid via integral interpolation,
                # then splined onto the super fine grid
                uip = np.asarray([my_intp(ti, corr, w, t, evec, sqrt_eval) for ti in tf])
                uip_spl = tools.ComplexInterpolatedUnivariateSpline(tf, uip)
                uip_sf = uip_spl(tsf)
                diff1 += _eig_val[idx] * evec.reshape(-1, 1) * np.conj(evec.reshape(1, -1))
                diff2 += uip.reshape(-1, 1) * np.conj(uip.reshape(1, -1))
                diff3 += uip_sf.reshape(-1, 1) * np.conj(uip_sf.reshape(1, -1))
                ydata1[idx] = np.max(np.abs(diff1))
                ydata2[idx] = np.max(np.abs(diff2))
                ydata3[idx] = np.max(np.abs(diff3))

            p, = axc.plot(xdata, ydata1, label=_meth.__name__, alpha=0.5)
            axc.plot(xdata, ydata2, color=p.get_color(), ls='--')
            axc.plot(xdata, ydata3, color=p.get_color(), lw=2)

        axc.set_yscale('log')
        axc.set_title("ng: {}".format(ng))
        axc.grid()
        axc.legend()
    plt.show()
def test_reconstr_ac():
t_max = 15
res = method_kle.auto_ng(corr=oac,
@ -643,9 +713,38 @@ def test_solve_fredholm_interp_eigenfunc(plot=False):
plt.show()
def test_subdevide_axis():
    """Check ``subdevide_axis`` against hand-computed fine grids for the
    non-equidistant axis [0, 1, 3] at subdivision factors 1 through 4."""
    coarse = [0, 1, 3]
    expected = {
        1: [0, 1, 3],
        2: [0, 0.5, 1, 2, 3],
        3: [0, 1 / 3, 2 / 3, 1, 1 + 2 / 3, 1 + 4 / 3, 3],
        4: [0, 0.25, 0.5, 0.75, 1, 1.5, 2, 2.5, 3],
    }
    for fac, ref in expected.items():
        fine = method_kle.subdevide_axis(coarse, ngfac=fac)
        assert np.max(np.abs(fine - np.asarray(ref))) < 1e-15
def test_auto_ng():
    """Smoke test: ``auto_ng`` terminates for every weight scheme.

    Only verifies that the adaptive refinement completes and returns the
    eigenfunction array; no accuracy assertions are made here.
    """
    corr = oac
    t_max = 8
    ng_fac = 1
    schemes = (method_kle.get_mid_point_weights_times,
               method_kle.get_trapezoidal_weights_times,
               method_kle.get_simpson_weights_times,
               method_kle.get_four_point_weights_times,
               method_kle.get_gauss_legendre_weights_times)
    # method_kle.get_tanh_sinh_weights_times is deliberately left out
    for scheme in schemes:
        ui = method_kle.auto_ng(corr, t_max, ngfac=ng_fac, meth=scheme)
        print(scheme.__name__, ui.shape)
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
logging.basicConfig(level=logging.INFO)
# test_weights(plot=True)
# test_subdevide_axis()
# test_solve_fredholm()
# test_compare_weights_in_solve_fredholm_oac()
# test_compare_weights_in_solve_fredholm_lac()
@ -656,4 +755,6 @@ if __name__ == "__main__":
# test_solve_fredholm()
# test_solve_fredholm_reconstr_ac()
# test_solve_fredholm_interp_eigenfunc(plot=True)
# test_reconstr_ac_interp()
# test_auto_ng()
pass