PRE-RESULT: input field at A site + EOM stepping

This commit is contained in:
Valentin Boettcher 2024-08-20 15:32:18 -04:00
parent 2c865dfc0f
commit 1c7b259b86
4 changed files with 191 additions and 136 deletions

View file

@ -11,6 +11,62 @@ from scipy.ndimage import rotate
# %% interactive
class WelfordAggregator:
    """A class to aggregate values using the Welford algorithm.

    The Welford algorithm is an online algorithm to calculate the mean
    and variance of a series of values without storing the samples.

    The aggregator keeps track of the number of samples, the running
    mean and an accumulator from which the variance is derived.

    See also the `Wikipedia article
    <https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm>`_.

    :param first_value: The first value to aggregate.
    """

    __slots__ = ["n", "mean", "_m_2"]

    def __init__(self, first_value: np.ndarray):
        # number of aggregated samples
        self.n = 1
        # copy: ``update`` modifies ``mean`` in place, and aliasing the
        # caller's array would silently corrupt it
        self.mean = np.copy(first_value)
        # running sum of squared deviations (M2 in Welford's notation)
        self._m_2 = np.zeros_like(first_value)

    def update(self, new_value: np.ndarray):
        """Updates the aggregator with a new value."""
        self.n += 1
        delta = new_value - self.mean
        self.mean += delta / self.n
        delta2 = new_value - self.mean
        # abs() keeps the accumulator real and non-negative for
        # complex-valued samples as well
        self._m_2 += np.abs(delta) * np.abs(delta2)

    @property
    def sample_variance(self) -> np.ndarray:
        """
        The empirical sample variance (:math:`N-1` normalization).

        Returns zeros when only a single sample has been aggregated.
        """
        if self.n == 1:
            return np.zeros_like(self.mean)
        return self._m_2 / (self.n - 1)

    @property
    def ensemble_variance(self) -> np.ndarray:
        """The ensemble variance (variance of the mean)."""
        return self.sample_variance / self.n

    @property
    def ensemble_std(self) -> np.ndarray:
        """The ensemble standard deviation."""
        return np.sqrt(self.ensemble_variance)
def solve_shot(t, params, t_before, t_after):
solution = solve(t, params)
amps = solution.y[::, len(t_before) - 1 :]
@ -18,7 +74,9 @@ def solve_shot(t, params, t_before, t_after):
return t_after, amps
def make_shots(params, total_lifetimes, eom_range, eom_steps, σ_modulation_time):
def make_shots(
params, total_lifetimes, eom_range, eom_steps, σ_modulation_time, num_freq
):
solutions = []
analyze_time = params.lifetimes(total_lifetimes) - params.laser_off_time
@ -30,15 +88,13 @@ def make_shots(params, total_lifetimes, eom_range, eom_steps, σ_modulation_time
rng = np.random.default_rng(seed=0)
for step in range(eom_steps):
base = params.Ω * (
eom_range[0] + (eom_range[1] - eom_range[0]) * step / eom_steps
)
current_params = copy.deepcopy(params)
current_params.drive_override = (
np.array(
[
params.Ω
* (eom_range[0] + (eom_range[1] - eom_range[0]) * step / eom_steps)
]
),
np.array([1.0]),
base + params.Ω * np.arange(num_freq),
np.ones(num_freq),
)
off_time = rng.normal(
@ -69,6 +125,8 @@ def process_shots(solutions, noise_amplitude, params):
signals.append((t, signal))
noise_amplitude *= 2 * 2 * np.pi / params.η
aggregate = None
for t, signal in signals:
signal += rng.normal(scale=noise_amplitude, size=len(signal))
window = (0, t[-1])
@ -76,51 +134,55 @@ def process_shots(solutions, noise_amplitude, params):
freq, fft = fourier_transform(
t,
signal,
low_cutoff=0.5 * params.Ω,
high_cutoff=params.Ω * 2.5,
low_cutoff=0.1 * params.Ω,
high_cutoff=params.Ω * 5,
window=window,
)
power = np.abs(fft) ** 2
power = power / power.max()
if average_power_spectrum is None:
average_power_spectrum = power
# ugly hack because shape is hard to predict
if aggregate is None:
aggregate = WelfordAggregator(power)
else:
average_power_spectrum += power
aggregate.update(power)
power = average_power_spectrum / len(solutions)
power -= np.median(power)
power /= power.max()
return (freq, power)
max_power = np.max(aggregate.mean)
return (freq, aggregate.mean / max_power, aggregate.ensemble_std / max_power)
def plot_power_spectrum(
ax_spectrum, freq, average_power_spectrum, params, annotate=True
ax_spectrum, freq, average_power_spectrum, σ_power_spectrum, params, annotate=True
):
# ax_spectrum.plot(freq, average_power_spectrum)
runtime = RuntimeParams(params)
ringdown_params = RingdownParams(
fω_shift=params.measurement_detuning,
mode_window=(3, 3),
mode_window=(4, 4),
fΩ_guess=params.Ω,
fδ_guess=params.Ω * params.δ,
η_guess=0.5,
absolute_low_cutoff=8,
absolute_low_cutoff=0.1 * params.Ω,
)
peak_info = find_peaks(
freq, average_power_spectrum, ringdown_params, prominence=0.1 / 2
freq, average_power_spectrum, ringdown_params, prominence=0.1, height=0.5
)
peak_info, lm_result = refine_peaks(peak_info, ringdown_params, height_cutoff=0.05)
peak_info, lm_result = refine_peaks(
peak_info, ringdown_params, height_cutoff=0.05, σ=σ_power_spectrum
)
print(lm_result.fit_report())
peak_info.power = average_power_spectrum
plot_spectrum_and_peak_info(
ax_spectrum, peak_info, ringdown_params, annotate=annotate
)
if lm_result is not None:
ax_spectrum.plot(freq, lm_result.best_fit, color="red")
fine_freq = np.linspace(freq.min(), freq.max(), 5000)
fine_fit = lm_result.eval(ω=fine_freq)
ax_spectrum.plot(fine_freq, fine_fit, color="red")
ax_spectrum.set_ylim(-0.1, max(1, fine_fit.max() * 1.1))
for i, peak_freq in enumerate(runtime.Ωs):
pos = np.abs(
@ -149,7 +211,7 @@ def plot_power_spectrum(
def generate_data(
g_0=0.3,
g_0=0.5,
η_factor=5,
noise_amplitude=0.3,
laser_detuning=0,
@ -160,6 +222,7 @@ def generate_data(
excitation_lifetimes=2,
measurement_lifetimes=4,
σ_modulation_time=0.01,
num_freq=3,
):
η = 0.2
Ω = 13
@ -171,7 +234,7 @@ def generate_data(
δ=1 / 4,
ω_c=0.1,
g_0=g_0,
laser_detuning=13 * (-1 - 1 / 4) + laser_detuning,
laser_detuning=laser_detuning, # 13 * (-1 - 1 / 4) + laser_detuning,
N=N,
N_couplings=N,
measurement_detuning=0,
@ -193,27 +256,34 @@ def generate_data(
eom_ranges,
eom_steps,
σ_modulation_time,
num_freq,
)
(sol_on_res) = make_shots(
params,
excitation_lifetimes + measurement_lifetimes,
((1 + params.δ), (1 + params.δ)),
((1 - params.δ), (1 - params.δ)),
1,
0,
num_freq,
)
(sol_on_res_bath) = make_shots(
params,
excitation_lifetimes + measurement_lifetimes,
((1), (1)),
((1 - params.δ * 1.1), (1 - params.δ * 1.1)),
1,
0,
num_freq,
)
freq, average_power_spectrum = process_shots(solutions, noise_amplitude, params)
_, spectrum_on_resonance = process_shots(sol_on_res, noise_amplitude, params)
_, spectrum_on_resonance_bath = process_shots(
freq, average_power_spectrum, σ_power_spectrum = process_shots(
solutions, noise_amplitude, params
)
_, spectrum_on_resonance, σ_power_spectrum_on_resonance = process_shots(
sol_on_res, noise_amplitude, params
)
_, spectrum_on_resonance_bath, σ_power_spectrum_on_resonance_bath = process_shots(
sol_on_res_bath, noise_amplitude, params
)
@ -225,13 +295,28 @@ def generate_data(
Ω/2π = {params.Ω}MHz, η/2π = {params.η}MHz, g_0 = {params.g_0}Ω, N = {params.N}
noise amplitude = {noise_amplitude} * 2/η, η_A = {η_factor} x η, EOM stepped from {eom_ranges[0]:.2f}Ω to {eom_ranges[1]:.2f}Ω in {eom_steps} steps
total time = {(excitation_lifetimes + measurement_lifetimes) * eom_steps} / η
""")
ax_multi, ax_single, ax_single_bath = fig.subplot_mosaic("AA\nBC").values()
plot_power_spectrum(ax_multi, freq, average_power_spectrum, params)
plot_power_spectrum(ax_single, freq, spectrum_on_resonance, params, annotate=False)
plot_power_spectrum(
ax_single_bath, freq, spectrum_on_resonance_bath, params, annotate=False
ax_multi, freq, average_power_spectrum, σ_power_spectrum, params
)
plot_power_spectrum(
ax_single,
freq,
spectrum_on_resonance,
σ_power_spectrum_on_resonance,
params,
annotate=False,
)
plot_power_spectrum(
ax_single_bath,
freq,
spectrum_on_resonance_bath,
σ_power_spectrum_on_resonance_bath,
params,
annotate=False,
)
runtime = RuntimeParams(params)
@ -249,9 +334,9 @@ def generate_data(
ax_ticks.plot(freq, np.zeros_like(freq), alpha=0)
ax_ticks.set_xlim(ax.get_xlim())
ax_multi.set_title("Averaged Power Spectrum (sans noise offset)")
ax_single.set_title("Single-shot, EOM on A site")
ax_single_bath.set_title("Single-shot, EOM on bath mode")
ax_multi.set_title("Averaged Power Spectrum")
ax_single.set_title("Single-shot, No detuning")
ax_single_bath.set_title("Single-shot, EOM 10% detuned")
# ax_spectrum.set_yscale(yscale)
@ -262,15 +347,16 @@ def generate_data(
# %% save
if __name__ == "__main__":
fig = generate_data(
g_0=0.5,
g_0=0.6,
η_factor=5,
noise_amplitude=2e-4,
N=2,
eom_ranges=(1.1, 1.35),
noise_amplitude=8e-3,
N=4,
eom_ranges=(0.7, 0.9),
eom_steps=100,
small_loop_detuning=0,
laser_detuning=0,
excitation_lifetimes=2,
measurement_lifetimes=30,
excitation_lifetimes=1,
measurement_lifetimes=4,
σ_modulation_time=0.2,
num_freq=4,
)

View file

@ -52,6 +52,14 @@ def lorentzian(ω, A, ω0, γ):
return A * (γ / 2) ** 2 * (1 / ((ω - ω0) ** 2 + (γ / 2) ** 2))
def one_over_freq_noise(ω, A, γ):
    """A Lorentzian line shape centered at zero frequency.

    See :any:`lorentzian` for the meaning of the parameters.
    """
    return lorentzian(ω, A, ω0=0, γ=γ)
def complex_lorentzian(ω, A, ω0, γ):
"""A Lorentzian function with amplitude ``A``, center frequency
``ω0``, and decay rate ``γ``.
@ -149,6 +157,7 @@ def find_peaks(
power_spectrum: np.ndarray,
params: RingdownParams,
prominence: float = 0.005,
height: float = 0.1,
) -> RingdownPeakData:
"""Determine the peaks of the power spectrum of the
ringdown data.
@ -158,16 +167,24 @@ def find_peaks(
:param params: The ringdown parameters, see :any:`RingdownParams`.
:param prominence: The prominence (vertical distance of peak from
surrounding valleys) of the peaks.
:param height: The minimum height of the peaks.
"""
freq_step = freq[1] - freq[0]
distance = params.fδ_guess / 2 / freq_step
if distance < 1:
raise ValueError("Insufficient frequency resolution.")
normalized = power_spectrum - np.median(power_spectrum)
normalized /= normalized.max()
peaks, peak_info = scipy.signal.find_peaks(
power_spectrum / power_spectrum.max(),
normalized,
distance=distance,
wlen=distance // 4,
wlen=distance,
prominence=prominence,
height=height,
)
peak_freqs = freq[peaks]
@ -201,7 +218,7 @@ def filter_peaks(
i in to_be_deleted
or Δω0 > uncertainty_threshold * params.fΩ_guess
or A < height_cutoff
or A > 1
or A > 10
or Δγ > uncertainty_threshold * params.fΩ_guess
):
np.delete(peaks.peaks, i)
@ -226,6 +243,7 @@ def refine_peaks(
params: RingdownParams,
uncertainty_threshold: float = 0.2,
height_cutoff: float = 0.1,
σ: np.ndarray | None = None,
):
"""
Refine the peak positions and frequencies by fitting Lorentzians.
@ -242,98 +260,21 @@ def refine_peaks(
peak_freqs = peaks.peak_freqs
power = peaks.power
new_freqs = []
new_widths = []
Δfreqs = []
Δwidths = []
lorentz_params = []
window = params.η_guess * 1
deleted_peaks = []
for i, peak_freq in enumerate(peak_freqs):
mask = (freqs > peak_freq - window) & (freqs < peak_freq + window)
windowed_freqs = freqs[mask]
windowed_power = power[mask]
scale_freqs = windowed_freqs[-1] - windowed_freqs[0]
root_freq = windowed_freqs[0]
scale_power = windowed_power.max()
windowed_freqs -= root_freq
windowed_freqs /= scale_freqs
windowed_power /= scale_power
p0 = [
peaks.power[peaks.peaks[i]] / scale_power,
(peak_freq - root_freq) / scale_freqs,
params.η_guess / scale_freqs,
]
bounds = (
[0, windowed_freqs[0], 0],
[np.inf, windowed_freqs[-1], np.inf],
)
try:
scipy_popt, pcov = scipy.optimize.curve_fit(
lorentzian,
windowed_freqs,
windowed_power,
p0=p0,
bounds=bounds,
)
perr = np.sqrt(np.diag(pcov))
if (
perr[1] * scale_freqs > uncertainty_threshold * params.fΩ_guess
or scipy_popt[0] * scale_power / np.max(power) < height_cutoff
):
deleted_peaks.append(i)
continue
scipy_popt[0] = scipy_popt[0] * scale_power
scipy_popt[1] = scipy_popt[1] * scale_freqs + root_freq
scipy_popt[2] = scipy_popt[2] * scale_freqs
lorentz_params.append(scipy_popt)
if perr[1] > uncertainty_threshold * params.fΩ_guess:
deleted_peaks.append(i)
continue
new_freqs.append(scipy_popt[1])
Δfreqs.append(perr[1])
new_widths.append(scipy_popt[2])
Δwidths.append(perr[2])
except:
deleted_peaks.append(i)
peaks.peaks = np.delete(peaks.peaks, deleted_peaks)
for key, value in peaks.peak_info.items():
if isinstance(value, np.ndarray):
peaks.peak_info[key] = np.delete(value, deleted_peaks)
peaks.peak_freqs = np.array(new_freqs)
peaks.Δpeak_freqs = np.array(Δfreqs)
peaks.peak_widths = np.array(new_widths)
peaks.Δpeak_widths = np.array(Δwidths)
peaks.lorentz_params = lorentz_params
total_model = None
model_params = []
global_power_scale = 1 # power.max()
scaled_power = power / global_power_scale
scaled_power = power
for i, (A, ω0, γ) in enumerate(lorentz_params):
if σ is None:
σ = np.zeros_like(power)
for i, (A, ω0) in enumerate(zip(peaks.peak_info["peak_heights"], peak_freqs)):
model = lmfit.Model(lorentzian, prefix=f"peak_{i}_")
initial_params = model.make_params(
A=dict(value=A / global_power_scale, min=0, max=np.inf),
A=dict(value=A, min=0, max=np.inf),
ω0=dict(value=ω0, min=0, max=np.inf),
γ=dict(value=γ, min=0, max=np.inf),
γ=dict(value=params.η_guess, min=0, max=np.inf),
)
if total_model is None:
@ -343,6 +284,17 @@ def refine_peaks(
model_params.append(initial_params)
model = lmfit.Model(one_over_freq_noise, prefix=f"zero_peak")
initial_params = model.make_params(
A=dict(value=1, min=0, max=np.inf),
ω0=dict(value=0, min=0, max=np.inf),
γ=dict(value=params.η_guess, min=0, max=np.inf),
)
total_model += model
model_params.append(initial_params)
aggregate_params = total_model.make_params()
for lm_params in model_params:
aggregate_params.update(lm_params)
@ -351,7 +303,19 @@ def refine_peaks(
aggregate_params.update(offset_model.make_params(offset=0, min=0, max=1))
total_model += offset_model
lm_result = total_model.fit(scaled_power, params=aggregate_params, ω=freqs)
lm_result = total_model.fit(
scaled_power,
params=aggregate_params,
ω=freqs,
weights=1 / σ if np.all(σ > 0) else None,
)
peaks.peak_freqs = np.zeros_like(peaks.peak_freqs)
peaks.Δpeak_freqs = np.zeros_like(peaks.peak_freqs)
peaks.peak_widths = np.zeros_like(peaks.peak_freqs)
peaks.Δpeak_widths = np.zeros_like(peaks.peak_freqs)
peaks.lorentz_params = [None] * len(peaks.peak_freqs)
for i in reversed(range(len(peaks.peak_freqs))):
peak_prefix = f"peak_{i}_"
@ -376,7 +340,12 @@ def refine_peaks(
peaks.lorentz_params[i] = A, ω0, γ
before_filter = len(peaks.peaks)
peaks = filter_peaks(peaks, params, uncertainty_threshold, height_cutoff)
if len(peaks.peaks) < before_filter:
return refine_peaks(peaks, params, uncertainty_threshold, height_cutoff)
return peaks, lm_result

View file

@ -251,7 +251,7 @@ def plot_spectrum_and_peak_info(
t = ax.text(
freq,
lorentz[0],
ax.get_ylim()[1] * 0.9,
rf"{i} (${roundfreq}\pm {rounderr}$)",
fontsize=20,
)

View file

@ -177,10 +177,10 @@ class RuntimeParams:
a_shift = 0
self.detunings *= 0
norm = np.sqrt(np.sum(self.g**2))
norm = np.sqrt(np.sum(abs(self.g) ** 2))
if norm > 0:
self.g *= params.g_0
if norm > 0:
self.g *= params.g_0 / norm
self.g *= 2 * np.pi
self.Ωs = Ωs