mirror of https://github.com/vale981/fibre_walk_project_code
synced 2025-03-04 09:21:38 -05:00
try global lmfit
randomize phase peak finding works, but is suboptimal
This commit is contained in:
parent 4b0dee16ae
commit 392a560f00
7 changed files with 988 additions and 597 deletions
poetry.lock (generated, 1124 lines changed)
File diff suppressed because it is too large
@@ -12,6 +12,7 @@ click = "^8.1.7"
 matplotlib = "^3.8.4"
 networkx = "^3.3"
 pyyaml = "^6.0.1"
+lmfit = "^1.3.2"
 
 
 [tool.poetry.group.dev.dependencies]
@@ -11,13 +11,24 @@ from scipy.ndimage import rotate
 # %% interactive
 
 
-def make_shots(params, total_lifetimes, eom_range, eom_steps):
+def solve_shot(t, params, t_before, t_after):
+    solution = solve(t, params)
+    amps = solution.y[::, len(t_before) - 1 :]
+
+    return t_after, amps
+
+
+def make_shots(params, total_lifetimes, eom_range, eom_steps, σ_modulation_time):
     solutions = []
-    t = time_axis(params, lifetimes=total_lifetimes, resolution=0.01)
+
+    analyze_time = params.lifetimes(total_lifetimes) - params.laser_off_time
+    t_after = time_axis(params, total_time=analyze_time, resolution=0.01)
+
     pool = multiprocessing.Pool()
 
     shot_params = []
+    rng = np.random.default_rng(seed=0)
 
     for step in range(eom_steps):
         current_params = copy.deepcopy(params)
         current_params.drive_override = (
@@ -30,102 +41,111 @@ def make_shots(params, total_lifetimes, eom_range, eom_steps):
             np.array([1.0]),
         )
 
-        shot_params.append((t, current_params))
+        off_time = rng.normal(
+            params.laser_off_time, σ_modulation_time * params.lifetimes(1)
+        )
+        current_params.laser_off_time = off_time
+        current_params.drive_off_time = off_time
+        current_params.total_lifetimes = (off_time + analyze_time) / params.lifetimes(1)
 
-    solutions = pool.starmap(solve, shot_params)
-    return t, solutions
+        t_before = time_axis(params, total_time=off_time, resolution=0.01)
+        t = np.concatenate([t_before[:-1], t_after + t_before[-1]])
+
+        shot_params.append((t, current_params, t_before, t_after))
+
+    solutions = pool.starmap(solve_shot, shot_params)
+    return solutions
 
 
-def process_shots(t, solutions, noise_amplitude, params):
+def process_shots(solutions, noise_amplitude, params):
     average_power_spectrum = None
-    window = (float(params.laser_off_time) or 0, t[-1])
 
     rng = np.random.default_rng(seed=0)
 
     # let us get a measure calibrate the noise strength
-    signal_amp = 0
     signals = []
-    for solution in solutions:
-        signal = output_signal(t, solution.y, params)
-        signals.append(signal)
-        signal_amp += abs(signal).max()
+    for t, amps in solutions:
+        signal = output_signal(t, amps, params)
+        signals.append((t, signal))
 
-    signal_amp /= len(solutions)
+    noise_amplitude *= 2 * 2 * np.pi / params.η
 
-    for signal in signals:
-        signal += rng.normal(scale=noise_amplitude * signal_amp, size=len(signal))
+    for t, signal in signals:
+        signal += rng.normal(scale=noise_amplitude, size=len(signal))
+        window = (0, t[-1])
 
         freq, fft = fourier_transform(
             t,
             signal,
             low_cutoff=0.5 * params.Ω,
-            high_cutoff=params.Ω * (params.N + 1),
+            high_cutoff=params.Ω * 2.5,
             window=window,
         )
 
         power = np.abs(fft) ** 2
+        power = power / power.max()
         if average_power_spectrum is None:
             average_power_spectrum = power
 
         else:
             average_power_spectrum += power
 
-    return freq, (average_power_spectrum / len(solutions))
+    power = average_power_spectrum / len(solutions)
+    power -= np.median(power)
+    power /= power.max()
+
+    return (freq, power)
 
 
-def plot_power_spectrum(ax_spectrum, freq, average_power_spectrum, params):
-    offset = np.median(average_power_spectrum)
+def plot_power_spectrum(
+    ax_spectrum, freq, average_power_spectrum, params, annotate=True
+):
     # ax_spectrum.plot(freq, average_power_spectrum)
-    # runtime = RuntimeParams(params)
-    # lorentz_freqs = np.linspace(freq.min(), freq.max(), 10000)
-    # fake_spectrum = np.zeros_like(lorentz_freqs)
-
-    # for i, peak_freq in enumerate(runtime.Ωs):
-    #     pos = np.abs(
-    #         params.measurement_detuning
-    #         - peak_freq.real / (2 * np.pi)
-    #         + params.δ * params.Ω
-    #         + params.laser_detuning,
-    #     )
-
-    #     ax_spectrum.axvline(
-    #         pos,
-    #         color="red",
-    #         linestyle="--",
-    #         zorder=-100,
-    #     )
-
-    #     amplitude = average_power_spectrum[np.argmin(abs(freq - pos))] - offset
-    #     ax_spectrum.annotate(
-    #         mode_name(i),
-    #         (
-    #             pos + 0.1,
-    #             (amplitude + offset) * (1 if peak_freq.real < 0 else 1.1),
-    #         ),
-    #     )
-
-    #     fake_spectrum += lorentzian(
-    #         lorentz_freqs, amplitude, pos, peak_freq.imag / np.pi
-    #     )
-
-    # ax_spectrum.plot(lorentz_freqs, fake_spectrum + offset)
+    runtime = RuntimeParams(params)
 
     ringdown_params = RingdownParams(
         fω_shift=params.measurement_detuning,
-        mode_window=(5, 5),
+        mode_window=(3, 3),
         fΩ_guess=params.Ω,
         fδ_guess=params.Ω * params.δ,
         η_guess=0.5,
-        absolute_low_cutoff=1,
+        absolute_low_cutoff=8,
     )
 
     peak_info = find_peaks(
-        freq, average_power_spectrum, ringdown_params, prominence=0.001
+        freq, average_power_spectrum, ringdown_params, prominence=0.1
     )
-    peak_info = refine_peaks(peak_info, ringdown_params)
+    peak_info, lm_result = refine_peaks(peak_info, ringdown_params, height_cutoff=0.05)
     peak_info.power = average_power_spectrum
-    plot_spectrum_and_peak_info(ax_spectrum, peak_info, ringdown_params, annotate=True)
-    print(peak_info.peak_freqs)
+    plot_spectrum_and_peak_info(
+        ax_spectrum, peak_info, ringdown_params, annotate=annotate
+    )
+    if lm_result is not None:
+        ax_spectrum.plot(freq, lm_result.best_fit, color="red")
+
+    for i, peak_freq in enumerate(runtime.Ωs):
+        pos = np.abs(
+            params.measurement_detuning
+            - peak_freq.real / (2 * np.pi)
+            + params.δ * params.Ω
+            + params.laser_detuning,
+        )
+
+        ax_spectrum.axvline(
+            pos,
+            color="black",
+            alpha=0.5,
+            linestyle="--",
+            zorder=-100,
+        )
+
+        ax_spectrum.axvspan(
+            pos - peak_freq.imag / (2 * np.pi),
+            pos + peak_freq.imag / (2 * np.pi),
+            color="black",
+            alpha=0.05,
+            linestyle="--",
+            zorder=-100,
+        )
 
 
 def generate_data(
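The randomized `laser_off_time` introduced in `make_shots` above appears to be the "randomize phase" part of the commit message: every shot switches the modulation off at a slightly different time, so each ringdown starts with a different phase, and `process_shots` averages the per-shot power spectra, which are insensitive to that phase. Below is a self-contained toy sketch of why averaging normalized power spectra keeps the peaks while suppressing shot-to-shot noise; it uses plain NumPy with made-up numbers, not the project's `solve`/`output_signal` machinery.

import numpy as np

rng = np.random.default_rng(0)
fs, T = 1000.0, 2.0                      # sample rate (Hz) and record length (s)
t = np.arange(0, T, 1 / fs)
f0, gamma = 80.0, 3.0                    # toy ringdown frequency and damping rate

average_power = None
n_shots = 50
for _ in range(n_shots):
    phase = rng.uniform(0, 2 * np.pi)    # stand-in for the randomized off-time
    signal = np.exp(-gamma * t) * np.cos(2 * np.pi * f0 * t + phase)
    signal += rng.normal(scale=0.5, size=t.size)   # per-shot detection noise

    power = np.abs(np.fft.rfft(signal)) ** 2
    power /= power.max()                 # per-shot normalization, as in process_shots
    average_power = power if average_power is None else average_power + power

average_power /= n_shots
average_power -= np.median(average_power)    # crude noise-floor subtraction
freqs = np.fft.rfftfreq(t.size, 1 / fs)
print(f"peak at ~{freqs[np.argmax(average_power)]:.1f} Hz (expected {f0} Hz)")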
@@ -139,6 +159,7 @@ def generate_data(
     small_loop_detuning=0,
     excitation_lifetimes=2,
     measurement_lifetimes=4,
+    σ_modulation_time=0.01,
 ):
     η = 0.2
     Ω = 13
@@ -166,40 +187,75 @@ def generate_data(
     params.laser_off_time = params.lifetimes(excitation_lifetimes)
     params.drive_off_time = params.lifetimes(excitation_lifetimes)
 
-    t, solutions = make_shots(
-        params, excitation_lifetimes + measurement_lifetimes, eom_ranges, eom_steps
+    solutions = make_shots(
+        params,
+        excitation_lifetimes + measurement_lifetimes,
+        eom_ranges,
+        eom_steps,
+        σ_modulation_time,
     )
 
-    _, (sol_on_res) = make_shots(
+    (sol_on_res) = make_shots(
         params,
         excitation_lifetimes + measurement_lifetimes,
         ((1 + params.δ), (1 + params.δ)),
         1,
+        0,
     )
 
-    freq, average_power_spectrum = process_shots(t, solutions, noise_amplitude, params)
-    _, spectrum_on_resonance = process_shots(t, sol_on_res, noise_amplitude, params)
+    (sol_on_res_bath) = make_shots(
+        params,
+        excitation_lifetimes + measurement_lifetimes,
+        ((1), (1)),
+        1,
+        0,
+    )
+
+    freq, average_power_spectrum = process_shots(solutions, noise_amplitude, params)
+    _, spectrum_on_resonance = process_shots(sol_on_res, noise_amplitude, params)
+    _, spectrum_on_resonance_bath = process_shots(
+        sol_on_res_bath, noise_amplitude, params
+    )
 
     fig = make_figure()
     fig.clear()
 
-    ax_multi, ax_single = fig.subplot_mosaic("AA\nBB").values()
-    ax_multi.set_title("Averaged Power Spectrum")
-    ax_single.set_title("Single-shot Power-Spectrum with EOM on resonance")
-
-    for ax in [ax_multi, ax_single]:
-        ax.set_xlabel("Frequency (MHz)")
-        ax.set_ylabel("Power")
-        ax.set_yscale("log")
+    fig.suptitle(f"""
+Spectroscopy Protocol V2
+
+Ω/2π = {params.Ω}MHz, η/2π = {params.η}MHz, g_0 = {params.g_0}Ω, N = {params.N}
+noise amplitude = {noise_amplitude} * 2/η, η_A = {η_factor} x η, EOM stepped from {eom_ranges[0]:.2f}Ω to {eom_ranges[1]:.2f}Ω in {eom_steps} steps
+    """)
+    ax_multi, ax_single, ax_single_bath = fig.subplot_mosaic("AA\nBC").values()
 
     plot_power_spectrum(ax_multi, freq, average_power_spectrum, params)
-    plot_power_spectrum(ax_single, freq, spectrum_on_resonance, params)
+    plot_power_spectrum(ax_single, freq, spectrum_on_resonance, params, annotate=False)
+    plot_power_spectrum(
+        ax_single_bath, freq, spectrum_on_resonance_bath, params, annotate=False
+    )
 
     runtime = RuntimeParams(params)
-    print(runtime.Ωs.real / (2 * np.pi) - params.laser_detuning)
+    for ax in [ax_multi, ax_single, ax_single_bath]:
+        ax.set_xlabel("Frequency (MHz)")
+        ax.sharex(ax_multi)
+        ax.sharey(ax_multi)
+
+        ax_ticks = ax.twiny()
+        ax_ticks.sharey(ax)
+        ax_ticks.set_xticks(runtime.ringdown_frequencies)
+        ax_ticks.set_xticklabels(
+            [mode_name(i, params.N) for i in range(2 * params.N + 2)]
+        )
+        ax_ticks.plot(freq, np.zeros_like(freq), alpha=0)
+        ax_ticks.set_xlim(ax.get_xlim())
+
+    ax_multi.set_title("Averaged Power Spectrum (sans noise offset)")
+    ax_single.set_title("Single-shot, EOM on A site")
+    ax_single_bath.set_title("Single-shot, EOM on bath mode")
 
     # ax_spectrum.set_yscale(yscale)
 
+    fig.tight_layout()
     return fig
@@ -208,12 +264,13 @@ if __name__ == "__main__":
     fig = generate_data(
         g_0=1,
         η_factor=5,
-        noise_amplitude=0.25 * 0,
+        noise_amplitude=2e-3,
         N=2,
-        eom_ranges=(1.1, 1.3),
-        eom_steps=20,
+        eom_ranges=(1.1, 1.35),
+        eom_steps=100,
         small_loop_detuning=0,
         laser_detuning=0,
         excitation_lifetimes=2,
-        measurement_lifetimes=20,
+        measurement_lifetimes=30,
+        σ_modulation_time=0.2,
     )
@@ -7,6 +7,8 @@ import inspect
 import subprocess
 import pathlib
 import sys
+import numpy as np
+
 
 P = ParamSpec("P")
 R = TypeVar("R")
@@ -177,3 +179,51 @@ def quick_load_pickle(name):
 
     with open(path, "rb") as f:
         return pickle.load(f)
+
+
+def scientific_round(val, *err, retprec=False):
+    """Scientifically rounds the values to the given errors."""
+    val, err = np.asarray(val), np.asarray(err)
+    if len(err.shape) == 1:
+        err = np.array([err])
+        err = err.T
+    err = err.T
+
+    if err.size == 1 and val.size > 1:
+        err = np.ones_like(val) * err
+
+    if len(err.shape) == 0:
+        err = np.array([err])
+
+    if val.size == 1 and err.shape[0] > 1:
+        val = np.ones_like(err) * val
+
+    i = np.floor(np.log10(err))
+    first_digit = (err // 10**i).astype(int)
+    prec = (-i + np.ones_like(err) * (first_digit <= 3)).astype(int)
+    prec = np.max(prec, axis=1)
+
+    def smart_round(value, precision):
+        value = np.round(value, precision)
+        if precision <= 0:
+            value = value.astype(int)
+        return value
+
+    if val.size > 1:
+        rounded = np.empty_like(val)
+        rounded_err = np.empty_like(err)
+        for n, (value, error, precision) in enumerate(zip(val, err, prec)):
+            rounded[n] = smart_round(value, precision)
+            rounded_err[n] = smart_round(error, precision)
+
+        if retprec:
+            return rounded, rounded_err, prec
+        else:
+            return rounded, rounded_err
+
+    else:
+        prec = prec[0]
+        if retprec:
+            return (smart_round(val, prec), *smart_round(err, prec)[0], prec)
+        else:
+            return (smart_round(val, prec), *smart_round(err, prec)[0])
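The new `scientific_round` helper rounds a value to the precision implied by its uncertainty: one significant digit of the error, or one extra digit when the error's leading digit is 3 or smaller. A quick sketch of the scalar code path, assuming the function is in scope as defined in the hunk above:

import numpy as np  # scientific_round itself relies on numpy

# with scientific_round defined as above:
print(scientific_round(3.14159, 0.047))                # rounds to 3.14 +/- 0.05 (leading error digit 4)
print(scientific_round(3.14159, 0.027))                # rounds to 3.142 +/- 0.027 (leading digit 2 <= 3)
print(scientific_round(3.14159, 0.047, retprec=True))  # also returns the decimal precision used, here 2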
@@ -7,6 +7,7 @@ from dataclasses import dataclass
 
 from scipy.optimize import Bounds
 from enum import Enum
+import lmfit
 
 
 def fourier_transform(
@@ -180,8 +181,51 @@ def find_peaks(
     )
 
 
+def offset(ω, offset):
+    return offset
+
+
+def filter_peaks(
+    peaks: RingdownPeakData,
+    params: RingdownParams,
+    uncertainty_threshold: float = 0.2,
+    height_cutoff: float = 0.1,
+    to_be_deleted: list = [],
+):
+    deleted_peaks = []
+    for i in reversed(range(len(peaks.peak_freqs))):
+        A, ω0, γ = peaks.lorentz_params[i]
+        Δω0, Δγ = peaks.Δpeak_freqs[i], peaks.Δpeak_widths[i]
+
+        if (
+            i in to_be_deleted
+            or Δω0 > uncertainty_threshold * params.fΩ_guess
+            or A < height_cutoff
+            or A > 1
+            or Δγ > uncertainty_threshold * params.fΩ_guess
+        ):
+            np.delete(peaks.peaks, i)
+            np.delete(peaks.peak_freqs, i)
+            np.delete(peaks.Δpeak_freqs, i)
+            np.delete(peaks.peak_widths, i)
+            np.delete(peaks.Δpeak_widths, i)
+
+            del peaks.lorentz_params[i]
+            deleted_peaks.append(i)
+            continue
+
+    for key, value in peaks.peak_info.items():
+        if isinstance(value, np.ndarray):
+            peaks.peak_info[key] = np.delete(value, deleted_peaks)
+
+    return peaks
+
+
 def refine_peaks(
-    peaks: RingdownPeakData, params: RingdownParams, uncertainty_threshold: float = 0.1
+    peaks: RingdownPeakData,
+    params: RingdownParams,
+    uncertainty_threshold: float = 0.2,
+    height_cutoff: float = 0.1,
 ):
     """
     Refine the peak positions and frequencies by fitting Lorentzians.
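One caveat in the new `filter_peaks`: `np.delete` returns a new array rather than modifying its argument, so the bare `np.delete(peaks.peaks, i)` calls above discard their result and leave those arrays untouched; only `peaks.lorentz_params` (via `del`) and `peaks.peak_info` (via reassignment) are actually shrunk. A two-line reminder of the NumPy behaviour:

import numpy as np

a = np.array([1.0, 2.0, 3.0])
np.delete(a, 0)   # returns a new array([2., 3.]) which is thrown away here
print(a)          # [1. 2. 3.] -- unchanged; one would need a = np.delete(a, 0)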
@@ -204,8 +248,9 @@ def refine_peaks(
     Δwidths = []
     lorentz_params = []
+
-    window = params.η_guess * 3
+    window = params.η_guess * 1
     deleted_peaks = []
 
     for i, peak_freq in enumerate(peak_freqs):
         mask = (freqs > peak_freq - window) & (freqs < peak_freq + window)
         windowed_freqs = freqs[mask]
@@ -231,7 +276,7 @@ def refine_peaks(
         )
 
         try:
-            popt, pcov = scipy.optimize.curve_fit(
+            scipy_popt, pcov = scipy.optimize.curve_fit(
                 lorentzian,
                 windowed_freqs,
                 windowed_power,
@@ -239,20 +284,28 @@ def refine_peaks(
                 bounds=bounds,
             )
             perr = np.sqrt(np.diag(pcov))
-            popt[0] = popt[0] * scale_power
-            popt[1] = popt[1] * scale_freqs + root_freq
-            popt[2] = popt[2] * scale_freqs
 
-            lorentz_params.append(popt)
+            if (
+                perr[1] * scale_freqs > uncertainty_threshold * params.fΩ_guess
+                or scipy_popt[0] * scale_power / np.max(power) < height_cutoff
+            ):
+                deleted_peaks.append(i)
+                continue
+
+            scipy_popt[0] = scipy_popt[0] * scale_power
+            scipy_popt[1] = scipy_popt[1] * scale_freqs + root_freq
+            scipy_popt[2] = scipy_popt[2] * scale_freqs
+
+            lorentz_params.append(scipy_popt)
 
             if perr[1] > uncertainty_threshold * params.fΩ_guess:
                 deleted_peaks.append(i)
                 continue
 
-            new_freqs.append(popt[1])
+            new_freqs.append(scipy_popt[1])
             Δfreqs.append(perr[1])
 
-            new_widths.append(popt[2])
+            new_widths.append(scipy_popt[2])
             Δwidths.append(perr[2])
         except:
             deleted_peaks.append(i)
@@ -268,10 +321,63 @@ def refine_peaks(
     peaks.Δpeak_widths = np.array(Δwidths)
     peaks.lorentz_params = lorentz_params
 
-    return peaks
+    total_model = None
+    model_params = []
+
+    global_power_scale = 1  # power.max()
+    scaled_power = power / global_power_scale
 
-import matplotlib.pyplot as plt
+    for i, (A, ω0, γ) in enumerate(lorentz_params):
+        model = lmfit.Model(lorentzian, prefix=f"peak_{i}_")
+
+        initial_params = model.make_params(
+            A=dict(value=A / global_power_scale, min=0, max=np.inf),
+            ω0=dict(value=ω0, min=0, max=np.inf),
+            γ=dict(value=γ, min=0, max=np.inf),
+        )
+
+        if total_model is None:
+            total_model = model
+        else:
+            total_model += model
+
+        model_params.append(initial_params)
+
+    aggregate_params = total_model.make_params()
+    for lm_params in model_params:
+        aggregate_params.update(lm_params)
+
+    offset_model = lmfit.Model(offset)
+    aggregate_params.update(offset_model.make_params(offset=0, min=0, max=1))
+    total_model += offset_model
+
+    lm_result = total_model.fit(scaled_power, params=aggregate_params, ω=freqs)
+
+    for i in reversed(range(len(peaks.peak_freqs))):
+        peak_prefix = f"peak_{i}_"
+
+        A, ω0, γ = (
+            lm_result.best_values[peak_prefix + "A"],
+            lm_result.best_values[peak_prefix + "ω0"],
+            lm_result.best_values[peak_prefix + "γ"],
+        )
+
+        ΔA, Δω0, Δγ = (
+            lm_result.params[peak_prefix + "A"].stderr,
+            lm_result.params[peak_prefix + "ω0"].stderr,
+            lm_result.params[peak_prefix + "γ"].stderr,
+        )
+
+        peaks.peak_freqs[i] = ω0
+        peaks.Δpeak_freqs[i] = Δω0
+
+        peaks.peak_widths[i] = γ
+        peaks.Δpeak_widths[i] = Δγ
+
+        peaks.lorentz_params[i] = A, ω0, γ
+
+    peaks = filter_peaks(peaks, params, uncertainty_threshold, height_cutoff)
+    return peaks, lm_result
 
 
 class StepType(Enum):
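The global fit added to `refine_peaks` above is the "global lmfit" of the commit message: one `lmfit.Model` per detected peak, summed into a composite model together with a constant offset, then fitted against the whole spectrum at once so that neighbouring peaks constrain each other. Below is a stripped-down, self-contained sketch of that composite-model pattern on synthetic data. The `lorentzian` here only mirrors the project's parameter order `A, ω0, γ`; its exact functional form, and the dict-style `make_params` hints (which need lmfit >= 1.2), are assumptions.

import numpy as np
import lmfit


def lorentzian(ω, A, ω0, γ):
    # assumed stand-in: amplitude A, center ω0, width γ
    return A * γ**2 / ((ω - ω0) ** 2 + γ**2)


def constant(ω, c):
    # plays the role of the offset model in the diff above
    return c


rng = np.random.default_rng(1)
ω = np.linspace(0.0, 50.0, 2000)
data = lorentzian(ω, 1.0, 12.0, 1.5) + lorentzian(ω, 0.6, 31.0, 2.0)
data += rng.normal(scale=0.02, size=ω.size) + 0.05

# one Lorentzian model per rough peak guess, summed into a composite model
total_model = None
per_peak_params = []
for i, (A0, f0, w0) in enumerate([(0.9, 11.5, 1.0), (0.5, 30.5, 1.0)]):
    model = lmfit.Model(lorentzian, prefix=f"peak_{i}_")
    per_peak_params.append(
        model.make_params(
            A=dict(value=A0, min=0),
            ω0=dict(value=f0, min=0),
            γ=dict(value=w0, min=0),
        )
    )
    total_model = model if total_model is None else total_model + model

offset_model = lmfit.Model(constant)
total_model = total_model + offset_model

params = total_model.make_params()
for p in per_peak_params:
    params.update(p)
params.update(offset_model.make_params(c=dict(value=0.0, min=0, max=1)))

result = total_model.fit(data, params=params, ω=ω)
print(result.best_values["peak_0_ω0"], result.best_values["peak_1_ω0"])

In the commit itself, the analogous `lm_result.best_fit` is what `plot_power_spectrum` overlays in red on the averaged spectrum.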
@@ -225,24 +225,38 @@ def plot_spectrum_and_peak_info(
     """
 
     ax.clear()
-    ax.plot(peaks.freq, peaks.power, label="FFT Power", color="C0")
+
     ax.plot(
-        peaks.peak_freqs,
-        peaks.power[peaks.peaks],
-        "x",
-        label="Peaks",
-        color="C2",
+        peaks.freq,
+        peaks.power,
+        label="FFT Power",
+        color="C0",
+        linewidth=0.5,
+        marker="o",
+        markersize=2,
     )
 
-    ax_angle = ax.twinx()
-    ax_angle.clear()
-    ax_angle.set_ylabel("Phase (rad)")
-
     if annotate:
-        for i, (freq, height, lorentz) in enumerate(
-            zip(peaks.peak_freqs, peaks.power[peaks.peaks], peaks.lorentz_params)
+        for i, (freq, Δfreq, lorentz) in enumerate(
+            zip(peaks.peak_freqs, peaks.Δpeak_freqs, peaks.lorentz_params)
         ):
-            ax.annotate(f"{i} ({freq:.2e})", (freq, height))
+            # ax.plot(
+            #     freq,
+            #     max(lorentz[0], 1),
+            #     "x",
+            #     label="Peaks",
+            #     color="C2",
+            # )
+
+            roundfreq, rounderr = scientific_round(freq, Δfreq)
+
+            t = ax.text(
+                freq,
+                lorentz[0],
+                rf"{i} (${roundfreq}\pm {rounderr}$)",
+                fontsize=20,
+            )
+            t.set_bbox(dict(facecolor="white", alpha=1, edgecolor="white"))
 
             ax.plot(
                 peaks.freq,
                 lorentzian(peaks.freq, *lorentz),
@@ -261,4 +275,5 @@ def plot_spectrum_and_peak_info(
                 zorder=-10,
                 label="Frequency Shift",
             )
+    ax.set_xlim(peaks.freq[0], peaks.freq[-1])
     ax.legend()
@@ -136,6 +136,8 @@ class RuntimeParams:
     """Secondary Parameters that are required to run the simulation."""
 
     def __init__(self, params: Params):
+        self.params = params
+
         H_A = np.array(
             [
                 [0, params.δ],
@@ -215,17 +217,34 @@
         """The mode splitting of the system in *frequency units*."""
         return (self.Ωs[1] - self.Ωs[0]).real / (4 * np.pi)
 
+    @property
+    def ringdown_frequencies(self) -> np.ndarray:
+        """
+        The frequencies that are detectable in the ringdown spectrum.
+        In essence, those are the eigenfrequencies of the system,
+        shifted by laser detuning and measurement detuning.
+        """
+
+        return np.abs(
+            self.params.measurement_detuning
+            - self.Ωs.real / (2 * np.pi)
+            + self.params.δ * self.params.Ω
+            + self.params.laser_detuning
+        )
+
 
 def time_axis(
     params: Params,
     lifetimes: float | None = None,
     recurrences: float | None = None,
+    total_time: float | None = None,
     resolution: float = 1,
 ):
     """Generate a time axis for the simulation.
 
     :param params: system parameters
     :param lifetimes: number of lifetimes to simulate
+    :params total_time: the total timespan to set of the time array
     :param resolution: time resolution
 
     Setting this to `1` will give the time axis just enough
@@ -238,6 +257,8 @@ def time_axis(
         tmax = params.lifetimes(lifetimes)
     elif recurrences is not None:
         tmax = recurrence_time(params) * recurrences
+    elif total_time:
+        tmax = total_time
     else:
         raise ValueError("Either lifetimes or recurrences must be set.")
 
@@ -437,13 +458,18 @@ def drive_frequencies_and_amplitudes(
     return ωs, Δs, amplitudes, a_shift
 
 
-def mode_name(mode: int):
+def mode_name(mode: int, N: int | None = None):
     """Return the name of the mode."""
     if mode == 0:
-        return "anti-A"
+        return r"$\bar{A}$"
     if mode == 1:
         return "A"
 
+    if N:
+        bath_mode = mode - 2
+        if mode >= N:
+            return rf"$\bar{{B}}{bath_mode % N + 1}$"
+
     return f"B{mode - 1}"
 
 