tabula-raza
new structure
|
@@ -1,150 +0,0 @@
|
|||
\documentclass[prx,a4paper,aps,twocolumn,nofootinbib,superscriptaddress,10pt,showkeys]{revtex4-1}
|
||||
|
||||
\usepackage{amsmath,amsthm,amsfonts,graphicx,xcolor,times,xfrac,booktabs,mathtools,enumitem,xr,subfigure,amssymb,bbm,verbatim,appendix,placeins}
|
||||
\usepackage[unicode=true,bookmarks=true,bookmarksnumbered=false,bookmarksopen=false,breaklinks=false,pdfborder={0 0 1}, backref=false,colorlinks=true]{hyperref}
|
||||
\usepackage{orcidlink}
|
||||
\setcounter{secnumdepth}{3}
|
||||
\setlength{\bibsep}{-0.08pt}
|
||||
\renewcommand*{\doi}[1]{\href{http://dx.doi.org/#1}{DOI: #1}}
|
||||
\renewcommand*{\url}[1]{\href{#1}{#1}}
|
||||
\newcommand{\rem}[1]{{\color{red} [[#1]]}}
|
||||
\newcommand{\krem}[1]{{\color{orange}#1}}
|
||||
\newcommand{\com}[1]{\textbf{\color{cyan}[[#1]]}}
|
||||
\newcommand{\add}[1]{{\color{blue} #1}}
|
||||
\newcommand{\kadd}[1]{{\color{violet} #1}}
|
||||
\newcommand{\purp}[1]{{\color{violet} #1}}
|
||||
\newcommand{\scom}[1]{\textbf{\color{teal} [[#1]]}}
|
||||
\newcommand{\phil}[1]{\textbf{\color{violet}[[PT: #1]]}}
|
||||
|
||||
\renewcommand{\appendixname}{APPENDIX}
|
||||
|
||||
\makeatletter
|
||||
\theoremstyle{plain}
|
||||
\newtheorem{thm}{\protect\theoremname}
|
||||
\theoremstyle{plain}
|
||||
\newtheorem{lem}{\protect\lemmaname}
|
||||
\theoremstyle{plain}
|
||||
\newtheorem{prop}{\protect\propositionname}
|
||||
\theoremstyle{remark}
|
||||
\newtheorem*{rem*}{\protect\remarkname}
|
||||
\theoremstyle{plain}
|
||||
\newtheorem{conjecture}{\protect\conjecturename}
|
||||
\theoremstyle{plain}
|
||||
\newtheorem{cor}{\protect\corollaryname}
|
||||
\theoremstyle{definition}
|
||||
\newtheorem{defn}{\protect\definitionname}
|
||||
\theoremstyle{plain}
|
||||
\newtheorem{obs}{\protect\observationname}
|
||||
\theoremstyle{plain}
|
||||
\newtheorem*{thm*}{\protect\theoremname}
|
||||
\theoremstyle{plain}
|
||||
\newtheorem*{lem*}{\protect\lemmaname}
|
||||
|
||||
\providecommand{\propositionname}{Proposition}
|
||||
\providecommand{\theoremname}{Theorem}
|
||||
\providecommand{\lemmaname}{Lemma}
|
||||
\providecommand{\remarkname}{Remark}
|
||||
\providecommand{\conjecturename}{Conjecture}
|
||||
\providecommand{\definitionname}{Definition}
|
||||
\providecommand{\corollaryname}{Corollary}
|
||||
\providecommand{\observationname}{Observation}
|
||||
\allowdisplaybreaks
|
||||
|
||||
\newcommand{\trthm}{\normalfont \mathrm{tr}}
|
||||
|
||||
\def\bra#1{\langle{#1}\vert}
|
||||
\def\ket#1{\vert{#1}\rangle}
|
||||
\def\braket#1{\langle{#1}\rangle}
|
||||
\def\Bra#1{\left\langle#1\|}
|
||||
\def\Ket#1{\|#1\right \rangle}
|
||||
\def\BraVert{\egroup\,\mid\,\bgroup}
|
||||
\def\Brak#1#2#3{\bra{#1}#2\ket{#3}}
|
||||
\def\ketbra#1#2{\vert{#1}\rangle\!\langle{#2}\vert}
|
||||
|
||||
\def\tr#1{\mbox{tr}\left[{#1}\right]}
|
||||
\newcommand{\ptr}[2]{\mbox{tr}_{#1}\left[ #2 \right]}
|
||||
\newcommand{\inp}{\normalfont \texttt{i}}
|
||||
\newcommand{\out}{\normalfont \texttt{o}}
|
||||
\def\prob#1{\mathbbm{P}{(#1)}}
|
||||
\DeclareMathOperator{\diag}{diag}
|
||||
\renewcommand{\arraystretch}{1.3}
|
||||
\DeclareMathOperator*{\argmin}{\arg\!\min}
|
||||
\DeclareMathOperator*{\argmax}{\arg\!\max}
|
||||
\DeclareMathOperator\sgn{sgn}
|
||||
|
||||
\newcommand{\Acal}{\mathcal{A}}
|
||||
\newcommand{\Bcal}{\mathcal{B}}
|
||||
\newcommand{\Ecal}{\mathcal{E}}
|
||||
\newcommand{\Fcal}{\mathcal{F}}
|
||||
\newcommand{\Hcal}{\mathcal{H}}
|
||||
\newcommand{\Ical}{\mathcal{I}}
|
||||
\newcommand{\Mcal}{\mathcal{M}}
|
||||
\newcommand{\Tcal}{\mathcal{T}}
|
||||
\newcommand{\Ocal}{\mathcal{O}}
|
||||
\newcommand{\Ucal}{\mathcal{U}}
|
||||
\newcommand{\Vcal}{\mathcal{V}}
|
||||
\newcommand{\Lcal}{\mathcal{L}}
|
||||
\newcommand{\Ccal}{\mathcal{C}}
|
||||
\newcommand{\Scal}{\mathcal{S}}
|
||||
\newcommand{\Ncal}{\mathcal{N}}
|
||||
\newcommand{\Jcal}{\mathcal{J}}
|
||||
\newcommand{\Pcal}{\mathcal{P}}
|
||||
\newcommand{\Pprob}{\mathbb{P}}
|
||||
\newcommand{\Qcal}{\mathcal{Q}}
|
||||
\newcommand{\Kcal}{\mathcal{K}}
|
||||
\newcommand{\Dcal}{\mathcal{D}}
|
||||
|
||||
\externaldocument{supp}
|
||||
|
||||
\let\oldaddcontentsline\addcontentsline
|
||||
\newcommand{\stoptocentries}{\renewcommand{\addcontentsline}[3]{}}
|
||||
\newcommand{\starttocentries}{\let\addcontentsline\oldaddcontentsline}
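% Usage sketch (illustrative only): wrap material whose headings should not
% appear in the hyperref bookmarks / table of contents, e.g.
%   \stoptocentries
%   \section{Auxiliary material}
%   \starttocentries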
|
||||
|
||||
\begin{document}
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
|
||||
\title{Hops hops}
|
||||
\date{\today}
|
||||
|
||||
\author{Valentin Boettcher\,\orcidlink{...}}
|
||||
\email{...}
|
||||
\affiliation{...}
|
||||
|
||||
\author{Konstantin Beyer\,\orcidlink{...}}
|
||||
\email{...}
|
||||
\affiliation{...}
|
||||
|
||||
\author{Richard Hartmann\,\orcidlink{...}}
|
||||
\email{...}
|
||||
\affiliation{...}
|
||||
|
||||
\author{Walter T. Strunz\,\orcidlink{...}}
|
||||
\email{...}
|
||||
\affiliation{...}
|
||||
|
||||
|
||||
\date{\today}
|
||||
\begin{abstract}
|
||||
Very abstract...
|
||||
\end{abstract}
|
||||
|
||||
%\keywords{\emph{Open Quantum Processes; ...}}
|
||||
|
||||
\maketitle
|
||||
|
||||
|
||||
|
||||
\begin{acknowledgments}
|
||||
We would like to thank ...
|
||||
\end{acknowledgments}
|
||||
|
||||
\def\bibsection{\section*{References}}
|
||||
\bibliography{references.bib}
|
||||
|
||||
%\onecolumn\newpage
|
||||
\appendix
|
||||
|
||||
\section{ }\label{...}
|
||||
|
||||
\end{document}
|
205
index.tex
|
@@ -1,205 +0,0 @@
|
|||
\documentclass[reprint,aps,superscriptaddress]{revtex4-2}
|
||||
\usepackage[unicode=true,bookmarks=true,bookmarksnumbered=false,bookmarksopen=false,breaklinks=false,pdfborder={0 0 1}, backref=false,colorlinks=true]{hyperref}
|
||||
\usepackage{orcidlink}
|
||||
\usepackage{microtype}
|
||||
\usepackage{mathtools}
|
||||
\usepackage{graphicx}
|
||||
\usepackage{physics}
|
||||
\usepackage{cleveref}
|
||||
\usepackage{bm}
|
||||
\bibliographystyle{apsrev4-2}
|
||||
|
||||
|
||||
% HOPS/NMQSD
|
||||
\def\sys{\ensuremath{\mathrm{S}}}
|
||||
\def\bath{\ensuremath{\mathrm{B}}}
|
||||
\def\inter{\ensuremath{\mathrm{I}}}
|
||||
\def\nth{\ensuremath{^{(n)}}}
|
||||
|
||||
% unicode math
|
||||
\iftutex
|
||||
\usepackage{unicode-math}
|
||||
\else
|
||||
\usepackage{amssymb}
|
||||
\def\z"{}
|
||||
\def\UnicodeMathSymbol#1#2#3#4{%
|
||||
\ifnum#1>"A0
|
||||
\DeclareUnicodeCharacter{\z#1}{#2}%
|
||||
\fi}
|
||||
\input{unicode-math-table}
|
||||
\let\muprho\rho
|
||||
\def\BbbR{\mathbb{R}}
|
||||
\fi
|
||||
|
||||
|
||||
\begin{document}
|
||||
\preprint{APS/123-QED}
|
||||
\title{Quantifying Energy Flow in Arbitrarily Modulated Open Quantum Systems}
|
||||
\date{12.12.2100}
|
||||
|
||||
% fixme
|
||||
\newcommand{\fixme}[1]{\marginpar{\tiny\textcolor{red}{#1}}}
|
||||
|
||||
\author{Valentin Boettcher\,\orcidlink{0000-0003-2361-7874}}
|
||||
\affiliation{McGill University}
|
||||
\altaffiliation[formerly at ]{TU Dresden}
|
||||
\email{valentin.boettcher@mail.mcgill.ca}
|
||||
|
||||
\author{Konstantin Beyer\,\orcidlink{0000-0002-1864-4520}}
|
||||
\email{konstantin.beyer@tu-dresden.de}
|
||||
\affiliation{TU Dresden}
|
||||
|
||||
\author{Richard Hartmann\,\orcidlink{0000-0002-8967-6183}}
|
||||
\email{richard.hartmann@tu-dresden.de}
|
||||
\affiliation{TU Dresden}
|
||||
|
||||
\author{Walter T. Strunz\,\orcidlink{0000-0002-7806-3525}}
|
||||
\email{walter.strunz@tu-dresden.de}
|
||||
\affiliation{TU Dresden}
|
||||
|
||||
|
||||
|
||||
|
||||
\begin{abstract}
|
||||
\end{abstract}
|
||||
\maketitle
|
||||
|
||||
\tableofcontents
|
||||
|
||||
\section{Introduction}
|
||||
\label{sec:introduction}
|
||||
The field of quantum thermodynamics has attracted much interest
|
||||
recently~\cite{Talkner2020Oct,Rivas2019Oct,Riechers2021Apr,Vinjanampathy2016Oct,Binder2018,Kurizki2021Dec,Mukherjee2020Jan,Xu2022Mar}.
|
||||
Quantum thermodynamics is, among other things, concerned with extending the standard phenomenological notions of thermodynamics to microscopic open systems.
|
||||
|
||||
The general type of model investigated in this field is given by the Hamiltonian
|
||||
\begin{equation}
|
||||
\label{eq:4}
|
||||
H = H_{\sys} + ∑_{n} \qty[H_{\inter}^{(n)} + H_{\bath}^{(n)}],
|
||||
\end{equation}
|
||||
where \(H_{\sys}\) models a ``small'' system of arbitrary structure (from here on simply called the \emph{system}) and the \(H_{\bath}^{(n)}\) model the ``large'' bath systems, which have a simple structure but a large number of degrees of freedom. The \(H_{\inter}^{(n)}\) act on both system and bath, mediating their interaction.
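For instance, a paradigmatic realization of this structure, which also underlies the applications in \cref{sec:applications}, is a qubit coupled linearly to bosonic baths,
\begin{equation*}
  H_{\sys} = \frac{\omega}{2}\,\sigma_z, \qquad
  H_{\bath}^{(n)} = \sum_{\lambda} \omega^{(n)}_{\lambda}\,
  a^{(n)\dagger}_{\lambda} a^{(n)}_{\lambda}, \qquad
  H_{\inter}^{(n)} = L^{(n)} \otimes \sum_{\lambda}
  \qty[g^{(n)}_{\lambda}\, a^{(n)\dagger}_{\lambda}
    + \qty(g^{(n)}_{\lambda})^{*} a^{(n)}_{\lambda}],
\end{equation*}
with Hermitian system coupling operators \(L^{(n)}\).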
|
||||
|
||||
In this setting it may be possible to formulate rigorous microscopic definitions of thermodynamic quantities such as internal energy, heat, and work that are consistent with the well-known laws of thermodynamics. There is no consensus on this matter yet, as the plethora of proposals and discussions in
\cite{Rivas2019Oct,Talkner2020Oct,Motz2018Nov,Wiedmann2020Mar,Senior2020Feb,Kato2015Aug,Kato2016Dec,Strasberg2021Aug,Talkner2016Aug,Bera2021Feb,Bera2021Jun,Esposito2015Dec,Elouard2022Jul} demonstrates.
|
||||
|
||||
This is particularly true in the general case where the coupling to the baths may be arbitrarily strong. In this case the weak-coupling treatment that permits separate system and bath dynamics is not applicable. Even the seemingly simple question of how internal energy is to be defined becomes non-trivial~\cite{Rivas2012,Binder2018}, because \(\ev{H_{\inter}}\neq 0\).
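To make the bookkeeping explicit: for a time-independent total Hamiltonian the conserved total energy splits as
\begin{equation*}
  \ev{H} = \ev{H_{\sys}} + \sum_{n} \qty[\ev{H_{\inter}^{(n)}} + \ev{H_{\bath}^{(n)}}],
\end{equation*}
and beyond weak coupling it is not obvious how the interaction contributions \(\ev{H_{\inter}^{(n)}}\) should be attributed to the system or to the baths when defining an internal energy.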
|
||||
|
||||
The bath degrees of freedom thus become interesting in themselves, which necessitates a treatment of the exact global unitary dynamics of system and bath.
|
||||
|
||||
If no analytical solution for these dynamics is available, one has to rely on numerical methods. Notable examples are perturbative methods such as the Redfield equations for non-Markovian weak-coupling dynamics~\cite{Davidovic2020Sep}, as well as exact methods like the Hierarchical Equations of Motion (HEOM)~\cite{Tanimura1990Jun,Tang2015Dec}, multilayer MCTDH~\cite{Wang2010May}, TEMPO~\cite{Strathearn2018Aug} and the Hierarchy of Pure States (HOPS)~\cite{Suess2014Oct}\footnote{See
  \cite{RichardDiss} for a detailed account.}. Although the focus of these methods is on the reduced system dynamics, exact treatments of open systems can provide access to the global unitary evolution of the system and the baths.
|
||||
|
||||
In this work we will focus on the framework of the ``Non-Markovian Quantum State Diffusion'' (NMQSD)~\cite{Diosi1998Mar}, which is briefly reviewed in~\cref{sec:nmqsd}. We will show in \cref{sec:flow} that the NMQSD allows access to interaction- and bath-related quantities. This novel application of the formalism constitutes the main result of this work.
|
||||
|
||||
Based on the NMQSD and inspired by the ideas behind HEOM, a numerical
|
||||
method, the ``Hierarchy of Pure States''
|
||||
(HOPS)~\cite{RichardDiss,Hartmann2017Dec}, can be formulated. A brief
|
||||
account of the method is given in \cref{sec:hops}.
|
||||
|
||||
The results of \cref{sec:flow}, most importantly the calculation of bath and interaction energy expectation values, can easily be implemented within this numerical framework. By doing so we will elucidate the role of certain features inherent to the method. The most general case we will be able to handle is a system coupled to multiple baths of differing temperatures under arbitrary time-dependent modulation. As HOPS on its own is already a method with a very broad range of applicability~\cite{RichardDiss}, we will find it to be well suited for the exploration of thermodynamic settings.
|
||||
|
||||
In \cref{sec:applications} we apply this result to two simple systems. As an elementary application, a brief study of the characteristics of the energy flow out of a qubit into a zero-temperature bath is presented in \cref{sec:qubit-relax-char}. To demonstrate the current capabilities of our method to the fullest, we turn to the simulation of a quantum Otto-like cycle~\cite{Geva1992Feb,Wiedmann2020Mar,Wiedmann2021Jun} in \cref{sec:quantum-otto-cycle}, which features a simultaneous time dependence in both \(H_{\inter}\) and \(H_{\sys}\).
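Schematically, for a qubit working medium such a protocol may be pictured as
\begin{equation*}
  H(t) = \frac{\omega(t)}{2}\,\sigma_z
  + \sum_{n} \qty[f_n(t)\, H_{\inter}^{(n)} + H_{\bath}^{(n)}],
\end{equation*}
where \(\omega(t)\) is ramped between two values during the compression and expansion strokes, while the dimensionless switching functions \(f_n(t)\in[0,1]\) turn the couplings to the cold and hot baths on and off during the respective thermalization strokes; the precise protocol is specified in \cref{sec:quantum-otto-cycle}.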
|
||||
|
||||
\section{Energy Flow with HOPS}
|
||||
\label{sec:flow}
|
||||
|
||||
Let us proceed by briefly reviewing the fundamentals of the NMQSD and the HOPS. A more thorough account can be found in~\cite{RichardDiss}.
|
||||
\subsection{The NMQSD}
|
||||
\label{sec:nmqsd}
|
||||
|
||||
\subsection{The HOPS}
|
||||
\label{sec:hops}
|
||||
|
||||
\subsection{Bath Observables}
|
||||
\label{sec:bath-observables}
|
||||
|
||||
\subsubsection{Bath Energy Change}
|
||||
\label{sec:bath-energy-change}
|
||||
|
||||
\subsubsection{General Collective Bath Observables}
|
||||
\label{sec:gener-coll-bath}
|
||||
|
||||
|
||||
\section{Applications}
|
||||
\label{sec:applications}
|
||||
|
||||
\subsection{Qubit Relaxation Characteristics}
|
||||
\label{sec:qubit-relax-char}
|
||||
|
||||
\subsection{A Quantum Otto Cycle}
|
||||
\label{sec:quantum-otto-cycle}
|
||||
|
||||
|
||||
|
||||
\begin{itemize}
|
||||
\item see the chapter in my thesis
|
||||
\item \textbf{Ask Richard about phase transitions in the spin-boson model}
|
||||
\end{itemize}
|
||||
|
||||
|
||||
\section{Outlook and Open Questions}
|
||||
\label{sec:outl-open-quest}
|
||||
\begin{itemize}
|
||||
\item steady state methods
|
||||
\item energy flow for portions of the bath $\to$ adaptive method?
|
||||
\end{itemize}
|
||||
|
||||
\bibliography{index}
|
||||
\end{document}
|
||||
|
||||
%%% Local Variables:
|
||||
%%% mode: latex
|
||||
%%% TeX-master: t
|
||||
%%% TeX-output-dir: "output"
|
||||
%%% TeX-engine: luatex
|
||||
%%% End:
|
|
@@ -1,6 +0,0 @@
|
|||
$pdf_mode = 1;
|
||||
@default_files = ('index.tex');
|
||||
$out_dir = 'output';
|
||||
$pdflatex = 'pdflatex -synctex=1';
|
||||
|
||||
$pdf_previewer = "zathura %O %S";
|
|
@@ -1,87 +0,0 @@
|
|||
import figsaver as fs
|
||||
import plot_utils as pu
|
||||
from hiro_models.one_qubit_model import StocProcTolerances
|
||||
from hiro_models.otto_cycle import OttoEngine
|
||||
import hiro_models.model_auxiliary as aux
|
||||
import numpy as np
|
||||
import qutip as qt
|
||||
import utilities as ut
|
||||
import stocproc
|
||||
import matplotlib.pyplot as plt
|
||||
import otto_utilities as ot
|
||||
|
||||
import ray
|
||||
ray.shutdown()
|
||||
|
||||
#ray.init(address='auto')
|
||||
ray.init()
|
||||
from hops.util.logging_setup import logging_setup
|
||||
import logging
|
||||
logging_setup(logging.INFO)
|
||||
plt.rcParams['figure.figsize'] = (12,4)
|
||||
|
||||
model = OttoEngine(
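    # Rough orientation (hedged guesses from the values used here, not from the
    # OttoEngine documentation): all per-bath arguments are length-two lists,
    # i.e. a cold and a hot bath; T=[1, 10] then presumably sets their
    # temperatures, ψ_0 the initial qubit state, dt the integration step,
    # Θ the cycle duration and num_cycles how many cycles are simulated.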
|
||||
δ=[0.4, 0.4],
|
||||
ω_c=[1, 1],
|
||||
ψ_0=qt.basis([2], [1]),
|
||||
description=f"A basic near-markovian, weakly coupled Otto Cycle that actually works.",
|
||||
k_max=4,
|
||||
bcf_terms=[6] * 2,
|
||||
truncation_scheme="simplex",
|
||||
driving_process_tolerances=[StocProcTolerances(1e-4, 1e-4)] * 2,
|
||||
thermal_process_tolerances=[StocProcTolerances(1e-4, 1e-4)] * 2,
|
||||
T=[1, 10],
|
||||
therm_methods=["tanhsinh", "fft"],
|
||||
Δ=1,
|
||||
num_cycles=5,
|
||||
Θ=1.5 / 0.05,
|
||||
dt=0.001,
|
||||
timings_H=(0, 0.1, 0.5, 0.6),
|
||||
timings_L=((0.6, 0.7, 0.9, 1), (0.1, 0.2, 0.4, 0.5)),
|
||||
streaming_mode=True,
|
||||
# solver_args=dict(rtol=1e-3, atol=1e-3)
|
||||
)
|
||||
|
||||
import figsaver as fs
|
||||
import plot_utils as pu
|
||||
from hiro_models.one_qubit_model import StocProcTolerances
|
||||
from hiro_models.otto_cycle import OttoEngine
|
||||
import hiro_models.model_auxiliary as aux
|
||||
import numpy as np
|
||||
import qutip as qt
|
||||
import utilities as ut
|
||||
import stocproc
|
||||
import matplotlib.pyplot as plt
|
||||
import otto_utilities as ot
|
||||
|
||||
import ray
|
||||
ray.shutdown()
|
||||
|
||||
#ray.init(address='auto')
|
||||
ray.init()
|
||||
from hops.util.logging_setup import logging_setup
|
||||
import logging
|
||||
logging_setup(logging.INFO)
|
||||
plt.rcParams['figure.figsize'] = (12,4)
|
||||
|
||||
model = OttoEngine(
|
||||
δ=[0.4, 0.4],
|
||||
ω_c=[1, 1],
|
||||
ψ_0=qt.basis([2], [1]),
|
||||
description=f"A basic near-markovian, weakly coupled Otto Cycle that actually works.",
|
||||
k_max=4,
|
||||
bcf_terms=[6] * 2,
|
||||
truncation_scheme="simplex",
|
||||
driving_process_tolerances=[StocProcTolerances(1e-4, 1e-4)] * 2,
|
||||
thermal_process_tolerances=[StocProcTolerances(1e-4, 1e-4)] * 2,
|
||||
T=[1, 10],
|
||||
therm_methods=["tanhsinh", "fft"],
|
||||
Δ=1,
|
||||
num_cycles=5,
|
||||
Θ=1.5 / 0.05,
|
||||
dt=0.001,
|
||||
timings_H=(0, 0.1, 0.5, 0.6),
|
||||
timings_L=((0.6, 0.7, 0.9, 1), (0.1, 0.2, 0.4, 0.5)),
|
||||
streaming_mode=True,
|
||||
# solver_args=dict(rtol=1e-3, atol=1e-3)
|
||||
)
|
|
@@ -1,89 +0,0 @@
|
|||
import figsaver as fs
|
||||
import plot_utils as pu
|
||||
from hiro_models.one_qubit_model import StocProcTolerances
|
||||
from hiro_models.otto_cycle import OttoEngine
|
||||
import hiro_models.model_auxiliary as aux
|
||||
import numpy as np
|
||||
import qutip as qt
|
||||
import utilities as ut
|
||||
import stocproc
|
||||
import matplotlib.pyplot as plt
|
||||
import otto_utilities as ot
|
||||
|
||||
import ray
|
||||
ray.shutdown()
|
||||
|
||||
#ray.init(address='auto')
|
||||
ray.init()
|
||||
from hops.util.logging_setup import logging_setup
|
||||
import logging
|
||||
logging_setup(logging.INFO)
|
||||
plt.rcParams['figure.figsize'] = (12,4)
|
||||
|
||||
model = OttoEngine(
|
||||
δ=[0.4, 0.4],
|
||||
ω_c=[1, 1],
|
||||
ψ_0=qt.basis([2], [1]),
|
||||
description=f"A basic near-markovian, weakly coupled Otto Cycle that actually works.",
|
||||
k_max=4,
|
||||
bcf_terms=[6] * 2,
|
||||
truncation_scheme="simplex",
|
||||
driving_process_tolerances=[StocProcTolerances(1e-4, 1e-4)] * 2,
|
||||
thermal_process_tolerances=[StocProcTolerances(1e-4, 1e-4)] * 2,
|
||||
T=[1, 10],
|
||||
therm_methods=["tanhsinh", "fft"],
|
||||
Δ=1,
|
||||
num_cycles=5,
|
||||
Θ=1.5 / 0.05,
|
||||
dt=0.001,
|
||||
timings_H=(0, 0.1, 0.5, 0.6),
|
||||
timings_L=(None, None),
|
||||
streaming_mode=True,
|
||||
)
|
||||
|
||||
ot.plot_cycle(model)
|
||||
|
||||
import figsaver as fs
|
||||
import plot_utils as pu
|
||||
from hiro_models.one_qubit_model import StocProcTolerances
|
||||
from hiro_models.otto_cycle import OttoEngine
|
||||
import hiro_models.model_auxiliary as aux
|
||||
import numpy as np
|
||||
import qutip as qt
|
||||
import utilities as ut
|
||||
import stocproc
|
||||
import matplotlib.pyplot as plt
|
||||
import otto_utilities as ot
|
||||
|
||||
import ray
|
||||
ray.shutdown()
|
||||
|
||||
#ray.init(address='auto')
|
||||
ray.init()
|
||||
from hops.util.logging_setup import logging_setup
|
||||
import logging
|
||||
logging_setup(logging.INFO)
|
||||
plt.rcParams['figure.figsize'] = (12,4)
|
||||
|
||||
model = OttoEngine(
|
||||
δ=[0.4, 0.4],
|
||||
ω_c=[1, 1],
|
||||
ψ_0=qt.basis([2], [1]),
|
||||
description=f"A basic near-markovian, weakly coupled Otto Cycle that actually works.",
|
||||
k_max=4,
|
||||
bcf_terms=[6] * 2,
|
||||
truncation_scheme="simplex",
|
||||
driving_process_tolerances=[StocProcTolerances(1e-4, 1e-4)] * 2,
|
||||
thermal_process_tolerances=[StocProcTolerances(1e-4, 1e-4)] * 2,
|
||||
T=[1, 10],
|
||||
therm_methods=["tanhsinh", "fft"],
|
||||
Δ=1,
|
||||
num_cycles=5,
|
||||
Θ=1.5 / 0.05,
|
||||
dt=0.001,
|
||||
timings_H=(0, 0.1, 0.5, 0.6),
|
||||
timings_L=(None, None),
|
||||
streaming_mode=True,
|
||||
)
|
||||
|
||||
ot.plot_cycle(model)
|
|
@@ -1,199 +0,0 @@
|
|||
import figsaver as fs
|
||||
import plot_utils as pu
|
||||
from hiro_models.one_qubit_model import StocProcTolerances
|
||||
from hiro_models.otto_cycle import OttoEngine
|
||||
import hiro_models.model_auxiliary as aux
|
||||
import numpy as np
|
||||
import qutip as qt
|
||||
import utilities as ut
|
||||
import stocproc
|
||||
import matplotlib.pyplot as plt
|
||||
import otto_utilities as ot
|
||||
|
||||
import ray
|
||||
ray.shutdown()
|
||||
|
||||
#ray.init(address='auto')
|
||||
ray.init()
|
||||
from hops.util.logging_setup import logging_setup
|
||||
import logging
|
||||
logging_setup(logging.INFO)
|
||||
plt.rcParams['figure.figsize'] = (12,4)
|
||||
|
||||
model = OttoEngine(
|
||||
δ=[0.4, 0.4],
|
||||
ω_c=[1, 1],
|
||||
ψ_0=qt.basis([2], [1]),
|
||||
description=f"A basic near-markovian, weakly coupled Otto Cycle that actually works.",
|
||||
k_max=4,
|
||||
bcf_terms=[6] * 2,
|
||||
truncation_scheme="simplex",
|
||||
driving_process_tolerances=[StocProcTolerances(1e-4, 1e-4)] * 2,
|
||||
thermal_process_tolerances=[StocProcTolerances(1e-4, 1e-4)] * 2,
|
||||
T=[1, 10],
|
||||
therm_methods=["tanhsinh", "tanhsinh"],
|
||||
Δ=1,
|
||||
num_cycles=5,
|
||||
Θ=1.5 / 0.05,
|
||||
dt=0.001,
|
||||
timings_H=(0, 0.1, 0.5, 0.6),
|
||||
timings_L=((0.6, 0.7, 0.9, 1), (0.1, 0.2, 0.4, 0.5)),
|
||||
streaming_mode=True,
|
||||
shift_to_resonance=(False, False),
|
||||
# solver_args=dict(rtol=1e-3, atol=1e-3)
|
||||
)
|
||||
ot.plot_cycle(model)
|
||||
|
||||
ot.plot_sd_overview(model)
|
||||
|
||||
import figsaver as fs
|
||||
import plot_utils as pu
|
||||
from hiro_models.one_qubit_model import StocProcTolerances
|
||||
from hiro_models.otto_cycle import OttoEngine
|
||||
import hiro_models.model_auxiliary as aux
|
||||
import numpy as np
|
||||
import qutip as qt
|
||||
import utilities as ut
|
||||
import stocproc
|
||||
import matplotlib.pyplot as plt
|
||||
import otto_utilities as ot
|
||||
|
||||
import ray
|
||||
ray.shutdown()
|
||||
|
||||
#ray.init(address='auto')
|
||||
ray.init()
|
||||
from hops.util.logging_setup import logging_setup
|
||||
import logging
|
||||
logging_setup(logging.INFO)
|
||||
plt.rcParams['figure.figsize'] = (12,4)
|
||||
|
||||
model = OttoEngine(
|
||||
δ=[0.4, 0.4],
|
||||
ω_c=[2, 2],
|
||||
ψ_0=qt.basis([2], [1]),
|
||||
description=f"A basic near-markovian, weakly coupled Otto Cycle without the shift.",
|
||||
k_max=3,
|
||||
bcf_terms=[4] * 2,
|
||||
truncation_scheme="simplex",
|
||||
driving_process_tolerances=[StocProcTolerances(1e-4, 1e-4)] * 2,
|
||||
thermal_process_tolerances=[StocProcTolerances(1e-4, 1e-4)] * 2,
|
||||
T=[0, 2],
|
||||
therm_methods=["tanhsinh", "tanhsinh"],
|
||||
Δ=1,
|
||||
num_cycles=1,
|
||||
Θ=1.5 / 0.05,
|
||||
dt=0.001,
|
||||
timings_H=(0, 0.1, 0.5, 0.6),
|
||||
timings_L=((0.6, 0.7, 0.9, 1), (0.1, 0.2, 0.4, 0.5)),
|
||||
streaming_mode=True,
|
||||
shift_to_resonance=(False, False),
|
||||
#ω_s_extra=[.1, .1],
|
||||
)
|
||||
|
||||
model_fft = model.copy()
|
||||
model_fft.therm_methods = ["fft", "fft"]
|
||||
|
||||
ot.plot_cycle(model)
|
||||
|
||||
ot.plot_sd_overview(model)
|
||||
|
||||
import figsaver as fs
|
||||
import plot_utils as pu
|
||||
from hiro_models.one_qubit_model import StocProcTolerances
|
||||
from hiro_models.otto_cycle import OttoEngine
|
||||
import hiro_models.model_auxiliary as aux
|
||||
import numpy as np
|
||||
import qutip as qt
|
||||
import utilities as ut
|
||||
import stocproc
|
||||
import matplotlib.pyplot as plt
|
||||
import otto_utilities as ot
|
||||
|
||||
import ray
|
||||
ray.shutdown()
|
||||
|
||||
#ray.init(address='auto')
|
||||
ray.init()
|
||||
from hops.util.logging_setup import logging_setup
|
||||
import logging
|
||||
logging_setup(logging.INFO)
|
||||
plt.rcParams['figure.figsize'] = (12,4)
|
||||
|
||||
model = OttoEngine(
|
||||
δ=[0.4, 0.4],
|
||||
ω_c=[1, 1],
|
||||
ψ_0=qt.basis([2], [1]),
|
||||
description=f"A basic near-markovian, weakly coupled Otto Cycle that actually works.",
|
||||
k_max=4,
|
||||
bcf_terms=[6] * 2,
|
||||
truncation_scheme="simplex",
|
||||
driving_process_tolerances=[StocProcTolerances(1e-4, 1e-4)] * 2,
|
||||
thermal_process_tolerances=[StocProcTolerances(1e-4, 1e-4)] * 2,
|
||||
T=[1, 10],
|
||||
therm_methods=["tanhsinh", "tanhsinh"],
|
||||
Δ=1,
|
||||
num_cycles=5,
|
||||
Θ=1.5 / 0.05,
|
||||
dt=0.001,
|
||||
timings_H=(0, 0.1, 0.5, 0.6),
|
||||
timings_L=((0.6, 0.7, 0.9, 1), (0.1, 0.2, 0.4, 0.5)),
|
||||
streaming_mode=True,
|
||||
shift_to_resonance=(False, False),
|
||||
# solver_args=dict(rtol=1e-3, atol=1e-3)
|
||||
)
|
||||
ot.plot_cycle(model)
|
||||
|
||||
ot.plot_sd_overview(model)
|
||||
|
||||
import figsaver as fs
|
||||
import plot_utils as pu
|
||||
from hiro_models.one_qubit_model import StocProcTolerances
|
||||
from hiro_models.otto_cycle import OttoEngine
|
||||
import hiro_models.model_auxiliary as aux
|
||||
import numpy as np
|
||||
import qutip as qt
|
||||
import utilities as ut
|
||||
import stocproc
|
||||
import matplotlib.pyplot as plt
|
||||
import otto_utilities as ot
|
||||
|
||||
import ray
|
||||
ray.shutdown()
|
||||
|
||||
#ray.init(address='auto')
|
||||
ray.init()
|
||||
from hops.util.logging_setup import logging_setup
|
||||
import logging
|
||||
logging_setup(logging.INFO)
|
||||
plt.rcParams['figure.figsize'] = (12,4)
|
||||
|
||||
model = OttoEngine(
|
||||
δ=[0.4, 0.4],
|
||||
ω_c=[2, 2],
|
||||
ψ_0=qt.basis([2], [1]),
|
||||
description=f"A basic near-markovian, weakly coupled Otto Cycle without the shift.",
|
||||
k_max=3,
|
||||
bcf_terms=[4] * 2,
|
||||
truncation_scheme="simplex",
|
||||
driving_process_tolerances=[StocProcTolerances(1e-4, 1e-4)] * 2,
|
||||
thermal_process_tolerances=[StocProcTolerances(1e-4, 1e-4)] * 2,
|
||||
T=[0, 2],
|
||||
therm_methods=["tanhsinh", "tanhsinh"],
|
||||
Δ=1,
|
||||
num_cycles=1,
|
||||
Θ=1.5 / 0.05,
|
||||
dt=0.001,
|
||||
timings_H=(0, 0.1, 0.5, 0.6),
|
||||
timings_L=((0.6, 0.7, 0.9, 1), (0.1, 0.2, 0.4, 0.5)),
|
||||
streaming_mode=True,
|
||||
shift_to_resonance=(False, False),
|
||||
#ω_s_extra=[.1, .1],
|
||||
)
|
||||
|
||||
model_fft = model.copy()
|
||||
model_fft.therm_methods = ["fft", "fft"]
|
||||
|
||||
ot.plot_cycle(model)
|
||||
|
||||
ot.plot_sd_overview(model)
|
|
@@ -1,126 +0,0 @@
|
|||
from bayes_opt import BayesianOptimization
|
||||
import figsaver as fs
|
||||
import plot_utils as pu
|
||||
from hiro_models.one_qubit_model import StocProcTolerances
|
||||
from hiro_models.otto_cycle import OttoEngine
|
||||
import hiro_models.model_auxiliary as aux
|
||||
import numpy as np
|
||||
import qutip as qt
|
||||
import utilities as ut
|
||||
import stocproc
|
||||
import matplotlib.pyplot as plt
|
||||
import otto_utilities as ot
|
||||
|
||||
import ray
|
||||
ray.shutdown()
|
||||
|
||||
#ray.init(address='auto')
|
||||
ray.init()
|
||||
from hops.util.logging_setup import logging_setup
|
||||
import logging
|
||||
logging_setup(logging.INFO)
|
||||
plt.rcParams['figure.figsize'] = (12,4)
|
||||
|
||||
def timings(τ_c, τ_i, percent_overlap=0):
|
||||
τ_cI = τ_c * (1-percent_overlap)
|
||||
|
||||
τ_thI = (1 - 2 * τ_cI) / 2
|
||||
τ_th = (1 - 2 * τ_c) / 2
|
||||
τ_i_on = (τ_thI - 2*τ_i)
|
||||
timings_H = (0, τ_c, τ_c + τ_th, 2*τ_c + τ_th)
|
||||
|
||||
timings_L_hot = (τ_cI, τ_cI + τ_i, τ_cI + τ_i + τ_i_on, τ_cI + 2 * τ_i + τ_i_on)
|
||||
|
||||
timings_L_cold = tuple(time + timings_H[2] for time in timings_L_hot)
|
||||
|
||||
return timings_H, (timings_L_cold, timings_L_hot)
|
||||
|
||||
τ_mod, τ_I = 0.15, 0.15
|
||||
(p_H, p_L) = timings(τ_mod, τ_I, 0)
|
||||
prototype = OttoEngine(
|
||||
δ=[0.4, 0.4],
|
||||
ω_c=[2, 2],
|
||||
ψ_0=qt.basis([2], [1]),
|
||||
description=f"Classic Cycle",
|
||||
k_max=3,
|
||||
bcf_terms=[4] * 2,
|
||||
truncation_scheme="simplex",
|
||||
driving_process_tolerances=[StocProcTolerances(1e-3, 1e-3)] * 2,
|
||||
thermal_process_tolerances=[StocProcTolerances(1e-3, 1e-3)] * 2,
|
||||
T=[0.5, 4],
|
||||
therm_methods=["tanhsinh", "tanhsinh"],
|
||||
Δ=1,
|
||||
num_cycles=3,
|
||||
Θ=1.5 / 0.05,
|
||||
dt=0.01,
|
||||
timings_H=p_H,
|
||||
timings_L=p_L,
|
||||
streaming_mode=True,
|
||||
shift_to_resonance=(False, True),
|
||||
L_shift=(0, 0),
|
||||
)
|
||||
|
||||
|
||||
def make_cycle(shift_c, shift_h):
|
||||
crazy_model = prototype.copy()
|
||||
(p_H, p_L) = timings(τ_mod, τ_I, 1)
|
||||
p_L = [list(timings) for timings in p_L]
|
||||
p_L[1][2] += τ_I
|
||||
p_L[1][3] += τ_I
|
||||
p_L[0][0] -= τ_I
|
||||
p_L[0][1] -= τ_I
|
||||
crazy_model.timings_H = p_H
|
||||
crazy_model.timings_L = tuple(tuple(timing) for timing in p_L)
|
||||
crazy_model.L_shift = (shift_c + τ_mod, shift_h)
|
||||
crazy_model.description = "Full Overlap with Shift"
|
||||
|
||||
return crazy_model
|
||||
|
||||
def objective(shift_c, shift_h, N=500):
|
||||
print(shift_c, shift_h)
|
||||
model = make_cycle(shift_c, shift_h)
|
||||
ot.integrate_online(model, N)
|
||||
|
||||
return -1 * model.power(steady_idx=-2).value
|
||||
|
||||
# Bounded region of parameter space
|
||||
from bayes_opt.logger import JSONLogger
|
||||
from bayes_opt.event import Events
|
||||
from bayes_opt.util import load_logs
|
||||
pbounds = {"shift_c": (-0.1, 0.5), "shift_h": (-0.1, 0.5)}
|
||||
|
||||
optimizer = BayesianOptimization(
|
||||
f=objective,
|
||||
pbounds=pbounds,
|
||||
random_state=1,
|
||||
)
|
||||
# load_logs(optimizer, logs=["./logs.json"]);
|
||||
|
||||
# logger = JSONLogger(path="./logs.json")
|
||||
# optimizer.subscribe(Events.OPTIMIZATION_STEP, logger)
|
||||
optimizer.probe(
|
||||
params={"shift_c": 0.15, "shift_h": 0.15},
|
||||
lazy=True,
|
||||
)
|
||||
|
||||
optimizer.maximize(
|
||||
init_points=4,
|
||||
n_iter=100,
|
||||
)
|
||||
|
||||
with aux.model_db(data_path=".data") as db:
|
||||
model = db["05a638feb440fd913b41a5be74fbdd5a6cc358f2b556e61e4005b8539ca15115"]["model_config"]
|
||||
c=make_cycle(0.401813980810373, 0.302982197157591)
|
||||
# aux.import_results(
|
||||
# other_data_path = "taurus/.data",
|
||||
# results_path = "./results",
|
||||
# other_results_path = "taurus/results",
|
||||
# interactive = False,
|
||||
# models_to_import = [model],
|
||||
# force = False,
|
||||
# )
|
||||
#ot.plot_cycle(c)
|
||||
#model.L_shift
|
||||
t, total = ot.val_relative_to_steady(model, model.total_energy_from_power(), steady_idx=-2)
|
||||
pu.plot_with_σ(t, total)
|
||||
model.power(steady_idx=-2)
|
|
@@ -1,391 +0,0 @@
|
|||
#+PROPERTY: header-args :session otto_bayes :kernel python :pandoc no :async yes :tangle no
|
||||
|
||||
Motivated by the striking result [[id:e8e99290-bd53-4d68-89f4-f903d6cf230c][from over here]], we would like to find some approximation of the optimal cycle.
|
||||
|
||||
Bayesian optimization allows us to approach the optimum with as few cycle simulations as possible.
|
||||
|
||||
* Boilerplate
|
||||
#+name: boilerplate
|
||||
#+begin_src jupyter-python :results none
|
||||
from bayes_opt import BayesianOptimization
|
||||
import figsaver as fs
|
||||
import plot_utils as pu
|
||||
from hiro_models.one_qubit_model import StocProcTolerances
|
||||
from hiro_models.otto_cycle import OttoEngine
|
||||
import hiro_models.model_auxiliary as aux
|
||||
import numpy as np
|
||||
import qutip as qt
|
||||
import utilities as ut
|
||||
import stocproc
|
||||
import matplotlib.pyplot as plt
|
||||
import otto_utilities as ot
|
||||
|
||||
import ray
|
||||
ray.shutdown()
|
||||
|
||||
#ray.init(address='auto')
|
||||
ray.init()
|
||||
from hops.util.logging_setup import logging_setup
|
||||
import logging
|
||||
logging_setup(logging.INFO)
|
||||
plt.rcParams['figure.figsize'] = (12,4)
|
||||
|
||||
def timings(τ_c, τ_i, percent_overlap=0):
|
||||
τ_cI = τ_c * (1-percent_overlap)
|
||||
|
||||
τ_thI = (1 - 2 * τ_cI) / 2
|
||||
τ_th = (1 - 2 * τ_c) / 2
|
||||
τ_i_on = (τ_thI - 2*τ_i)
|
||||
timings_H = (0, τ_c, τ_c + τ_th, 2*τ_c + τ_th)
|
||||
|
||||
timings_L_hot = (τ_cI, τ_cI + τ_i, τ_cI + τ_i + τ_i_on, τ_cI + 2 * τ_i + τ_i_on)
|
||||
|
||||
timings_L_cold = tuple(time + timings_H[2] for time in timings_L_hot)
|
||||
|
||||
return timings_H, (timings_L_cold, timings_L_hot)
|
||||
#+end_src
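
As a quick sanity check of the =timings= helper above, the expected switching
times for the parameters used below follow directly from its formulas:

#+begin_src jupyter-python :tangle no
# for τ_c = τ_i = 0.15 and no overlap we expect
#   timings_H      = (0, 0.15, 0.5, 0.65)
#   timings_L_hot  ≈ (0.15, 0.3, 0.35, 0.5)
#   timings_L_cold ≈ (0.65, 0.8, 0.85, 1.0)   (up to float rounding)
# note that the function returns (timings_H, (timings_L_cold, timings_L_hot))
timings(0.15, 0.15, 0)
#+end_src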
|
||||
|
||||
* First Attempt: Only shifting the coupling to the bath
|
||||
:PROPERTIES:
|
||||
:header-args: :tangle bayes.py :session simple_bayes :noweb yes :async yes
|
||||
:END:
|
||||
|
||||
#+begin_src jupyter-python :results none
|
||||
<<boilerplate>>
|
||||
#+end_src
|
||||
|
||||
To keep the number of parameters down, we'll shift the bath coupling
|
||||
without changing the coupling length. Later, an asymmetric approach
|
||||
with more parameters may be attempted.
|
||||
|
||||
#+begin_src jupyter-python
|
||||
τ_mod, τ_I = 0.15, 0.15
|
||||
(p_H, p_L) = timings(τ_mod, τ_I, 0)
|
||||
prototype = OttoEngine(
|
||||
δ=[0.4, 0.4],
|
||||
ω_c=[2, 2],
|
||||
ψ_0=qt.basis([2], [1]),
|
||||
description=f"Classic Cycle",
|
||||
k_max=3,
|
||||
bcf_terms=[4] * 2,
|
||||
truncation_scheme="simplex",
|
||||
driving_process_tolerances=[StocProcTolerances(1e-3, 1e-3)] * 2,
|
||||
thermal_process_tolerances=[StocProcTolerances(1e-3, 1e-3)] * 2,
|
||||
T=[0.5, 4],
|
||||
therm_methods=["tanhsinh", "tanhsinh"],
|
||||
Δ=1,
|
||||
num_cycles=3,
|
||||
Θ=1.5 / 0.05,
|
||||
dt=0.01,
|
||||
timings_H=p_H,
|
||||
timings_L=p_L,
|
||||
streaming_mode=True,
|
||||
shift_to_resonance=(False, True),
|
||||
L_shift=(0, 0),
|
||||
)
|
||||
|
||||
|
||||
def make_cycle(shift_c, shift_h):
|
||||
crazy_model = prototype.copy()
|
||||
(p_H, p_L) = timings(τ_mod, τ_I, 1)
|
||||
p_L = [list(timings) for timings in p_L]
|
||||
p_L[1][2] += τ_I
|
||||
p_L[1][3] += τ_I
|
||||
p_L[0][0] -= τ_I
|
||||
p_L[0][1] -= τ_I
|
||||
crazy_model.timings_H = p_H
|
||||
crazy_model.timings_L = tuple(tuple(timing) for timing in p_L)
|
||||
crazy_model.L_shift = (shift_c + τ_mod, shift_h)
|
||||
crazy_model.description = "Full Overlap with Shift"
|
||||
|
||||
return crazy_model
|
||||
#+end_src
|
||||
|
||||
#+RESULTS:
|
||||
|
||||
|
||||
This is the best known config so far.
|
||||
#+begin_src jupyter-python :tangle no
|
||||
#ot.plot_cycle(make_cycle(τ_mod, τ_mod))
|
||||
ot.plot_cycle(make_cycle(.4018, .303))
|
||||
#+end_src
|
||||
|
||||
#+RESULTS:
|
||||
:RESULTS:
|
||||
: (<Figure size 1200x400 with 1 Axes>, <AxesSubplot: xlabel='$\tau$', ylabel='Operator Norm'>)
|
||||
[[file:./.ob-jupyter/fb246ee7bdc3bb9cd2ff2e98dc02af2122dc7688.svg]]
|
||||
:END:
|
||||
|
||||
|
||||
Now we define our objective function.
|
||||
#+begin_src jupyter-python :results none
|
||||
def objective(shift_c, shift_h, N=500):
|
||||
print(shift_c, shift_h)
|
||||
model = make_cycle(shift_c, shift_h)
|
||||
ot.integrate_online(model, N)
|
||||
|
||||
return -1 * model.power(steady_idx=-2).value
|
||||
#+end_src
|
||||
|
||||
|
||||
... and run the optimizer.
|
||||
#+begin_src jupyter-python
|
||||
# Bounded region of parameter space
|
||||
from bayes_opt.logger import JSONLogger
|
||||
from bayes_opt.event import Events
|
||||
from bayes_opt.util import load_logs
|
||||
pbounds = {"shift_c": (-0.1, 0.5), "shift_h": (-0.1, 0.5)}
|
||||
|
||||
optimizer = BayesianOptimization(
|
||||
f=objective,
|
||||
pbounds=pbounds,
|
||||
random_state=1,
|
||||
)
|
||||
# load_logs(optimizer, logs=["./logs.json"]);
|
||||
|
||||
# logger = JSONLogger(path="./logs.json")
|
||||
# optimizer.subscribe(Events.OPTIMIZATION_STEP, logger)
|
||||
optimizer.probe(
|
||||
params={"shift_c": 0.15, "shift_h": 0.15},
|
||||
lazy=True,
|
||||
)
|
||||
|
||||
optimizer.maximize(
|
||||
init_points=4,
|
||||
n_iter=100,
|
||||
)
|
||||
#+end_src
|
||||
|
||||
#+RESULTS:
|
||||
:RESULTS:
|
||||
#+begin_example
|
||||
[INFO hops.core.integration 264609] Choosing the nonlinear integrator.
|
||||
[INFO root 264609] Starting analysis process.
|
||||
| iter | target | shift_c | shift_h |
|
||||
-------------------------------------------------
|
||||
[INFO root 264609] Started analysis process with pid 268966.
|
||||
[INFO hops.core.hierarchy_data 264609] Creating the streaming fifo at: /home/hiro/Documents/Projects/UNI/master/eflow_paper/python/otto_motor/results_49165931924fa512ce3e8357ea5e629d22c808f8070c3949c830b5948e16ecf2.fifo
|
||||
[INFO hops.core.integration 264609] Using 16 integrators.
|
||||
[INFO hops.core.integration 264609] Some 0 trajectories have to be integrated.
|
||||
[INFO hops.core.integration 264609] Using 165 hierarchy states.
|
||||
0it [00:00, ?it/s]
|
||||
[INFO hops.core.integration 264609] Choosing the nonlinear integrator.
|
||||
[INFO root 264609] Starting analysis process.
|
||||
| 1         | -0.0      | 0.1502    | 0.3322    |
|
||||
[INFO root 264609] Started analysis process with pid 268971.
|
||||
[INFO hops.core.hierarchy_data 264609] Creating the streaming fifo at: /home/hiro/Documents/Projects/UNI/master/eflow_paper/python/otto_motor/results_c369b7aefe5503442c698bdb4de83a3f7b1c88ae9cdf1456153e9087c7d9fc2f.fifo
|
||||
[INFO hops.core.integration 264609] Using 16 integrators.
|
||||
[INFO hops.core.integration 264609] Some 0 trajectories have to be integrated.
|
||||
[INFO hops.core.integration 264609] Using 165 hierarchy states.
|
||||
0it [00:00, ?it/s]
|
||||
[INFO hops.core.integration 264609] Choosing the nonlinear integrator.
|
||||
[INFO root 264609] Starting analysis process.
|
||||
| 2         | -0.01777  | -0.09993  | 0.0814    |
|
||||
[INFO root 264609] Started analysis process with pid 268976.
|
||||
[INFO hops.core.hierarchy_data 264609] Creating the streaming fifo at: /home/hiro/Documents/Projects/UNI/master/eflow_paper/python/otto_motor/results_f6fa2d1ea82b839e46df4013b731e3476b80119a206ef196c9a10f9d625066e4.fifo
|
||||
[INFO hops.core.integration 264609] Using 16 integrators.
|
||||
[INFO hops.core.integration 264609] Some 0 trajectories have to be integrated.
|
||||
[INFO hops.core.integration 264609] Using 165 hierarchy states.
|
||||
0it [00:00, ?it/s]
|
||||
[INFO hops.core.integration 264609] Choosing the nonlinear integrator.
|
||||
[INFO root 264609] Starting analysis process.
|
||||
| 3         | -0.001374 | -0.01195  | -0.0446   |
|
||||
[INFO root 264609] Started analysis process with pid 268981.
|
||||
[INFO hops.core.hierarchy_data 264609] Creating the streaming fifo at: /home/hiro/Documents/Projects/UNI/master/eflow_paper/python/otto_motor/results_1779c2e0c81b26e68f18a2298525a84c531fd36c909e6fddc0e41f3b78a02ee1.fifo
|
||||
[INFO hops.core.integration 264609] Using 16 integrators.
|
||||
[INFO hops.core.integration 264609] Some 0 trajectories have to be integrated.
|
||||
[INFO hops.core.integration 264609] Using 165 hierarchy states.
|
||||
0it [00:00, ?it/s]
|
||||
[INFO hops.core.integration 264609] Choosing the nonlinear integrator.
|
||||
[INFO root 264609] Starting analysis process.
|
||||
| 4         | -0.0      | 0.01176   | 0.1073    |
|
||||
[INFO root 264609] Started analysis process with pid 269016.
|
||||
[INFO hops.core.hierarchy_data 264609] Creating the streaming fifo at: /home/hiro/Documents/Projects/UNI/master/eflow_paper/python/otto_motor/results_4c924d501d086d896c1552881c628116ad03c2100d680cb6ef5cc81dd4b2a2a6.fifo
|
||||
[INFO hops.core.integration 264609] Using 16 integrators.
|
||||
[INFO hops.core.integration 264609] Some 0 trajectories have to be integrated.
|
||||
[INFO hops.core.integration 264609] Using 165 hierarchy states.
|
||||
0it [00:00, ?it/s]
|
||||
[INFO hops.core.integration 264609] Choosing the nonlinear integrator.
|
||||
[INFO root 264609] Starting analysis process.
|
||||
| 5         | -0.02987  | -0.03473  | 0.1213    |
|
||||
[INFO root 264609] Started analysis process with pid 269051.
|
||||
[INFO hops.core.hierarchy_data 264609] Creating the streaming fifo at: /home/hiro/Documents/Projects/UNI/master/eflow_paper/python/otto_motor/results_d8ae65a827650db8ac3da6b4bce3faef161be342650b8238d59244d1ec5f69bb.fifo
|
||||
[INFO hops.core.integration 264609] Using 16 integrators.
|
||||
[INFO hops.core.integration 264609] Some 0 trajectories have to be integrated.
|
||||
[INFO hops.core.integration 264609] Using 165 hierarchy states.
|
||||
0it [00:00, ?it/s]
|
||||
[INFO hops.core.integration 264609] Choosing the nonlinear integrator.
|
||||
[INFO root 264609] Starting analysis process.
|
||||
| 6         | 0.02888   | 0.398     | 0.2961    |
|
||||
[INFO root 264609] Started analysis process with pid 269086.
|
||||
[INFO hops.core.hierarchy_data 264609] Creating the streaming fifo at: /home/hiro/Documents/Projects/UNI/master/eflow_paper/python/otto_motor/results_97c6b5378d143228f25a568548ca12c00f145bef0320218d249394cdf75795d6.fifo
|
||||
[INFO hops.core.integration 264609] Using 16 integrators.
|
||||
[INFO hops.core.integration 264609] Some 0 trajectories have to be integrated.
|
||||
[INFO hops.core.integration 264609] Using 165 hierarchy states.
|
||||
0it [00:00, ?it/s]
|
||||
[INFO hops.core.integration 264609] Choosing the nonlinear integrator.
|
||||
[INFO root 264609] Starting analysis process.
|
||||
| 7         | -0.004074 | -0.01042  | 0.2773    |
|
||||
[INFO root 264609] Started analysis process with pid 269121.
|
||||
[INFO hops.core.hierarchy_data 264609] Creating the streaming fifo at: /home/hiro/Documents/Projects/UNI/master/eflow_paper/python/otto_motor/results_a5de368bb5ac8323883a97b14f8dc14ef84d021a9134152bd2e73a3bd4760052.fifo
|
||||
[INFO hops.core.integration 264609] Using 16 integrators.
|
||||
[INFO hops.core.integration 264609] Some 0 trajectories have to be integrated.
|
||||
[INFO hops.core.integration 264609] Using 165 hierarchy states.
|
||||
0it [00:00, ?it/s]
|
||||
[INFO hops.core.integration 264609] Choosing the nonlinear integrator.
|
||||
[INFO root 264609] Starting analysis process.
|
||||
| 8         | -0.04096  | -0.04836  | 0.3552    |
|
||||
[INFO root 264609] Started analysis process with pid 269156.
|
||||
[INFO hops.core.hierarchy_data 264609] Creating the streaming fifo at: /home/hiro/Documents/Projects/UNI/master/eflow_paper/python/otto_motor/results_c981612944d44cef75b47e1c972bc3e1b979a39b180aeceea5746a3cc49138b9.fifo
|
||||
[INFO hops.core.integration 264609] Using 16 integrators.
|
||||
[INFO hops.core.integration 264609] Some 0 trajectories have to be integrated.
|
||||
[INFO hops.core.integration 264609] Using 165 hierarchy states.
|
||||
0it [00:00, ?it/s]
|
||||
[INFO hops.core.integration 264609] Choosing the nonlinear integrator.
|
||||
[INFO root 264609] Starting analysis process.
|
||||
| 9         | 7.841e-06 | 0.1374    | -0.003125 |
|
||||
[INFO root 264609] Started analysis process with pid 269191.
|
||||
[INFO hops.core.hierarchy_data 264609] Creating the streaming fifo at: /home/hiro/Documents/Projects/UNI/master/eflow_paper/python/otto_motor/results_ad69b9cace9b50f1041832e082421a317348dcbb5bfda0a3e6b8efb7717c92e8.fifo
|
||||
[INFO hops.core.integration 264609] Using 16 integrators.
|
||||
[INFO hops.core.integration 264609] Some 610 trajectories have to be integrated.
|
||||
[INFO hops.core.integration 264609] Using 165 hierarchy states.
|
||||
0% 0/610 [00:00<?, ?it/s][INFO hops.core.signal_delay 264609] caught sig 'SIGINT'
|
||||
[INFO hops.core.signal_delay 264609] caught sig 'SIGINT'
|
||||
[INFO hops.core.signal_delay 264609] caught sig 'SIGINT'
|
||||
0% 0/610 [00:10<?, ?it/s]
|
||||
[INFO hops.core.signal_delay 264609] caught 3 signal(s)
|
||||
[INFO hops.core.signal_delay 264609] emit signal 'SIGINT'
|
||||
[INFO hops.core.signal_delay 264609] caught sig 'SIGINT'
|
||||
[INFO hops.core.signal_delay 264609] emit signal 'SIGINT'
|
||||
[INFO hops.core.signal_delay 264609] caught sig 'SIGINT'
|
||||
[INFO hops.core.signal_delay 264609] emit signal 'SIGINT'
|
||||
[INFO hops.core.signal_delay 264609] caught sig 'SIGINT'
|
||||
2023-02-02 09:05:23,690 ERROR worker.py:94 -- Unhandled error (suppress with 'RAY_IGNORE_UNHANDLED_ERRORS=1'): The worker died unexpectedly while executing this task. Check python-core-worker-*.log files for more information.
|
||||
2023-02-02 09:05:23,691 ERROR worker.py:94 -- Unhandled error (suppress with 'RAY_IGNORE_UNHANDLED_ERRORS=1'): The worker died unexpectedly while executing this task. Check python-core-worker-*.log files for more information.
|
||||
2023-02-02 09:05:23,695 ERROR worker.py:94 -- Unhandled error (suppress with 'RAY_IGNORE_UNHANDLED_ERRORS=1'): The worker died unexpectedly while executing this task. Check python-core-worker-*.log files for more information.
|
||||
2023-02-02 09:05:23,696 ERROR worker.py:94 -- Unhandled error (suppress with 'RAY_IGNORE_UNHANDLED_ERRORS=1'): The worker died unexpectedly while executing this task. Check python-core-worker-*.log files for more information.
|
||||
2023-02-02 09:05:23,697 ERROR worker.py:94 -- Unhandled error (suppress with 'RAY_IGNORE_UNHANDLED_ERRORS=1'): The worker died unexpectedly while executing this task. Check python-core-worker-*.log files for more information.
|
||||
2023-02-02 09:05:23,705 ERROR worker.py:94 -- Unhandled error (suppress with 'RAY_IGNORE_UNHANDLED_ERRORS=1'): The worker died unexpectedly while executing this task. Check python-core-worker-*.log files for more information.
|
||||
2023-02-02 09:05:23,729 ERROR worker.py:94 -- Unhandled error (suppress with 'RAY_IGNORE_UNHANDLED_ERRORS=1'): The worker died unexpectedly while executing this task. Check python-core-worker-*.log files for more information.
|
||||
2023-02-02 09:05:23,741 ERROR worker.py:94 -- Unhandled error (suppress with 'RAY_IGNORE_UNHANDLED_ERRORS=1'): The worker died unexpectedly while executing this task. Check python-core-worker-*.log files for more information.
|
||||
2023-02-02 09:05:23,742 ERROR worker.py:94 -- Unhandled error (suppress with 'RAY_IGNORE_UNHANDLED_ERRORS=1'): The worker died unexpectedly while executing this task. Check python-core-worker-*.log files for more information.
|
||||
2023-02-02 09:05:23,742 ERROR worker.py:94 -- Unhandled error (suppress with 'RAY_IGNORE_UNHANDLED_ERRORS=1'): The worker died unexpectedly while executing this task. Check python-core-worker-*.log files for more information.
|
||||
2023-02-02 09:05:23,743 ERROR worker.py:94 -- Unhandled error (suppress with 'RAY_IGNORE_UNHANDLED_ERRORS=1'): The worker died unexpectedly while executing this task. Check python-core-worker-*.log files for more information.
|
||||
2023-02-02 09:05:23,743 ERROR worker.py:94 -- Unhandled error (suppress with 'RAY_IGNORE_UNHANDLED_ERRORS=1'): The worker died unexpectedly while executing this task. Check python-core-worker-*.log files for more information.
|
||||
2023-02-02 09:05:23,743 ERROR worker.py:94 -- Unhandled error (suppress with 'RAY_IGNORE_UNHANDLED_ERRORS=1'): The worker died unexpectedly while executing this task. Check python-core-worker-*.log files for more information.
|
||||
2023-02-02 09:05:23,744 ERROR worker.py:94 -- Unhandled error (suppress with 'RAY_IGNORE_UNHANDLED_ERRORS=1'): The worker died unexpectedly while executing this task. Check python-core-worker-*.log files for more information.
|
||||
2023-02-02 09:05:23,744 ERROR worker.py:94 -- Unhandled error (suppress with 'RAY_IGNORE_UNHANDLED_ERRORS=1'): The worker died unexpectedly while executing this task. Check python-core-worker-*.log files for more information.
|
||||
2023-02-02 09:05:23,745 ERROR worker.py:94 -- Unhandled error (suppress with 'RAY_IGNORE_UNHANDLED_ERRORS=1'): The worker died unexpectedly while executing this task. Check python-core-worker-*.log files for more information.
|
||||
[INFO hops.core.signal_delay 264609] caught 3 signal(s)
|
||||
[INFO hops.core.signal_delay 264609] emit signal 'SIGINT'
|
||||
#+end_example
|
||||
# [goto error]
|
||||
#+begin_example
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
Cell In[7], line 16
      7 optimizer = BayesianOptimization(
      8     f=objective,
      9     pbounds=pbounds,
     10     random_state=1,
     11 )
     12 # load_logs(optimizer, logs=["./logs.json"]);
     13
     14 # logger = JSONLogger(path="./logs.json")
     15 # optimizer.subscribe(Events.OPTIMIZATION_STEP, logger)
---> 16 optimizer.maximize(
     17     init_points=4,
     18     n_iter=8,
     19 )

File /nix/store/vkzza81mzwyk5br1c6cm67g48xycvmvl-python3-3.9.15-env/lib/python3.9/site-packages/bayes_opt/bayesian_optimization.py:311, in BayesianOptimization.maximize(self, init_points, n_iter, acquisition_function, acq, kappa, kappa_decay, kappa_decay_delay, xi, **gp_params)
    309 x_probe = self.suggest(util)
    310 iteration += 1
--> 311 self.probe(x_probe, lazy=False)
    313 if self._bounds_transformer and iteration > 0:
    314     # The bounds transformer should only modify the bounds after
    315     # the init_points points (only for the true iterations)
    316     self.set_bounds(
    317         self._bounds_transformer.transform(self._space))

File /nix/store/vkzza81mzwyk5br1c6cm67g48xycvmvl-python3-3.9.15-env/lib/python3.9/site-packages/bayes_opt/bayesian_optimization.py:208, in BayesianOptimization.probe(self, params, lazy)
    206     self._queue.add(params)
    207 else:
--> 208     self._space.probe(params)
    209     self.dispatch(Events.OPTIMIZATION_STEP)

File /nix/store/vkzza81mzwyk5br1c6cm67g48xycvmvl-python3-3.9.15-env/lib/python3.9/site-packages/bayes_opt/target_space.py:236, in TargetSpace.probe(self, params)
    234 x = self._as_array(params)
    235 params = dict(zip(self._keys, x))
--> 236 target = self.target_func(**params)
    238 if self._constraint is None:
    239     self.register(x, target)

Cell In[4], line 3, in objective(shift_c, shift_h, N)
      1 def objective(shift_c, shift_h, N=1000):
      2     model = make_cycle(shift_c, shift_h)
----> 3     ot.integrate_online(model, N)
      5     return -1 * model.power(steady_idx=-1).value

File ~/Documents/Projects/UNI/master/eflow_paper/python/otto_motor/otto_utilities.py:155, in integrate_online(model, n, stream_folder, **kwargs)
    154 def integrate_online(model, n, stream_folder=None, **kwargs):
--> 155     aux.integrate(
    156         model,
    157         n,
    158         stream_file=("" if stream_folder is None else stream_folder)
    159         + f"results_{model.hexhash}.fifo",
    160         analyze=True,
    161         **kwargs,
    162     )

File ~/src/two_qubit_model/hiro_models/model_auxiliary.py:201, in integrate(model, n, data_path, clear_pd, single_process, stream_file, analyze, results_path, analyze_kwargs)
    199     supervisor.integrate_single_process(clear_pd)
    200 else:
--> 201     supervisor.integrate(clear_pd)
    203 cleanup(0)

File ~/src/hops/hops/core/signal_delay.py:87, in sig_delay.__exit__(self, exc_type, exc_val, exc_tb)
     84 if len(self.sigh.sigs_caught) > 0 and self.handler is not None:
     85     self.handler(self.sigh.sigs_caught)
---> 87 self._restore()

File ~/src/hops/hops/core/signal_delay.py:68, in sig_delay._restore(self)
     66 for i, s in enumerate(self.sigs):
     67     signal.signal(s, self.old_handlers[i])
---> 68 self.sigh.emit()

File ~/src/hops/hops/core/signal_delay.py:42, in SigHandler.emit(self)
     40 for s in self.sigs_caught:
     41     log.info("emit signal '{}'".format(SIG_MAP[s]))
---> 42 os.kill(os.getpid(), s)

KeyboardInterrupt:
#+end_example
|
||||
:END:
|
||||
|
||||
#+begin_src jupyter-python
|
||||
with aux.model_db(data_path=".data") as db:
|
||||
model = db["05a638feb440fd913b41a5be74fbdd5a6cc358f2b556e61e4005b8539ca15115"]["model_config"]
|
||||
c=make_cycle(0.401813980810373, 0.302982197157591)
|
||||
# aux.import_results(
|
||||
# other_data_path = "taurus/.data",
|
||||
# results_path = "./results",
|
||||
# other_results_path = "taurus/results",
|
||||
# interactive = False,
|
||||
# models_to_import = [model],
|
||||
# force = False,
|
||||
# )
|
||||
#ot.plot_cycle(c)
|
||||
#model.L_shift
|
||||
t, total = ot.val_relative_to_steady(model, model.total_energy_from_power(), steady_idx=-2)
|
||||
pu.plot_with_σ(t, total)
|
||||
model.power(steady_idx=-2)
|
||||
#+end_src
|
||||
|
||||
#+RESULTS:
|
||||
:RESULTS:
|
||||
: EnsembleValue([(10000, 3.3527328716046976e-05, 5.0274219343398344e-05)])
|
||||
[[file:./.ob-jupyter/bb13221d6e76ccdb1e7068e301948add99c2104a.svg]]
|
||||
:END:
|
113
python/otto_motor/flake.lock
generated
|
@@ -1,113 +0,0 @@
|
|||
{
|
||||
"nodes": {
|
||||
"flake-utils": {
|
||||
"locked": {
|
||||
"lastModified": 1667395993,
|
||||
"narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-utils_2": {
|
||||
"locked": {
|
||||
"lastModified": 1667395993,
|
||||
"narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1670841420,
|
||||
"narHash": "sha256-mSEia1FzrsHbfqjorMyYiX8NXdDVeR1Pw1k55jMJlJY=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "33e0d99cbedf2acfd7340d2150837fbb28039a64",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"id": "nixpkgs",
|
||||
"ref": "nixos-unstable",
|
||||
"type": "indirect"
|
||||
}
|
||||
},
|
||||
"nixpkgs_2": {
|
||||
"locked": {
|
||||
"lastModified": 1669226068,
|
||||
"narHash": "sha256-/eL0TjRajyQWddAQV2yBxhHjT3gJ8/zfNUNpbo8zXDQ=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "176d2084b5751560dfc32ec4a81858c9290d14c5",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"poetry2nix": {
|
||||
"inputs": {
|
||||
"flake-utils": "flake-utils_2",
|
||||
"nixpkgs": "nixpkgs_2"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1669070813,
|
||||
"narHash": "sha256-Z8FxcJfG7cIAIjMvGU0jr1K1oWCM/DDnlU7i8LsrkKY=",
|
||||
"owner": "nix-community",
|
||||
"repo": "poetry2nix",
|
||||
"rev": "686a2d4ee4f00244b80396d1948e3b38000df6e8",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"repo": "poetry2nix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"nixpkgs": "nixpkgs",
|
||||
"utils": "utils"
|
||||
}
|
||||
},
|
||||
"utils": {
|
||||
"inputs": {
|
||||
"flake-utils": "flake-utils",
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
],
|
||||
"poetry2nix": "poetry2nix"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1670963182,
|
||||
"narHash": "sha256-TybEDLywRxX0fKqdWKRhtKyegNJqwwuauxTbCX48Wpc=",
|
||||
"owner": "vale981",
|
||||
"repo": "hiro-flake-utils",
|
||||
"rev": "90714b1950c419b0e3eb879ae7140e8a88c15f05",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "vale981",
|
||||
"repo": "hiro-flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
}
|
||||
},
|
||||
"root": "root",
|
||||
"version": 7
|
||||
}
|
|
@ -1,25 +0,0 @@
|
|||
from timing_scan import *
|
||||
|
||||
aux.import_results(
|
||||
data_path="./.data",
|
||||
other_data_path="./taurus/.data_timing",
|
||||
results_path="./results",
|
||||
other_results_path="./taurus/results_timing",
|
||||
interactive=False,
|
||||
models_to_import=models,
|
||||
skip_checkpoints=False,
|
||||
force=True,
|
||||
)
|
||||
|
||||
from timing_scan import *
|
||||
|
||||
aux.import_results(
|
||||
data_path="./.data",
|
||||
other_data_path="./taurus/.data_timing",
|
||||
results_path="./results",
|
||||
other_results_path="./taurus/results_timing",
|
||||
interactive=False,
|
||||
models_to_import=models[5:6],
|
||||
skip_checkpoints=False,
|
||||
force=True,
|
||||
)
|
|
@ -1,43 +0,0 @@
|
|||
import figsaver as fs
|
||||
import plot_utils as pu
|
||||
from hiro_models.one_qubit_model import StocProcTolerances
|
||||
from hiro_models.otto_cycle import OttoEngine
|
||||
import hiro_models.model_auxiliary as aux
|
||||
import numpy as np
|
||||
import qutip as qt
|
||||
import utilities as ut
|
||||
import stocproc
|
||||
import matplotlib.pyplot as plt
|
||||
import otto_utilities as ot
|
||||
|
||||
import ray
|
||||
ray.shutdown()
|
||||
|
||||
#ray.init(address='auto')
|
||||
ray.init()
|
||||
from hops.util.logging_setup import logging_setup
|
||||
import logging
|
||||
logging_setup(logging.INFO)
|
||||
plt.rcParams['figure.figsize'] = (12,4)
|
||||
|
||||
import figsaver as fs
|
||||
import plot_utils as pu
|
||||
from hiro_models.one_qubit_model import StocProcTolerances
|
||||
from hiro_models.otto_cycle import OttoEngine
|
||||
import hiro_models.model_auxiliary as aux
|
||||
import numpy as np
|
||||
import qutip as qt
|
||||
import utilities as ut
|
||||
import stocproc
|
||||
import matplotlib.pyplot as plt
|
||||
import otto_utilities as ot
|
||||
|
||||
import ray
|
||||
ray.shutdown()
|
||||
|
||||
#ray.init(address='auto')
|
||||
ray.init()
|
||||
from hops.util.logging_setup import logging_setup
|
||||
import logging
|
||||
logging_setup(logging.INFO)
|
||||
plt.rcParams['figure.figsize'] = (12,4)
|
|
@ -1,811 +0,0 @@
|
|||
import matplotlib.pyplot as plt
|
||||
import plot_utils as pu
|
||||
from hiro_models.otto_cycle import OttoEngine, SmoothlyInterpolatdPeriodicMatrix
|
||||
from hops.util.dynamic_matrix import *
|
||||
import numpy as np
|
||||
import figsaver as fs
|
||||
import hiro_models.model_auxiliary as aux
|
||||
from typing import Iterable
|
||||
import qutip as qt
|
||||
import itertools
|
||||
|
||||
|
||||
def plot_power_eff_convergence(models, steady_idx=2):
|
||||
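    """Plot steady-state power and |efficiency| against the sample count N for each model."""
|
||||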
f, (a_power, a_efficiency) = plt.subplots(ncols=2)
|
||||
|
||||
a_efficiency.set_yscale("log")
|
||||
for model in models:
|
||||
try:
|
||||
Ns = model.power(steady_idx=steady_idx).Ns
|
||||
a_power.plot(Ns, model.power(steady_idx=steady_idx).values)
|
||||
a_efficiency.plot(
|
||||
Ns, np.abs(model.efficiency(steady_idx=steady_idx).values)
|
||||
)
|
||||
        except Exception:  # skip models without usable data
|
||||
pass
|
||||
|
||||
a_power.set_xlabel("$N$")
|
||||
a_power.set_ylabel("$P$")
|
||||
a_efficiency.set_xlabel("$N$")
|
||||
a_efficiency.set_ylabel("$\eta$")
|
||||
return f, (a_power, a_efficiency)
|
||||
|
||||
|
||||
@pu.wrap_plot
|
||||
def plot_powers_and_efficiencies(x, models, steady_idx=2, ax=None, xlabel=""):
|
||||
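    """Plot steady-state total, system and interaction powers against x, with the efficiency (percent) on a twin axis."""
|
||||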
powers = [-model.power(steady_idx=steady_idx).value for model in models]
|
||||
powers_σ = [model.power(steady_idx=steady_idx).σ for model in models]
|
||||
|
||||
ax.axhline(0, color="lightgray")
|
||||
system_powers = [
|
||||
val_relative_to_steady(
|
||||
model,
|
||||
-1 * model.system_power().integrate(model.t) * 1 / model.Θ,
|
||||
steady_idx=steady_idx,
|
||||
)[1].value[-1]
|
||||
for model in models
|
||||
]
|
||||
|
||||
system_powers_σ = [
|
||||
val_relative_to_steady(
|
||||
model,
|
||||
-1 * model.system_power().integrate(model.t) * 1 / model.Θ,
|
||||
steady_idx=steady_idx,
|
||||
)[1].σ[-1]
|
||||
for model in models
|
||||
]
|
||||
|
||||
interaction_powers = [
|
||||
val_relative_to_steady(
|
||||
model,
|
||||
-1 * model.interaction_power().sum_baths().integrate(model.t) * 1 / model.Θ,
|
||||
steady_idx=steady_idx,
|
||||
)[1].value[-1]
|
||||
for model in models
|
||||
]
|
||||
|
||||
interaction_powers_σ = [
|
||||
val_relative_to_steady(
|
||||
model,
|
||||
-1 * model.interaction_power().sum_baths().integrate(model.t) * 1 / model.Θ,
|
||||
steady_idx=steady_idx,
|
||||
)[1].σ[-1]
|
||||
for model in models
|
||||
]
|
||||
|
||||
efficiencies = np.array(
|
||||
[100 * model.efficiency(steady_idx=steady_idx).value for model in models]
|
||||
)
|
||||
|
||||
efficiencies_σ = np.array(
|
||||
[100 * model.efficiency(steady_idx=steady_idx).σ for model in models]
|
||||
)
|
||||
|
||||
mask = efficiencies > 0
|
||||
a2 = ax.twinx()
|
||||
ax.errorbar(x, powers, yerr=powers_σ, marker=".", label=r"$\bar{P}$")
|
||||
ax.errorbar(
|
||||
x,
|
||||
system_powers,
|
||||
yerr=system_powers_σ,
|
||||
marker=".",
|
||||
label=r"$\bar{P}_{\mathrm{sys}}$",
|
||||
)
|
||||
|
||||
ax.errorbar(
|
||||
x,
|
||||
interaction_powers,
|
||||
yerr=interaction_powers_σ,
|
||||
marker=".",
|
||||
label=r"$\bar{P}_{\mathrm{int}}$",
|
||||
)
|
||||
ax.legend()
|
||||
|
||||
lines = a2.errorbar(
|
||||
np.asarray(x)[mask],
|
||||
efficiencies[mask],
|
||||
yerr=efficiencies_σ[mask],
|
||||
marker="*",
|
||||
color="C4",
|
||||
label=r"$\eta$",
|
||||
)
|
||||
a2.legend(loc="upper left")
|
||||
ax.set_xlabel(xlabel)
|
||||
ax.set_ylabel(r"$\bar{P}$", color="C0")
|
||||
a2.set_ylabel(r"$\eta$", color="C4")
|
||||
|
||||
return ax, a2
|
||||
|
||||
|
||||
def plot_multi_powers_and_efficiencies(
|
||||
x, multi_models, titles, steady_idx=2, xlabel=""
|
||||
):
|
||||
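    """Compare power, system power, interaction power and efficiency of several model families in a 2x2 grid."""
|
||||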
fig, axs = plt.subplots(nrows=2, ncols=2)
|
||||
(efficiency, power, system_power, interaction_power) = axs.flatten()
|
||||
|
||||
markers = itertools.cycle((".", "+", "*", ",", "o"))
|
||||
for models, title, marker in zip(multi_models, titles, [".", "^", "*"]):
|
||||
powers = [-model.power(steady_idx=steady_idx).value for model in models]
|
||||
powers_σ = [model.power(steady_idx=steady_idx).σ for model in models]
|
||||
|
||||
system_powers = [
|
||||
val_relative_to_steady(
|
||||
model,
|
||||
-1 * model.system_power().integrate(model.t) * 1 / model.Θ,
|
||||
steady_idx=steady_idx,
|
||||
)[1].value[-1]
|
||||
for model in models
|
||||
]
|
||||
|
||||
system_powers_σ = [
|
||||
val_relative_to_steady(
|
||||
model,
|
||||
-1 * model.system_power().integrate(model.t) * 1 / model.Θ,
|
||||
steady_idx=steady_idx,
|
||||
)[1].σ[-1]
|
||||
for model in models
|
||||
]
|
||||
|
||||
interaction_powers = [
|
||||
val_relative_to_steady(
|
||||
model,
|
||||
-1
|
||||
* model.interaction_power().sum_baths().integrate(model.t)
|
||||
* 1
|
||||
/ model.Θ,
|
||||
steady_idx=steady_idx,
|
||||
)[1].value[-1]
|
||||
for model in models
|
||||
]
|
||||
|
||||
interaction_powers_σ = [
|
||||
val_relative_to_steady(
|
||||
model,
|
||||
-1
|
||||
* model.interaction_power().sum_baths().integrate(model.t)
|
||||
* 1
|
||||
/ model.Θ,
|
||||
steady_idx=steady_idx,
|
||||
)[1].σ[-1]
|
||||
for model in models
|
||||
]
|
||||
|
||||
efficiencies = np.array(
|
||||
[100 * model.efficiency(steady_idx=steady_idx).value for model in models]
|
||||
)
|
||||
|
||||
efficiencies_σ = np.array(
|
||||
[100 * model.efficiency(steady_idx=steady_idx).σ for model in models]
|
||||
)
|
||||
|
||||
mask = efficiencies > 0
|
||||
|
||||
power.plot(x, powers, marker=marker)
|
||||
system_power.plot(
|
||||
x,
|
||||
system_powers,
|
||||
marker=marker,
|
||||
)
|
||||
|
||||
interaction_power.plot(
|
||||
x,
|
||||
interaction_powers,
|
||||
marker=marker,
|
||||
)
|
||||
|
||||
efficiency.plot(
|
||||
np.asarray(x)[mask],
|
||||
efficiencies[mask],
|
||||
marker=marker,
|
||||
label=title,
|
||||
)
|
||||
|
||||
efficiency.set_title(r"$\eta$")
|
||||
power.set_title(r"$\bar{P}$")
|
||||
system_power.set_title(
|
||||
r"$\bar{P}_{\mathrm{sys}}$",
|
||||
)
|
||||
interaction_power.set_title(
|
||||
r"$\bar{P}_{\mathrm{int}}$",
|
||||
)
|
||||
|
||||
fig.supxlabel(xlabel)
|
||||
fig.legend(loc="lower center", bbox_to_anchor=(0.5, -0.1), ncol=3)
|
||||
return fig, axs
|
||||
|
||||
|
||||
@pu.wrap_plot
|
||||
def plot_cycle(model: OttoEngine, ax=None):
|
||||
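    """Plot the coupling-operator norms and the normalized Hamiltonian norm over one cycle of the model."""
|
||||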
assert ax is not None
|
||||
ax.plot(
|
||||
model.t, model.coupling_operators[0].operator_norm(model.t) * 2, label=r"$L_c$"
|
||||
)
|
||||
ax.plot(
|
||||
model.t, model.coupling_operators[1].operator_norm(model.t) * 2, label=r"$L_h$"
|
||||
)
|
||||
|
||||
ax.plot(
|
||||
model.t,
|
||||
(model.H.operator_norm(model.t)) / model.H.operator_norm(model.τ_compressed),
|
||||
label="$H_{\mathrm{sys}}$",
|
||||
)
|
||||
|
||||
ax.set_xlim((0, model.Θ))
|
||||
ax.set_xlabel(r"$\tau$")
|
||||
ax.set_ylabel(r"Operator Norm")
|
||||
ax.legend()
|
||||
|
||||
|
||||
@pu.wrap_plot
|
||||
def plot_cycles(
|
||||
models: list[OttoEngine],
|
||||
ax=None,
|
||||
H_for_all=False,
|
||||
H=True,
|
||||
L_for_all=True,
|
||||
bath=None,
|
||||
legend=False,
|
||||
):
|
||||
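    """Overlay the cycle modulations (Hamiltonian and coupling-operator norms) of several models."""
|
||||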
assert ax is not None
|
||||
|
||||
model = models[0]
|
||||
|
||||
if H:
|
||||
ax.plot(
|
||||
model.t,
|
||||
(model.H.operator_norm(model.t))
|
||||
/ model.H.operator_norm(model.τ_compressed),
|
||||
label=f"$H_1$",
|
||||
)
|
||||
|
||||
for index, name in enumerate(["c", "h"]):
|
||||
if bath is None or bath == index:
|
||||
ax.plot(
|
||||
model.t,
|
||||
model.coupling_operators[index].operator_norm(model.t) * 2,
|
||||
label=rf"$L_{{{name},1}}$",
|
||||
)
|
||||
|
||||
ax.set_xlim((0, model.Θ))
|
||||
ax.set_xlabel(r"$\tau$")
|
||||
ax.set_ylabel(r"Operator Norm")
|
||||
|
||||
for i, model in enumerate(models[1:]):
|
||||
if H and H_for_all:
|
||||
ax.plot(
|
||||
model.t,
|
||||
(model.H.operator_norm(model.t))
|
||||
/ model.H.operator_norm(model.τ_compressed),
|
||||
label=f"$H_1$",
|
||||
)
|
||||
|
||||
if L_for_all:
|
||||
for index, name in enumerate(["c", "h"]):
|
||||
if bath is None or bath == index:
|
||||
ax.plot(
|
||||
model.t,
|
||||
model.coupling_operators[index].operator_norm(model.t) * 2,
|
||||
label=rf"$L_{{{name},{i+2}}}$",
|
||||
)
|
||||
|
||||
legend and ax.legend()
|
||||
|
||||
|
||||
@pu.wrap_plot
|
||||
def plot_sd_overview(model: OttoEngine, ax=None):
|
||||
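    """Plot the thermal and zero-temperature spectral densities of both baths, marking the values at the energy gaps."""
|
||||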
assert ax is not None
|
||||
|
||||
gaps = model.energy_gaps
|
||||
ω = np.linspace(0.0001, gaps[-1] + gaps[0], 1000)
|
||||
|
||||
for ω_i, label, i in zip(gaps, ["Cold", "Hot"], range(len(gaps))):
|
||||
lines = ax.plot(
|
||||
ω,
|
||||
model.full_thermal_spectral_density(i)(ω) * model.bcf_scales[i],
|
||||
label=f"{label} $T={model.T[i]}$",
|
||||
)
|
||||
|
||||
ax.plot(
|
||||
ω,
|
||||
model.spectral_density(i)(ω) * model.bcf_scales[i],
|
||||
label=f"{label} $T=0$",
|
||||
color=pu.lighten_color(lines[0].get_color()),
|
||||
linestyle="--",
|
||||
)
|
||||
|
||||
ax.plot(
|
||||
ω_i,
|
||||
model.full_thermal_spectral_density(i)(ω_i) * model.bcf_scales[i],
|
||||
marker="o",
|
||||
color=lines[0].get_color(),
|
||||
)
|
||||
|
||||
# plt.plot(ω, model.full_thermal_spectral_density(1)(ω) * model.bcf_scales[1])
|
||||
# plt.plot(
|
||||
# 2, model.full_thermal_spectral_density(1)(2) * model.bcf_scales[1], marker="o"
|
||||
# )
|
||||
|
||||
ax.set_xlabel(r"$\omega$")
|
||||
ax.set_ylabel(r"Spectral Density")
|
||||
ax.legend()
|
||||
|
||||
|
||||
def full_report(model):
|
||||
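    """Plot the cycle, the spectral densities and the energy balance of the model and print its power and efficiency."""
|
||||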
cyc = plot_cycle(model)
|
||||
sd = plot_sd_overview(model)
|
||||
|
||||
f, a = plot_energy(model)
|
||||
pu.plot_with_σ(model.t, model.total_energy(), ax=a)
|
||||
|
||||
power = model.power()
|
||||
η = model.efficiency() * 100
|
||||
|
||||
print(
|
||||
fs.tex_value(power.value, err=power.σ, prefix="P="),
|
||||
)
|
||||
print(
|
||||
fs.tex_value(η.value, err=η.σ, prefix=r"\eta="),
|
||||
)
|
||||
|
||||
|
||||
def plot_energy(model):
|
||||
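    """Plot the energy overview of the model using its strobe data."""
|
||||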
f, a = pu.plot_energy_overview(
|
||||
model,
|
||||
strobe_data=model.strobe,
|
||||
hybrid=True,
|
||||
bath_names=["cold", "hot"],
|
||||
online=True,
|
||||
)
|
||||
|
||||
a.legend()
|
||||
|
||||
return f, a
|
||||
|
||||
|
||||
def integrate_online(model, n, stream_folder=None, **kwargs):
|
||||
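    """Integrate the model online, streaming results to a FIFO named after the model hash, and analyze them."""
|
||||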
aux.integrate(
|
||||
model,
|
||||
n,
|
||||
stream_file=("" if stream_folder is None else stream_folder)
|
||||
+ f"results_{model.hexhash}.fifo",
|
||||
analyze=True,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
|
||||
def get_sample_count(model):
|
||||
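    """Return the number of samples already computed for the model, or zero if no data exists yet."""
|
||||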
try:
|
||||
with aux.get_data(model) as d:
|
||||
return d.samples
|
||||
|
||||
    except Exception:  # no data stored for this model yet
|
||||
return 0
|
||||
|
||||
|
||||
def integrate_online_multi(models, n, *args, increment=1000, **kwargs):
|
||||
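    """Integrate the models in steps of ``increment`` samples until each of them has ``n`` samples."""
|
||||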
target = increment
|
||||
|
||||
while target <= n:
|
||||
current_target = min([n, target])
|
||||
for model in models:
|
||||
count = get_sample_count(model)
|
||||
if count < current_target:
|
||||
integrate_online(model, current_target, *args, **kwargs)
|
||||
|
||||
target += increment
|
||||
|
||||
|
||||
@pu.wrap_plot
|
||||
def plot_3d_heatmap(models, value_accessor, x_spec, y_spec, normalize=False, ax=None):
|
||||
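    """Plot ``value_accessor(model)`` as 3D bars over the parameter grid spanned by ``x_spec`` and ``y_spec``."""
|
||||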
value_dict = {}
|
||||
x_labels = set()
|
||||
y_labels = set()
|
||||
|
||||
for model in models:
|
||||
x_label = x_spec(model)
|
||||
y_label = y_spec(model)
|
||||
value = value_accessor(model)
|
||||
|
||||
if x_label not in value_dict:
|
||||
value_dict[x_label] = {}
|
||||
|
||||
if y_label in value_dict[x_label]:
|
||||
raise ValueError(
|
||||
f"Dublicate value for model with x={x_label}, y={y_label}."
|
||||
)
|
||||
|
||||
value_dict[x_label][y_label] = value_accessor(model)
|
||||
|
||||
x_labels.add(x_label)
|
||||
y_labels.add(y_label)
|
||||
|
||||
x_labels = np.sort(list(x_labels))
|
||||
y_labels = np.sort(list(y_labels))
|
||||
|
||||
_xx, _yy = np.meshgrid(x_labels, y_labels, indexing="ij")
|
||||
x, y = _xx.ravel(), _yy.ravel()
|
||||
|
||||
values = np.fromiter((value_dict[_x][_y] for _x, _y in zip(x, y)), dtype=float)
|
||||
|
||||
dx = x_labels[1] - x_labels[0]
|
||||
dy = y_labels[1] - y_labels[0]
|
||||
|
||||
x -= dx / 2
|
||||
y -= dy / 2
|
||||
|
||||
normalized_values = abs(values) - abs(values).min()
|
||||
normalized_values /= abs(normalized_values).max()
|
||||
|
||||
cmap = plt.get_cmap("plasma")
|
||||
colors = [cmap(power) for power in normalized_values]
|
||||
|
||||
ax.bar3d(
|
||||
x,
|
||||
y,
|
||||
np.zeros_like(values),
|
||||
dx,
|
||||
dy,
|
||||
values / abs(values).max() if normalize else values,
|
||||
color=colors,
|
||||
)
|
||||
ax.set_xticks(x_labels)
|
||||
ax.set_yticks(y_labels)
|
||||
|
||||
|
||||
@pu.wrap_plot
|
||||
def plot_contour(
|
||||
models, value_accessor, x_spec, y_spec, normalize=False, ax=None, levels=None
|
||||
):
|
||||
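    """Plot ``value_accessor(model)`` as a filled contour over the parameter grid spanned by ``x_spec`` and ``y_spec``."""
|
||||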
value_dict = {}
|
||||
x_labels = set()
|
||||
y_labels = set()
|
||||
|
||||
for model in models:
|
||||
x_label = x_spec(model)
|
||||
y_label = y_spec(model)
|
||||
value = value_accessor(model)
|
||||
|
||||
if x_label not in value_dict:
|
||||
value_dict[x_label] = {}
|
||||
|
||||
if y_label in value_dict[x_label]:
|
||||
raise ValueError(
|
||||
f"Dublicate value for model with x={x_label}, y={y_label}."
|
||||
)
|
||||
|
||||
value_dict[x_label][y_label] = value_accessor(model)
|
||||
|
||||
x_labels.add(x_label)
|
||||
y_labels.add(y_label)
|
||||
|
||||
x_labels = np.sort(list(x_labels))
|
||||
y_labels = np.sort(list(y_labels))
|
||||
|
||||
_xx, _yy = np.meshgrid(x_labels, y_labels, indexing="ij")
|
||||
x, y = _xx.ravel(), _yy.ravel()
|
||||
|
||||
values = (
|
||||
np.fromiter((value_dict[_x][_y] for _x, _y in zip(x, y)), dtype=float)
|
||||
.reshape(len(x_labels), len(y_labels))
|
||||
.T
|
||||
)
|
||||
|
||||
normalized_values = abs(values) - abs(values).min()
|
||||
normalized_values /= abs(normalized_values).max()
|
||||
|
||||
cont = ax.contourf(
|
||||
x_labels,
|
||||
y_labels,
|
||||
values / abs(values).max() if normalize else values,
|
||||
levels=levels,
|
||||
)
|
||||
ax.set_xticks(x_labels)
|
||||
ax.set_yticks(y_labels)
|
||||
return cont, (x_labels, y_labels, values)
|
||||
|
||||
|
||||
def get_steady_times(model, steady_idx, shift=0):
|
||||
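    """Return the time axis restricted to the cycles starting at strobe index ``steady_idx``, optionally shifted by ``shift``."""
|
||||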
shift_idx = int(1 / model.dt * shift)
|
||||
|
||||
begin_idx = model.strobe[1][steady_idx] - shift_idx
|
||||
end_idx = -shift_idx if shift != 0 else -2
|
||||
|
||||
return model.t[begin_idx - 1 : end_idx]
|
||||
|
||||
|
||||
def val_relative_to_steady(model, val, steady_idx, shift=0, absolute=False):
|
||||
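    """Restrict ``val`` to the cycles starting at strobe index ``steady_idx`` and, unless ``absolute``, subtract its initial value."""
|
||||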
shift_idx = int(1 / model.dt * shift)
|
||||
begin_idx = model.strobe[1][steady_idx] - shift_idx
|
||||
end_idx = -shift_idx if shift != 0 else -2
|
||||
|
||||
final_value = val.slice(slice(begin_idx - 1, end_idx, 1))
|
||||
if not absolute:
|
||||
final_value = final_value - val.slice(begin_idx - 1)
|
||||
|
||||
return (model.t[begin_idx - 1 : end_idx], final_value)
|
||||
|
||||
|
||||
def timings(τ_c, τ_i):
|
||||
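    """Return the switching times of the Hamiltonian modulation and of the (cold, hot) couplings, as fractions of the cycle."""
|
||||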
τ_th = (1 - 2 * τ_c) / 2
|
||||
τ_i_on = τ_th - 2 * τ_i
|
||||
timings_H = (0, τ_c, τ_c + τ_th, 2 * τ_c + τ_th)
|
||||
timings_L_hot = (τ_c, τ_c + τ_i, τ_c + τ_i + τ_i_on, τ_c + 2 * τ_i + τ_i_on)
|
||||
|
||||
timings_L_cold = tuple(time + timings_H[2] for time in timings_L_hot)
|
||||
|
||||
return timings_H, (timings_L_cold, timings_L_hot)
|
||||
|
||||
|
||||
def model_description(model):
|
||||
return model.description
|
||||
|
||||
|
||||
@pu.wrap_plot
|
||||
def plot_steady_energy_changes(
|
||||
models,
|
||||
steady_idx=2,
|
||||
label_fn=model_description,
|
||||
bath=None,
|
||||
ax=None,
|
||||
with_shift=False,
|
||||
shift_min_inter=False,
|
||||
):
|
||||
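    """Plot the integrated interaction and system power (work) over the steady-state cycles for each model."""
|
||||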
times, inters, systems = [], [], []
|
||||
for model in models:
|
||||
t, inter = val_relative_to_steady(
|
||||
model,
|
||||
(
|
||||
model.interaction_power().sum_baths()
|
||||
if bath is None
|
||||
else model.interaction_power().for_bath(bath)
|
||||
).integrate(model.t),
|
||||
steady_idx,
|
||||
shift=model.L_shift[0] if with_shift else 0,
|
||||
)
|
||||
t, sys = val_relative_to_steady(
|
||||
model,
|
||||
model.system_power().sum_baths().integrate(model.t),
|
||||
steady_idx,
|
||||
)
|
||||
|
||||
inters.append(inter)
|
||||
systems.append(sys)
|
||||
times.append(t)
|
||||
|
||||
if shift_min_inter:
|
||||
for i, inter in enumerate(inters):
|
||||
length = len(inter.value)
|
||||
inters[i] -= (inter.slice(slice(0, length // 3))).max.value
|
||||
|
||||
for inter, sys, t, model in zip(inters, systems, times, models):
|
||||
print(model.L_shift)
|
||||
_, _, (l, _) = pu.plot_with_σ(
|
||||
t,
|
||||
-1 * inter,
|
||||
ax=ax,
|
||||
label=rf"$W_\mathrm{{int}}$ {label_fn(model)}",
|
||||
linestyle="--",
|
||||
)
|
||||
pu.plot_with_σ(
|
||||
t,
|
||||
-1 * sys,
|
||||
ax=ax,
|
||||
label=rf"$W_\mathrm{{sys}}$ {label_fn(model)}",
|
||||
color=l[0].get_color(),
|
||||
)
|
||||
|
||||
ax.set_xlabel(r"$\tau$")
|
||||
ax.set_ylabel(r"$W$")
|
||||
ax.legend()
|
||||
|
||||
|
||||
def add_arrow(line, start_ind=None, direction="right", size=15, color=None):
|
||||
"""
|
||||
add an arrow to a line.
|
||||
|
||||
line: Line2D object
|
||||
position: x-position of the arrow. If None, mean of xdata is taken
|
||||
direction: 'left' or 'right'
|
||||
size: size of the arrow in fontsize points
|
||||
color: if None, line color is taken.
|
||||
"""
|
||||
if color is None:
|
||||
color = line.get_color()
|
||||
|
||||
xdata = line.get_xdata()
|
||||
ydata = line.get_ydata()
|
||||
|
||||
if direction == "right":
|
||||
end_ind = start_ind + 1
|
||||
else:
|
||||
end_ind = start_ind - 1
|
||||
|
||||
line.axes.annotate(
|
||||
"",
|
||||
xytext=(xdata[start_ind], ydata[start_ind]),
|
||||
xy=(xdata[end_ind], ydata[end_ind]),
|
||||
arrowprops=dict(arrowstyle="->", color=color),
|
||||
size=size,
|
||||
)
|
||||
|
||||
|
||||
@pu.wrap_plot
|
||||
def plot_state_change_diagram(modulation, value, phase_indices, ax=None):
|
||||
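    """Plot ``value`` against ``modulation`` over the steady cycle, marking and numbering the phase-switching points."""
|
||||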
    # phase_indices mark the switching times of the coupling modulation (cf. get_modulation_and_value)
|
||||
|
||||
    modulation_windowed = modulation[phase_indices[0] : phase_indices[-1]]
|
||||
    value_windowed = value[phase_indices[0] : phase_indices[-1]]
|
||||
|
||||
ax.plot(modulation_windowed, value_windowed, linewidth=3, color="cornflowerblue")
|
||||
ax.add_collection(
|
||||
color_gradient(
|
||||
modulation_windowed, value_windowed, "cornflowerblue", "red", linewidth=3
|
||||
)
|
||||
)
|
||||
|
||||
for begin, end in zip(phase_indices[:-1], phase_indices[1:]):
|
||||
ax.scatter(modulation[begin], value[begin], zorder=100, marker=".", s=200)
|
||||
|
||||
for i, index in enumerate(phase_indices[:-1]):
|
||||
ax.text(
|
||||
modulation[index] + np.max(modulation) * 0.02,
|
||||
value[index] + np.max(np.abs(value)) * 0.01,
|
||||
str(i + 1),
|
||||
)
|
||||
|
||||
    return ax
|
||||
|
||||
|
||||
def get_modulation_and_value(model, operator, value, steady_idx=2):
|
||||
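    """Return ``value`` divided by the modulation of ``operator`` over the steady-state window, the modulation itself and the phase-switching indices."""
|
||||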
shift = 0
|
||||
timing_operator = operator
|
||||
while not isinstance(timing_operator, SmoothlyInterpolatdPeriodicMatrix):
|
||||
        if isinstance(timing_operator, Shift):
|
||||
            shift = timing_operator._delta
|
||||
            timing_operator = timing_operator._matrix
|
||||
|
||||
        if isinstance(timing_operator, DynamicMatrixSum):
|
||||
            timing_operator = timing_operator._left
|
||||
|
||||
t, value = val_relative_to_steady(model, value, steady_idx, absolute=True)
|
||||
|
||||
all_modulation = operator.operator_norm(t)
|
||||
all_modulation_deriv = operator.derivative().operator_norm(t)
|
||||
|
||||
timings = np.array(timing_operator._timings)
|
||||
phase_indices = (((timings + shift / model.Θ) % 1) * (len(t) - 1)).astype(np.int64)
|
||||
|
||||
values = np.zeros_like(all_modulation)
|
||||
np.divide(
|
||||
value.value, all_modulation, where=np.abs(all_modulation) > 1e-3, out=values
|
||||
    )
|
||||
|
||||
return (
|
||||
values,
|
||||
all_modulation,
|
||||
phase_indices,
|
||||
)
|
||||
|
||||
|
||||
def plot_modulation_system_diagram(model, steady_idx):
|
||||
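    """Plot the system energy against the Hamiltonian norm over the steady-state cycles."""
|
||||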
fig, ax = plt.subplots()
|
||||
|
||||
t, system = val_relative_to_steady(
|
||||
model,
|
||||
(model.system_energy().sum_baths()),
|
||||
steady_idx,
|
||||
)
|
||||
|
||||
modulation = model.H.operator_norm(t)
|
||||
ax.plot(modulation, system.value)
|
||||
ax.set_xlabel(r"$||H_\mathrm{S}||$")
|
||||
ax.set_ylabel(r"$\langle{H_\mathrm{S}}\rangle$")
|
||||
|
||||
return fig, ax
|
||||
|
||||
|
||||
def plot_steady_work_baths(models, steady_idx=2, label_fn=model_description):
|
||||
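    """Plot the integrated interaction power of the cold and the hot bath separately over the steady-state cycles."""
|
||||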
fig, ax = plt.subplots()
|
||||
|
||||
for model in models:
|
||||
t, inter_c = val_relative_to_steady(
|
||||
model,
|
||||
(model.interaction_power().for_bath(0)).integrate(model.t),
|
||||
steady_idx,
|
||||
)
|
||||
|
||||
t, inter_h = val_relative_to_steady(
|
||||
model,
|
||||
(model.interaction_power().for_bath(1)).integrate(model.t),
|
||||
steady_idx,
|
||||
)
|
||||
|
||||
pu.plot_with_σ(
|
||||
t,
|
||||
inter_c,
|
||||
ax=ax,
|
||||
label=rf"$W_\mathrm{{int, c}}$ {label_fn(model)}",
|
||||
)
|
||||
|
||||
pu.plot_with_σ(
|
||||
t,
|
||||
inter_h,
|
||||
ax=ax,
|
||||
label=rf"$W_\mathrm{{int, h}}$ {label_fn(model)}",
|
||||
linestyle="--",
|
||||
)
|
||||
ax.set_xlabel(r"$\tau$")
|
||||
ax.set_ylabel(r"$W$")
|
||||
ax.legend()
|
||||
|
||||
return fig, ax
|
||||
|
||||
|
||||
@pu.wrap_plot
|
||||
def plot_bloch_components(model, ax=None, **kwargs):
|
||||
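    """Plot the Bloch-vector components ⟨σ_x⟩, ⟨σ_y⟩ and ⟨σ_z⟩ of the qubit state over time."""
|
||||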
with aux.get_data(model) as data:
|
||||
ρ = data.rho_t_accum.mean[:]
|
||||
σ_ρ = data.rho_t_accum.ensemble_std[:]
|
||||
|
||||
xs = np.einsum("tij,ji->t", ρ, qt.sigmax().full()).real
|
||||
ys = np.einsum("tij,ji->t", ρ, qt.sigmay().full()).real
|
||||
zs = np.einsum("tij,ji->t", ρ, qt.sigmaz().full()).real
|
||||
|
||||
ax.plot(
|
||||
model.t,
|
||||
zs,
|
||||
**(dict(label=r"$\langle \sigma_z\rangle$", color="C1") | kwargs),
|
||||
)
|
||||
ax.plot(
|
||||
model.t,
|
||||
xs,
|
||||
**(dict(label=r"$\langle \sigma_x\rangle$", color="C2") | kwargs),
|
||||
)
|
||||
ax.plot(
|
||||
model.t,
|
||||
ys,
|
||||
**(dict(label=r"$\langle \sigma_y\rangle$", color="C3") | kwargs),
|
||||
)
|
||||
ax.legend()
|
||||
ax.set_xlabel(r"$\tau$")
|
||||
|
||||
|
||||
@pu.wrap_plot
|
||||
def plot_energy_deviation(models, ax=None, labels=None):
|
||||
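    """Plot the relative deviation between the total energy from integrated power and the directly computed total energy."""
|
||||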
ax.set_xlabel(r"$\tau$")
|
||||
ax.set_ylabel(r"$\Delta||H||/\max||H||$")
|
||||
|
||||
for i, model in enumerate(models):
|
||||
ax.plot(
|
||||
model.t,
|
||||
abs(model.total_energy_from_power().value - model.total_energy().value)
|
||||
/ max(abs(model.total_energy_from_power().value)),
|
||||
label=labels[i] if labels else None,
|
||||
)
|
||||
|
||||
if labels:
|
||||
ax.legend()
|
||||
|
||||
|
||||
def max_energy_error(models, steady_idx=None):
|
||||
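    """Return the largest deviation between the two total-energy estimates over all models, scaled by 100 and rounded."""
|
||||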
deviations = np.zeros(len(models))
|
||||
|
||||
for i, model in enumerate(models):
|
||||
if steady_idx is None:
|
||||
deviations[i] = abs(
|
||||
model.total_energy_from_power().value - model.total_energy().value
|
||||
).max()
|
||||
else:
|
||||
deviations[i] = abs(
|
||||
val_relative_to_steady(
|
||||
model, model.total_energy_from_power(), steady_idx=steady_idx
|
||||
)[1].value
|
||||
- val_relative_to_steady(
|
||||
model, model.total_energy(), steady_idx=steady_idx
|
||||
)[1].value
|
||||
).max()
|
||||
|
||||
return round(deviations.max() * 100)
|
|
@ -1,121 +0,0 @@
|
|||
import figsaver as fs
|
||||
import plot_utils as pu
|
||||
from hiro_models.one_qubit_model import StocProcTolerances
|
||||
from hiro_models.otto_cycle import OttoEngine
|
||||
import hiro_models.model_auxiliary as aux
|
||||
import numpy as np
|
||||
import qutip as qt
|
||||
import utilities as ut
|
||||
import stocproc
|
||||
import matplotlib.pyplot as plt
|
||||
import otto_utilities as ot
|
||||
|
||||
import ray
|
||||
ray.shutdown()
|
||||
|
||||
#ray.init(address='auto')
|
||||
ray.init()
|
||||
from hops.util.logging_setup import logging_setup
|
||||
import logging
|
||||
logging_setup(logging.INFO)
|
||||
plt.rcParams['figure.figsize'] = (12,4)
|
||||
|
||||
def timings(τ_c, τ_i, percent_overlap=0):
|
||||
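    """Like the basic cycle timings, but start the coupling switch earlier so that it overlaps the Hamiltonian modulation by ``percent_overlap``."""
|
||||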
τ_cI = τ_c * (1-percent_overlap)
|
||||
|
||||
τ_thI = (1 - 2 * τ_cI) / 2
|
||||
τ_th = (1 - 2 * τ_c) / 2
|
||||
τ_i_on = (τ_thI - 2*τ_i)
|
||||
timings_H = (0, τ_c, τ_c + τ_th, 2*τ_c + τ_th)
|
||||
|
||||
timings_L_hot = (τ_cI, τ_cI + τ_i, τ_cI + τ_i + τ_i_on, τ_cI + 2 * τ_i + τ_i_on)
|
||||
|
||||
timings_L_cold = tuple(time + timings_H[2] for time in timings_L_hot)
|
||||
|
||||
return timings_H, (timings_L_cold, timings_L_hot)
|
||||
|
||||
τ_mod, τ_I = 0.15, 0.15
|
||||
(p_H, p_L) = timings(τ_mod, τ_I, 0)
|
||||
prototype = OttoEngine(
|
||||
δ=[0.4, 0.4],
|
||||
ω_c=[2, 2],
|
||||
ψ_0=qt.basis([2], [1]),
|
||||
description=f"Classic Cycle",
|
||||
k_max=4,
|
||||
bcf_terms=[6] * 2,
|
||||
truncation_scheme="simplex",
|
||||
driving_process_tolerances=[StocProcTolerances(1e-3, 1e-3)] * 2,
|
||||
thermal_process_tolerances=[StocProcTolerances(1e-3, 1e-3)] * 2,
|
||||
T=[.5, 4],
|
||||
therm_methods=["tanhsinh", "tanhsinh"],
|
||||
Δ=1,
|
||||
num_cycles=4,
|
||||
Θ=1.5 / 0.05,
|
||||
dt=0.001,
|
||||
timings_H=p_H,
|
||||
timings_L=p_L,
|
||||
streaming_mode=True,
|
||||
shift_to_resonance=(False, True),
|
||||
L_shift=(0, 0),
|
||||
)
|
||||
ot.plot_cycle(prototype)
|
||||
|
||||
shifted_model = prototype.copy()
|
||||
(p_H, p_L) = timings(τ_mod, τ_I, 1)
|
||||
shifted_model.timings_H = p_H
|
||||
shifted_model.timings_L = p_L
|
||||
shifted_model.L_shift = (τ_mod, τ_mod)
|
||||
shifted_model.description="Decoupling Overlap"
|
||||
ot.plot_cycle(shifted_model)
|
||||
|
||||
left_shifted_model = prototype.copy()
|
||||
(p_H, p_L) = timings(τ_mod, τ_I, 1)
|
||||
left_shifted_model.timings_H = p_H
|
||||
left_shifted_model.timings_L = p_L
|
||||
left_shifted_model.L_shift = (0, 0)
|
||||
left_shifted_model.description="Coupling Overlap"
|
||||
ot.plot_cycle(left_shifted_model)
|
||||
|
||||
overlap_model = prototype.copy()
|
||||
(p_H, p_L) = timings(τ_mod, τ_I, 1)
|
||||
p_L = [list(timings) for timings in p_L]
|
||||
p_L[1][2] += τ_I
|
||||
p_L[1][3] += τ_I
|
||||
|
||||
p_L[0][0] -= τ_I
|
||||
p_L[0][1] -= τ_I
|
||||
overlap_model.timings_H = p_H
|
||||
overlap_model.timings_L = tuple(tuple(timing) for timing in p_L)
|
||||
overlap_model.L_shift = (τ_mod, 0)
|
||||
overlap_model.description="Full Overlap"
|
||||
ot.plot_cycle(overlap_model)
|
||||
|
||||
crazy_model = prototype.copy()
|
||||
(p_H, p_L) = timings(τ_mod, τ_I, 1)
|
||||
p_L = [list(timings) for timings in p_L]
|
||||
p_L[1][2] += τ_I
|
||||
p_L[1][3] += τ_I
|
||||
|
||||
p_L[0][0] -= τ_I
|
||||
p_L[0][1] -= τ_I
|
||||
crazy_model.timings_H = p_H
|
||||
crazy_model.timings_L = tuple(tuple(timing) for timing in p_L)
|
||||
crazy_model.L_shift = (τ_mod *2, τ_mod)
|
||||
crazy_model.description="Full Overlap with Shift"
|
||||
ot.plot_cycle(crazy_model)
|
||||
|
||||
less_crazy_model = shifted_model.copy()
|
||||
|
||||
|
||||
less_crazy_model.L_shift = (τ_mod *2, τ_mod*2)
|
||||
less_crazy_model.description="Large Shift without Overlap"
|
||||
ot.plot_cycle(less_crazy_model)
|
||||
|
||||
optimized_crazy_model = crazy_model.copy()
|
||||
|
||||
|
||||
optimized_crazy_model.L_shift = (τ_mod + 0.401813980810373, 0.302982197157591)
|
||||
optimized_crazy_model.description="Large Shift without Overlap"
|
||||
ot.plot_cycle(optimized_crazy_model)
|
||||
|
||||
models = [prototype, shifted_model, left_shifted_model, overlap_model, crazy_model, less_crazy_model]
|
2305
python/otto_motor/poetry.lock
generated
|
@ -1,2 +0,0 @@
|
|||
from overlap_vs_no_overlap import *
|
||||
ot.integrate_online_multi(models[-1:], 100_000, increment=10_000, analyze_kwargs=dict(every=10_000))
|
|
@ -1,124 +0,0 @@
|
|||
from speed_coupling_scan import *
|
||||
|
||||
taurus_path = "taurus"
|
||||
from hiro_models.model_auxiliary import import_results
|
||||
|
||||
import_results(
|
||||
other_data_path="./taurus/.data",
|
||||
other_results_path="./taurus/results",
|
||||
interactive=False,
|
||||
models_to_import=models,
|
||||
force=True,
|
||||
)
|
||||
|
||||
f, a = plt.subplots()
|
||||
|
||||
for model in models:
|
||||
Δs = (model.steady_index(observable=model.system_energy()))
|
||||
#Δ = (model.steady_index(observable=model.total_power(), fraction=.7))
|
||||
# for Δ in Δs[2:]:
|
||||
# pu.plot_with_σ(model.t[:model.strobe[1][1]], Δ, ax=a)
|
||||
#plt.plot(Δ)
|
||||
print(Δs)
|
||||
|
||||
try:
|
||||
# pu.plot_with_σ(model.t, model.total_energy_from_power().sum_baths(), ax=a, label=fr"$\delta={model.δ[0]}$, $\tau_I={model.timings_L[0][1] - model.timings_L[0][0]:.3}$")
|
||||
# pu.plot_with_σ(model.t, model.total_energy().sum_baths(), ax=a, label=fr"$\delta={model.δ[0]}$, $\tau_I={model.timings_L[0][1] - model.timings_L[0][0]:.3}$")
|
||||
pu.plot_with_σ(model.t, model.total_energy_from_power(), ax=a, label=fr"$\delta={model.δ[0]}$, $\tau_I={model.timings_L[0][1] - model.timings_L[0][0]:.3}$")
|
||||
pu.plot_with_σ(model.t, model.total_energy(), ax=a, label=fr"$\delta={model.δ[0]}$, $\tau_I={model.timings_L[0][1] - model.timings_L[0][0]:.3}$")
|
||||
print(model.system_energy().N)
|
||||
print(model.system_power().N)
|
||||
print(model.interaction_power().N)
|
||||
    except Exception:
|
||||
pass
|
||||
a.legend()
|
||||
|
||||
f, a =ot.plot_3d_heatmap(models, lambda model: -model.power(fraction=.3).value, lambda model: model.δ[0], lambda model: model.timings_L[0][1] - model.timings_L[0][0])
|
||||
a.set_xlabel(r"$\delta$")
|
||||
a.set_ylabel(r"$\tau_I$")
|
||||
a.set_zlabel(r"$P$")
|
||||
|
||||
f, a = plt.subplots()
|
||||
|
||||
for model in models:
|
||||
try:
|
||||
power = model.power(fraction=.5)
|
||||
a.plot(power.Ns, power.values, label=fr"$\delta={model.δ[0]}$, $\tau_I={model.timings_L[0][1] - model.timings_L[0][0]:.3}$")
|
||||
    except Exception:
|
||||
pass
|
||||
a.legend()
|
||||
|
||||
from speed_coupling_scan import *
|
||||
|
||||
taurus_path = "taurus"
|
||||
from hiro_models.model_auxiliary import import_results
|
||||
|
||||
import_results(
|
||||
other_data_path="./taurus/.data",
|
||||
other_results_path="./taurus/results",
|
||||
interactive=False,
|
||||
models_to_import=models,
|
||||
# force=True,
|
||||
)
|
||||
|
||||
f, a = plt.subplots()
|
||||
|
||||
for model in models:
|
||||
Δs = (model.steady_index(observable=model.system_energy()))
|
||||
#Δ = (model.steady_index(observable=model.total_power(), fraction=.7))
|
||||
# for Δ in Δs[2:]:
|
||||
# pu.plot_with_σ(model.t[:model.strobe[1][1]], Δ, ax=a)
|
||||
#plt.plot(Δ)
|
||||
print(Δs)
|
||||
|
||||
try:
|
||||
# pu.plot_with_σ(model.t, model.total_energy_from_power().sum_baths(), ax=a, label=fr"$\delta={model.δ[0]}$, $\tau_I={model.timings_L[0][1] - model.timings_L[0][0]:.3}$")
|
||||
# pu.plot_with_σ(model.t, model.total_energy().sum_baths(), ax=a, label=fr"$\delta={model.δ[0]}$, $\tau_I={model.timings_L[0][1] - model.timings_L[0][0]:.3}$")
|
||||
pu.plot_with_σ(model.t, model.total_energy_from_power(), ax=a, label=fr"$\delta={model.δ[0]}$, $\tau_I={model.timings_L[0][1] - model.timings_L[0][0]:.3}$")
|
||||
pu.plot_with_σ(model.t, model.total_energy(), ax=a, label=fr"$\delta={model.δ[0]}$, $\tau_I={model.timings_L[0][1] - model.timings_L[0][0]:.3}$")
|
||||
print(model.system_energy().N)
|
||||
print(model.system_power().N)
|
||||
print(model.interaction_power().N)
|
||||
    except Exception:
|
||||
pass
|
||||
a.legend()
|
||||
|
||||
f = plt.figure()
|
||||
a_power = f.add_subplot(121, projection='3d')
|
||||
a_efficiency = f.add_subplot(122, projection='3d')
|
||||
|
||||
ot.plot_3d_heatmap(
|
||||
models,
|
||||
lambda model: -model.power(fraction=0.5).value,
|
||||
lambda model: model.δ[0],
|
||||
lambda model: model.timings_L[0][1] - model.timings_L[0][0],
|
||||
normalize=True,
|
||||
ax=a_power,
|
||||
)
|
||||
a_power.set_xlabel(r"$\delta$")
|
||||
a_power.set_ylabel(r"$\tau_I$")
|
||||
a_power.set_zlabel(r"$P$ (normalized)")
|
||||
|
||||
ot.plot_3d_heatmap(
|
||||
models,
|
||||
lambda model: model.efficiency(fraction=0.5).value,
|
||||
lambda model: model.δ[0],
|
||||
lambda model: model.timings_L[0][1] - model.timings_L[0][0],
|
||||
ax=a_efficiency,
|
||||
)
|
||||
a_efficiency.set_xlabel(r"$\delta$")
|
||||
a_efficiency.set_ylabel(r"$\tau_I$")
|
||||
a_efficiency.set_zlabel(r"$\eta$")
|
||||
|
||||
fs.export_fig("coupling_speed_scan", fig=f)
|
||||
f
|
||||
|
||||
f, a = plt.subplots()
|
||||
|
||||
for model in models:
|
||||
try:
|
||||
power = model.power(fraction=.5)
|
||||
a.plot(power.Ns, power.values, label=fr"$\delta={model.δ[0]}$, $\tau_I={model.timings_L[0][1] - model.timings_L[0][0]:.3}$")
|
||||
    except Exception:
|
||||
pass
|
||||
a.legend()
|
|
@ -1,7 +0,0 @@
|
|||
from speed_coupling_scan import *
|
||||
|
||||
ot.integrate_online_multi(models, 10_000, increment=1000, analyze_kwargs=dict(every=100))
|
||||
|
||||
from speed_coupling_scan import *
|
||||
|
||||
ot.integrate_online_multi(models, 10_000, increment=1000, analyze_kwargs=dict(every=100))
|
|
@ -1,152 +0,0 @@
|
|||
import figsaver as fs
|
||||
import plot_utils as pu
|
||||
from hiro_models.one_qubit_model import StocProcTolerances
|
||||
from hiro_models.otto_cycle import OttoEngine
|
||||
import hiro_models.model_auxiliary as aux
|
||||
import numpy as np
|
||||
import qutip as qt
|
||||
import utilities as ut
|
||||
import stocproc
|
||||
import matplotlib.pyplot as plt
|
||||
import otto_utilities as ot
|
||||
|
||||
import ray
|
||||
ray.shutdown()
|
||||
|
||||
#ray.init(address='auto')
|
||||
ray.init()
|
||||
from hops.util.logging_setup import logging_setup
|
||||
import logging
|
||||
logging_setup(logging.INFO)
|
||||
plt.rcParams['figure.figsize'] = (12,4)
|
||||
|
||||
def timings(τ_c, τ_i):
|
||||
τ_th = (1 - 2 * τ_c) / 2
|
||||
τ_i_on = (τ_th - 2*τ_i)
|
||||
timings_H = (0, τ_c, τ_c + τ_th, 2*τ_c + τ_th)
|
||||
timings_L_hot = (τ_c, τ_c + τ_i, τ_c + τ_i + τ_i_on, τ_c + 2 * τ_i + τ_i_on)
|
||||
|
||||
timings_L_cold = tuple(time + timings_H[2] for time in timings_L_hot)
|
||||
|
||||
return timings_H, (timings_L_cold, timings_L_hot)
|
||||
|
||||
(p_H, p_L) = timings(0.1, 0.3)
|
||||
prototype = OttoEngine(
|
||||
δ=[0.4, 0.4],
|
||||
ω_c=[2, 2],
|
||||
ψ_0=qt.basis([2], [1]),
|
||||
description=f"A model for scanning coupling strength and interactin switch times.",
|
||||
k_max=4,
|
||||
bcf_terms=[6] * 2,
|
||||
truncation_scheme="simplex",
|
||||
driving_process_tolerances=[StocProcTolerances(1e-3, 1e-3)] * 2,
|
||||
thermal_process_tolerances=[StocProcTolerances(1e-3, 1e-3)] * 2,
|
||||
# driving_process_tolerances=[StocProcTolerances(1e-5, 1e-5)] * 2,
|
||||
# thermal_process_tolerances=[StocProcTolerances(1e-5, 1e-5)] * 2,
|
||||
T=[1, 4],
|
||||
therm_methods=["tanhsinh", "tanhsinh"],
|
||||
Δ=1,
|
||||
num_cycles=5,
|
||||
Θ=1.5 / 0.05,
|
||||
dt=0.01/8,
|
||||
timings_H=p_H,
|
||||
timings_L=p_L,
|
||||
streaming_mode=True,
|
||||
shift_to_resonance=(False, False),
|
||||
)
|
||||
|
||||
δs = np.round(np.linspace(.3, .5, 3), 3)
|
||||
τ_Is = np.array([# .05,
|
||||
.1, .15, .2])
|
||||
δs, τ_Is
|
||||
|
||||
models = []
|
||||
|
||||
import itertools
|
||||
|
||||
for τ_I, δ in itertools.product(τ_Is, δs):
|
||||
(p_H, p_L) = timings(0.1, τ_I)
|
||||
|
||||
model = prototype.copy()
|
||||
model.δ = [δ, δ]
|
||||
model.timings_H = p_H
|
||||
model.timings_L = p_L
|
||||
models.append(model)
|
||||
|
||||
|
||||
ot.plot_cycles(models[:: len(δs)])
|
||||
|
||||
import figsaver as fs
|
||||
import plot_utils as pu
|
||||
from hiro_models.one_qubit_model import StocProcTolerances
|
||||
from hiro_models.otto_cycle import OttoEngine
|
||||
import hiro_models.model_auxiliary as aux
|
||||
import numpy as np
|
||||
import qutip as qt
|
||||
import utilities as ut
|
||||
import stocproc
|
||||
import matplotlib.pyplot as plt
|
||||
import otto_utilities as ot
|
||||
|
||||
import ray
|
||||
ray.shutdown()
|
||||
|
||||
#ray.init(address='auto')
|
||||
ray.init()
|
||||
from hops.util.logging_setup import logging_setup
|
||||
import logging
|
||||
logging_setup(logging.INFO)
|
||||
plt.rcParams['figure.figsize'] = (12,4)
|
||||
|
||||
def timings(τ_c, τ_i):
|
||||
τ_th = (1 - 2 * τ_c) / 2
|
||||
τ_i_on = (τ_th - 2*τ_i)
|
||||
timings_H = (0, τ_c, τ_c + τ_th, 2*τ_c + τ_th)
|
||||
timings_L_hot = (τ_c, τ_c + τ_i, τ_c + τ_i + τ_i_on, τ_c + 2 * τ_i + τ_i_on)
|
||||
|
||||
timings_L_cold = tuple(time + timings_H[2] for time in timings_L_hot)
|
||||
|
||||
return timings_H, (timings_L_cold, timings_L_hot)
|
||||
|
||||
(p_H, p_L) = timings(0.1, 0.3)
|
||||
prototype = OttoEngine(
|
||||
δ=[0.4, 0.4],
|
||||
ω_c=[2, 2],
|
||||
ψ_0=qt.basis([2], [1]),
|
||||
description=f"A model for scanning coupling strength and interactin switch times.",
|
||||
k_max=4,
|
||||
bcf_terms=[6] * 2,
|
||||
truncation_scheme="simplex",
|
||||
driving_process_tolerances=[StocProcTolerances(1e-3, 1e-3)] * 2,
|
||||
thermal_process_tolerances=[StocProcTolerances(1e-3, 1e-3)] * 2,
|
||||
# driving_process_tolerances=[StocProcTolerances(1e-5, 1e-5)] * 2,
|
||||
# thermal_process_tolerances=[StocProcTolerances(1e-5, 1e-5)] * 2,
|
||||
T=[1, 4],
|
||||
therm_methods=["tanhsinh", "tanhsinh"],
|
||||
Δ=1,
|
||||
num_cycles=5,
|
||||
Θ=1.5 / 0.05,
|
||||
dt=0.01/8,
|
||||
timings_H=p_H,
|
||||
timings_L=p_L,
|
||||
streaming_mode=True,
|
||||
shift_to_resonance=(False, False),
|
||||
)
|
||||
|
||||
δs = np.round(np.linspace(.3, .5, 3), 3)
|
||||
τ_Is = np.array([# .05,
|
||||
.1, .15, .2])
|
||||
δs, τ_Is
|
||||
|
||||
models = []
|
||||
|
||||
import itertools
|
||||
|
||||
for τ_I, δ in itertools.product(τ_Is, δs):
|
||||
(p_H, p_L) = timings(0.1, τ_I)
|
||||
|
||||
model = prototype.copy()
|
||||
model.δ = [δ, δ]
|
||||
model.timings_H = p_H
|
||||
model.timings_L = p_L
|
||||
models.append(model)
|
|
@ -1 +0,0 @@
|
|||
sshfs -oIdentityFile=~/.ssh/id_ed25519_taurus s8896854@taurusexport.hrsk.tu-dresden.de:/beegfs/ws/0/s8896854-ot_cpl/project/python/otto_motor/subprojects/cycle_shift taurus
|
|
@ -1 +0,0 @@
|
|||
../figsaver.py
|
|
@ -1 +0,0 @@
|
|||
../plot_utils.py
|
|
@ -1,164 +0,0 @@
|
|||
import figsaver as fs
|
||||
import plot_utils as pu
|
||||
from hiro_models.one_qubit_model import StocProcTolerances
|
||||
from hiro_models.otto_cycle import OttoEngine
|
||||
import hiro_models.model_auxiliary as aux
|
||||
import numpy as np
|
||||
import qutip as qt
|
||||
import utilities as ut
|
||||
import stocproc
|
||||
import matplotlib.pyplot as plt
|
||||
import otto_utilities as ot
|
||||
|
||||
import ray
|
||||
ray.shutdown()
|
||||
|
||||
#ray.init(address='auto')
|
||||
ray.init()
|
||||
from hops.util.logging_setup import logging_setup
|
||||
import logging
|
||||
logging_setup(logging.INFO)
|
||||
plt.rcParams['figure.figsize'] = (12,4)
|
||||
|
||||
def timings(τ_c, τ_i, percent_overlap=0):
|
||||
τ_cI = τ_c * (1-percent_overlap)
|
||||
|
||||
τ_thI = (1 - 2 * τ_cI) / 2
|
||||
τ_th = (1 - 2 * τ_c) / 2
|
||||
τ_i_on = (τ_thI - 2*τ_i)
|
||||
timings_H = (0, τ_c, τ_c + τ_th, 2*τ_c + τ_th)
|
||||
|
||||
timings_L_hot = (τ_cI, τ_cI + τ_i, τ_cI + τ_i + τ_i_on, τ_cI + 2 * τ_i + τ_i_on)
|
||||
|
||||
timings_L_cold = tuple(time + timings_H[2] for time in timings_L_hot)
|
||||
|
||||
return timings_H, (timings_L_cold, timings_L_hot)
|
||||
|
||||
τ_mod, τ_I = 0.1, 0.1
|
||||
(p_H, p_L) = timings(τ_mod, τ_I, .5)
|
||||
prototype = OttoEngine(
|
||||
δ=[0.4, 0.4],
|
||||
ω_c=[2, 2],
|
||||
ψ_0=qt.basis([2], [1]),
|
||||
description=f"A model for scanning coupling strength and interactin switch times.",
|
||||
k_max=4,
|
||||
bcf_terms=[6] * 2,
|
||||
truncation_scheme="simplex",
|
||||
driving_process_tolerances=[StocProcTolerances(1e-3, 1e-3)] * 2,
|
||||
thermal_process_tolerances=[StocProcTolerances(1e-3, 1e-3)] * 2,
|
||||
T=[1, 4],
|
||||
therm_methods=["tanhsinh", "tanhsinh"],
|
||||
Δ=1,
|
||||
num_cycles=4,
|
||||
Θ=1.5 / 0.05,
|
||||
dt=0.01/8,
|
||||
timings_H=p_H,
|
||||
timings_L=p_L,
|
||||
streaming_mode=True,
|
||||
shift_to_resonance=(False, False),
|
||||
L_shift=(0.0, 0.0),
|
||||
)
|
||||
ot.plot_cycle(prototype)
|
||||
|
||||
overlaps = np.round(np.linspace(0, 1, 3), 3)
|
||||
shifts = np.round(np.linspace(0, τ_mod, 3), 3)
|
||||
|
||||
models = []
|
||||
|
||||
import itertools
|
||||
|
||||
for overlap, shift in itertools.product(overlaps, shifts):
|
||||
print(overlap, shift)
|
||||
(p_H, p_L) = timings(τ_mod, τ_I, overlap)
|
||||
|
||||
model = prototype.copy()
|
||||
model.timings_H = p_H
|
||||
model.timings_L = p_L
|
||||
model.L_shift = (shift, shift)
|
||||
models.append(model)
|
||||
|
||||
|
||||
ot.plot_cycles(models)
|
||||
|
||||
import figsaver as fs
|
||||
import plot_utils as pu
|
||||
from hiro_models.one_qubit_model import StocProcTolerances
|
||||
from hiro_models.otto_cycle import OttoEngine
|
||||
import hiro_models.model_auxiliary as aux
|
||||
import numpy as np
|
||||
import qutip as qt
|
||||
import utilities as ut
|
||||
import stocproc
|
||||
import matplotlib.pyplot as plt
|
||||
import otto_utilities as ot
|
||||
|
||||
import ray
|
||||
ray.shutdown()
|
||||
|
||||
#ray.init(address='auto')
|
||||
ray.init()
|
||||
from hops.util.logging_setup import logging_setup
|
||||
import logging
|
||||
logging_setup(logging.INFO)
|
||||
plt.rcParams['figure.figsize'] = (12,4)
|
||||
|
||||
def timings(τ_c, τ_i, percent_overlap=0):
|
||||
τ_cI = τ_c * (1-percent_overlap)
|
||||
|
||||
τ_thI = (1 - 2 * τ_cI) / 2
|
||||
τ_th = (1 - 2 * τ_c) / 2
|
||||
τ_i_on = (τ_thI - 2*τ_i)
|
||||
timings_H = (0, τ_c, τ_c + τ_th, 2*τ_c + τ_th)
|
||||
|
||||
timings_L_hot = (τ_cI, τ_cI + τ_i, τ_cI + τ_i + τ_i_on, τ_cI + 2 * τ_i + τ_i_on)
|
||||
|
||||
timings_L_cold = tuple(time + timings_H[2] for time in timings_L_hot)
|
||||
|
||||
return timings_H, (timings_L_cold, timings_L_hot)
|
||||
|
||||
τ_mod, τ_I = 0.1, 0.1
|
||||
(p_H, p_L) = timings(τ_mod, τ_I, .5)
|
||||
prototype = OttoEngine(
|
||||
δ=[0.4, 0.4],
|
||||
ω_c=[2, 2],
|
||||
ψ_0=qt.basis([2], [1]),
|
||||
description=f"A model for scanning coupling strength and interactin switch times.",
|
||||
k_max=4,
|
||||
bcf_terms=[6] * 2,
|
||||
truncation_scheme="simplex",
|
||||
driving_process_tolerances=[StocProcTolerances(1e-3, 1e-3)] * 2,
|
||||
thermal_process_tolerances=[StocProcTolerances(1e-3, 1e-3)] * 2,
|
||||
T=[1, 4],
|
||||
therm_methods=["tanhsinh", "tanhsinh"],
|
||||
Δ=1,
|
||||
num_cycles=4,
|
||||
Θ=1.5 / 0.05,
|
||||
dt=0.01/8,
|
||||
timings_H=p_H,
|
||||
timings_L=p_L,
|
||||
streaming_mode=True,
|
||||
shift_to_resonance=(False, False),
|
||||
L_shift=(0.0, 0.0),
|
||||
)
|
||||
ot.plot_cycle(prototype)
|
||||
|
||||
overlaps = np.round(np.linspace(0, 1, 3), 3)
|
||||
shifts = np.round(np.linspace(0, τ_mod, 3), 3)
|
||||
|
||||
models = []
|
||||
|
||||
import itertools
|
||||
|
||||
for overlap, shift in itertools.product(overlaps, shifts):
|
||||
print(overlap, shift)
|
||||
(p_H, p_L) = timings(τ_mod, τ_I, overlap)
|
||||
|
||||
model = prototype.copy()
|
||||
model.timings_H = p_H
|
||||
model.timings_L = p_L
|
||||
model.L_shift = (shift, shift)
|
||||
models.append(model)
|
||||
|
||||
|
||||
ot.plot_cycles(models)
|
||||
fs.export_fig("timing_scan_cycles")
|
|
@ -1,3 +0,0 @@
|
|||
ot.integrate_online_multi(models, 10_000, increment=2000, analyze_kwargs=dict(every=100), data_path=".data_timing", results_path="results_timing")
|
||||
|
||||
ot.integrate_online_multi(models, 10_000, increment=2000, analyze_kwargs=dict(every=100), data_path=".data_timing", results_path="results_timing")
|
|
@ -1 +0,0 @@
|
|||
../utilities.py
|
Binary image files: 23 plot images, sizes unchanged (21 KiB to 3.8 MiB).