Added numerically stable way to compute Chebyshev coefficients for 1/x, and modified tests, plotting methods, and specifications accordingly. README updates with examples of use forthcoming.
white-noise committed Sep 6, 2024
1 parent edec9b7 commit da719b6
Showing 6 changed files with 144 additions and 48 deletions.
6 changes: 2 additions & 4 deletions README.md
@@ -169,17 +169,15 @@ We can also plot the response given by a given QSP angle sequence, e.g. using:

> :construction: Here we provide some discussion of the recently added (as of `09/01/2024`) methods for computing QSP phases iteratively for symmetric QSP protocols.
<!-- Currently in progress. -->

Newly added methods related to the theory of symmetric quantum signal processing allow one to quickly determine, via an iterative quasi-Newton method, the phases corresponding to useful classes of functions, entirely subsuming those discussed previously. These methods are limited only by double precision, numerically stable, and fast even for high-degree polynomials. Currently these `symmetric_qsp` methods are contained in `pyqsp/sym_qsp_opt.py`, and we have prepared a temporary plotting module in `pyqsp/sym_qsp_plotting.py`, which can be run on its own with `python sym_qsp_plotting.py` to generate some common examples and illustrate common calling/plotting patterns.
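
For a flavor of the calling pattern, here is a minimal sketch mirroring `pyqsp/sym_qsp_plotting.py`; the import paths and keyword arguments are assumptions and should be checked against that file.

```python
import numpy as np
from pyqsp import poly, sym_qsp_opt

# Chebyshev coefficients for a scaled approximation to 1/x on [-1, -1/kappa] U [1/kappa, 1].
pcoefs, scale = poly.PolyOneOverX().generate(
    kappa=5, epsilon=0.01, chebyshev_basis=True, return_scale=True)

# The target has odd parity; slice out the nontrivial coefficients.
parity = 1
coef = pcoefs[parity::2]

# Iteratively (quasi-Newton) determine the symmetric QSP phases.
(phases, err, total_iter, qsp_seq_opt) = sym_qsp_opt.newton_Solver(coef, parity, crit=1e-12)
print(len(phases), err, total_iter)

# The imaginary part of the top-left unitary matrix element approximates the target polynomial.
samples = np.linspace(-1, 1, num=400)
response = qsp_seq_opt.gen_response_im(samples)
```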

For instance, the current file returns approximations to cosine, sine, and a step function, of which we reproduce the first and third plots below.

![QSP response function trigonometric cosine](https://github.com/ichuang/pyqsp/blob/master/docs/ex_cosine_approximation.png)
![QSP response function approximating trigonometric cosine](https://github.com/ichuang/pyqsp/blob/master/docs/ex_cosine_approximation.png)

As the quality of the approximation is quite high, the three intended curves superpose; we therefore include a logarithmic plot of the pairwise differences between the plotted values, indicating performance limited essentially by machine precision.

![QSP response function for step function](https://github.com/ichuang/pyqsp/blob/master/docs/ex_step_approximation.png)
![QSP response function approximating a step function](https://github.com/ichuang/pyqsp/blob/master/docs/ex_step_approximation.png)

As in the case of trigonometric cosine, the approximation to the step function is also excellent, and far more forgiving to generate than with the earlier `laurent` method.

16 changes: 9 additions & 7 deletions pyqsp/main.py
@@ -45,7 +45,7 @@ def CommandLine(args=None, arglist=None):
pyqsp --poly=-1,0,2 poly2angles
pyqsp --poly=-1,0,2 --plot poly2angles
pyqsp --signal_operator=Wz --poly=0,0,0,1 --plot poly2angles
pyqsp --signal_operator=Wz --poly=0,0,0,1 --plot poly2angles
pyqsp --plot --tau 10 hamsim
pyqsp --plot --tolerance=0.01 --seqargs 3 invert
pyqsp --plot-npts=4000 --plot-positive-only --plot-magnitude --plot --seqargs=1000,1.0e-20 --seqname fpsearch angles
@@ -61,6 +61,8 @@ def CommandLine(args=None, arglist=None):
pyqsp --plot-positive-only --plot-real-only --plot --polyargs 20,3.5 --polyname gibbs --plot-qsp-model poly
pyqsp --polydeg 16 --measurement="z" --func="-1+np.sign(1/np.sqrt(2)-x)+ np.sign(1/np.sqrt(2)+x)" --plot polyfunc
## Currently deprecated: pyqsp --plot --cosine=1.0, 1e-12 ##
""".format(version)

parser = argparse.ArgumentParser(
@@ -108,7 +110,7 @@ def float_list(value):
action="store_true")
parser.add_argument(
"--poly",
help="comma delimited list of floating-point coeficients for polynomial, as const, a, a^2, ...",
help="comma delimited list of floating-point coefficients for polynomial, as const, a, a^2, ...",
action="store",
type=float_list)
parser.add_argument(
@@ -141,7 +143,7 @@ def float_list(value):
type=float_list)
parser.add_argument(
"--polyname",
help="name of polynomial generate using the 'poly' command, e.g. 'sign'",
help="name of polynomial generated using the 'poly' command, e.g. 'sign'",
type=str,
default=None)
parser.add_argument(
@@ -214,7 +216,7 @@ def float_list(value):
default=5000)
parser.add_argument(
"--npts-theta",
help="number of discretized values of theta to use in tensorflow optimization",
help="number of discretized values of theta to use in TensorFlow optimization",
type=int,
default=30)

@@ -243,7 +245,7 @@ def float_list(value):
coefs = args.poly
if not coefs:
print(
f"[pyqsp.main] must specify polynomial coeffients using --poly, e.g. --poly -1,0,2")
f"[pyqsp.main] must specify polynomial coefficients using --poly, e.g. --poly -1,0,2")
sys.exit(0)
if isinstance(coefs, str):
coefs = list(map(float, coefs.split(",")))
@@ -271,7 +273,7 @@ def float_list(value):
phiset,
target=lambda x: scale * np.cos(args.seqargs[0] * x),
signal_operator="Wx",
title="Hamiltonian Simultation (Cosine)",
title="Hamiltonian Simulation (Cosine)",
**plot_args)

pg = pyqsp.poly.PolySineTX()
@@ -287,7 +289,7 @@ def float_list(value):
phiset,
target=lambda x: scale * np.sin(args.seqargs[0] * x),
signal_operator="Wx",
title="Hamiltonian Simultation (Sine)",
title="Hamiltonian Simulation (Sine)",
**plot_args)

elif args.cmd == "fpsearch":
29 changes: 26 additions & 3 deletions pyqsp/poly.py
@@ -205,7 +205,10 @@ def generate(
Approximation to 1/x polynomial, using sums of Chebyshev polynomials,
from Quantum algorithm for systems of linear equations with exponentially
improved dependence on precision, by Childs, Kothari, and Somma,
https://arxiv.org/abs/1511.02306v2
https://arxiv.org/abs/1511.02306v2.
Note that, per Lemma 14 (page 16) of the above paper, the given function is
2*epsilon-close to the desired function over the region specified below.
Define region D_kappa to be from 1/kappa to 1, and from -1/kappa to -1. A good
approximation is desired only in this region.
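
For reference, a sketch of the construction being targeted here (parameter conventions follow the cited work and should be checked against it):

$$
\frac{1}{x} \;\approx\; \frac{1 - (1 - x^2)^b}{x}, \qquad x \in D_\kappa = [-1,\,-1/\kappa] \cup [1/\kappa,\,1],
$$

with b on the order of kappa^2 * log(kappa/epsilon), and the Chebyshev expansion of the right-hand side truncated after the first j0 + 1 odd Chebyshev terms, j0 = sqrt(b * log(4*b/epsilon)) (maximum degree 2*j0 + 1), which yields the 2*epsilon-closeness to 1/x on D_kappa quoted above.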
@@ -216,6 +219,8 @@
j0 = int(np.sqrt(b * np.log(4 * b / epsilon)))
print(f"b={b}, j0={j0}")

"""
# Analytic form for inverse function; note large integer divisions.
g = np.polynomial.chebyshev.Chebyshev([0])
for j in range(j0 + 1):
gcoef = 0
@@ -225,14 +230,32 @@
g += (-1)**j * gcoef * \
np.polynomial.chebyshev.Chebyshev([0] * deg + [1])
g = 4 * g
"""

# Iterative subroutine replacing above block to avoid integer overflow.
# Following analytic form of Lemma 18 from https://arxiv.org/abs/1511.02306v2.
# I.e., (1 - (1 - x**2)**b)/x, as per Eq. (77) on page 19 of the cited work.
g = np.polynomial.chebyshev.Chebyshev([1])
for j in range(b):
g *= np.polynomial.chebyshev.Chebyshev([0.5, 0, -0.5])
g = -1*g
g += np.polynomial.chebyshev.Chebyshev([1])

# Perform polynomial division; remainder is trivial and ignored.
g_coef = g.coef
div_result = np.polynomial.chebyshev.chebdiv(g_coef, [0, 1])[0]

# Replace g with its divided value.
g = np.polynomial.chebyshev.Chebyshev(div_result)

if ensure_bounded:
# TODO: determine the meaning of this minimization.
res = scipy.optimize.minimize(g, (-0.1,), bounds=[(-0.8, 0.8)])
pmin = res.x
print(
f"[PolyOneOverX] minimum {g(pmin)} is at {pmin}: normalizing")
scale = 1 / abs(g(pmin))
if 0:
if 1: # Choose to be less constrained
scale = scale * 0.9
else:
scale = scale * 0.5
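
As a quick standalone sanity check (an illustrative sketch, not part of `poly.py`), the iterative Chebyshev construction above can be compared numerically against the closed form `(1 - (1 - x**2)**b)/x`:

```python
import numpy as np
from numpy.polynomial import chebyshev as cheb

b = 10                        # illustrative value; the library derives b from kappa and epsilon
x = np.linspace(0.2, 1.0, 9)  # sample points inside the region of validity

# Build 1 - (1 - x**2)**b in the Chebyshev basis, as in the iterative subroutine.
g = cheb.Chebyshev([1.0])
for _ in range(b):
    g *= cheb.Chebyshev([0.5, 0.0, -0.5])   # (1 - x**2) written as 0.5*T0 - 0.5*T2
g = cheb.Chebyshev([1.0]) - g

# Divide by x (i.e., by T1); the remainder is negligible since the numerator vanishes at x = 0.
quotient = cheb.Chebyshev(cheb.chebdiv(g.coef, [0.0, 1.0])[0])

# Agreement with the closed form is at machine precision.
direct = (1.0 - (1.0 - x**2)**b) / x
print(np.max(np.abs(quotient(x) - direct)))
```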
@@ -256,7 +279,7 @@ def generate(
class PolyOneOverXRect(PolyGenerator):

def help(self):
return "Region of validity is from 1/kappa to 1, and from -1/kappa to -1. Error is epsilon"
return "Region of validity is from 1/kappa to 1, and from -1/kappa to -1. Error is epsilon."

def generate(
self,
130 changes: 98 additions & 32 deletions pyqsp/sym_qsp_plotting.py
@@ -23,9 +23,8 @@ def main():
cos_fun = lambda x: np.polynomial.chebyshev.chebval(x, pcoefs)

# Initialize definite parity coefficients, and slice out nontrivial ones.
full_coef = pcoefs
parity = 0
coef = full_coef[parity::2]
coef = pcoefs[parity::2]

# Anonymous function for ideal polynomial from Chebyshev coefficients.
true_fun = lambda x: 0.5*np.cos(freq*x)
@@ -53,7 +52,7 @@ def main():
# Standard plotting of relevant components.
axs[0].plot(samples, im_vals, 'r', label="QSP poly")
axs[0].plot(samples, cos_vals, 'b', label="Target poly")
axs[0].plot(samples, des_vals, 'g', label="Ideal fun")
axs[0].plot(samples, des_vals, 'g', label="Ideal function")
# plt.plot(samples, re_vals, 'r', label="Real") # Unimportant real component.

diff = np.abs(im_vals - cos_vals)
@@ -96,7 +95,7 @@ def main():
pcoefs = pg.generate(tau=freq, epsilon=1e-12, chebyshev_basis=True)

# Generate anonymous function (approx to sine) using pcoefs.
cos_fun = lambda x: np.polynomial.chebyshev.chebval(x, pcoefs)
sin_fun = lambda x: np.polynomial.chebyshev.chebval(x, pcoefs)

# Initialize definite parity coefficients, and slice out nontrivial ones.
full_coef = pcoefs
@@ -120,20 +119,20 @@

# Map the desired function and achieved function over samples.
des_vals = np.array(list(map(true_fun, samples)))
cos_vals = np.array(list(map(cos_fun, samples)))
sin_vals = np.array(list(map(sin_fun, samples)))

# Generate simultaneous plots.
fig, axs = plt.subplots(2,sharex=True)
fig.suptitle('Approximating sine with QSP to machine precision')

# Standard plotting of relevant components.
axs[0].plot(samples, im_vals, 'r', label="QSP poly")
axs[0].plot(samples, cos_vals, 'b', label="Target poly")
axs[0].plot(samples, sin_vals, 'b', label="Target poly")
axs[0].plot(samples, des_vals, 'g', label="Ideal fun")
# plt.plot(samples, re_vals, 'r', label="Real") # Unimportant real component.

diff = np.abs(im_vals - cos_vals)
true_diff = np.abs(des_vals - cos_vals)
diff = np.abs(im_vals - sin_vals)
true_diff = np.abs(des_vals - sin_vals)
total_diff = np.abs(im_vals - des_vals)
axs[1].plot(samples, diff, 'r', label="Approx vs QSP")
axs[1].plot(samples, true_diff, 'g', label="Approx vs true")
@@ -160,79 +159,146 @@

#######################################################
# #
# SIGN APPROX #
# INVERSE APPROX #
# #
#######################################################

"""
Here we include some notes on how to choose the proper number of phases, sample points, and polynomial degree when determining good QSP protocols for piecewise continuous functions.
# Generate inverse polynomial approximation
pg = poly.PolyOneOverX()

# Underlying parameters of inverse approximation.
# We use return_scale=True for ease of plotting correct desired function.
kappa=5
epsilon=0.01
pcoefs, scale = pg.generate(kappa=kappa, epsilon=epsilon, chebyshev_basis=True, return_scale=True)

# Generate anonymous approximation and ideal function for scaled reciprocal.
inv_fun = lambda x: np.polynomial.chebyshev.chebval(x, pcoefs)
ideal_fun = lambda x: scale*np.reciprocal(x)

# Using odd parity and instantiating desired coefficients.
parity = 1
coef = pcoefs[parity::2]

# Optimize to the desired function using Newton solver.
crit = 1e-12
(phases, err, total_iter, qsp_seq_opt) = sym_qsp_opt.newton_Solver(coef, parity, crit=crit)
print("phase len: %s\nerror: %s\niter: %s\n"%(str(len(phases)), str(err), str(total_iter)))

# Generate samples for plotting.
num_samples = 400
sample_0 = np.linspace(-1, -1.0/10000,num=num_samples)
sample_1 = np.linspace(1.0/10000,1,num=num_samples)
# Adding NaN between ranges to remove plotting artifacts.
samples = np.concatenate((sample_0, [float('NaN')], sample_1))

We know that the number of sampling points should be greater than the degree of the polynomial, as otherwise the polynomial can directly interpolate these points and we will get a bad fit.
# Grab im part of QSP unitary top-left matrix element.
im_vals = np.array(qsp_seq_opt.gen_response_im(samples))

# Generate plotted values.
approx_vals = np.array(list(map(inv_fun, samples)))
# NOTE: For some reason this map casts to have an additional dimension.
ideal_vals = np.array(list(map(ideal_fun, samples)))[:,0]

fig, axs = plt.subplots(2, sharex=True)
fig.suptitle('Approximating $1/x$ with QSP on $[-1,-1/5]\\cup[1/5, 1]$')

# Plot achieved QSP protocol along with approximating polynomial.
axs[0].plot(samples, approx_vals, 'r', label="Poly approx")
axs[0].plot(samples, im_vals, 'g', label="Poly QSP")
axs[0].plot(samples, ideal_vals, 'b', label="True function")

# Plot difference between the two on a log plot.
diff = np.abs(im_vals - approx_vals)
approx_diff = np.abs(ideal_vals - approx_vals)

# Plot QSP output polynomial versus desired polynomial, and error bound.
axs[1].plot(samples, diff, 'r', label="Approx vs QSP")
axs[1].plot(samples, [crit]*len(samples), 'y', label="QSP error limit")

# Plot approximation versus ideal function, and error bound.
axs[1].plot(samples, approx_diff, 'g', label="Approx vs ideal")
axs[1].plot(samples, [epsilon]*len(samples), 'b', label="Approx error limit")
axs[1].set_yscale('log')

# Set axis limits and quality of life features.
axs[0].set_xlim([-1, 1])
axs[0].set_ylim([-1, 1])
axs[0].set_ylabel("Component value")
axs[1].set_ylabel("Absolute error")
axs[1].set_xlabel('Input signal')

We also know that the degree of the polynomial should roughly match the scaling suggested by the ultimate uniform error and slew rates specified for the fit (e.g., the delta and epsilon parameters used for a given bandpass filter).
# Further cosmetic alterations
axs[0].spines['top'].set_visible(False)
axs[0].spines['right'].set_visible(False)
axs[1].spines['top'].set_visible(False)
axs[1].spines['right'].set_visible(False)

For the sign function below, the degree is hardcoded, and the error function being compared against depends on delta; note there is no explicit epsilon dependence, other than the one baked into the degree. Currently the Chebyshev samples used do not depend on the degree, but could be chosen to be something like 2*degree if unspecified (currently not permitted).
axs[0].legend(loc="upper right")
axs[1].legend(loc="upper right")

Otherwise, the max_scale is currently hardcoded in the call of the function. We could relax this (defaulting to 0.9) and check somewhere that it is not within some small epsilon of 1, given the known performance of the iterative algorithm even for large infinity norm.
axs[0].axvspan(-1.0/kappa, 1/kappa, alpha=0.1, color='black',lw=0)
axs[1].axvspan(-1.0/kappa, 1/kappa, alpha=0.1, color='black',lw=0)

plt.show()

"""
#######################################################
# #
# SIGN APPROX #
# #
#######################################################

# Call existing methods to compute approximation to rect.
freq = 16
pg = poly.PolySign()

# TODO: note that definition of PolySign has been changed to return bare pcoefs and not TargetPolynomial
pcoefs = pg.generate(
pcoefs, scale = pg.generate(
degree=161,
delta=25,
chebyshev_basis=True,
cheb_samples=250)
# Cast from TargetPolynomial class bare Chebyshev coefficients.
pcoefs = pcoefs.coef
cheb_samples=250,
return_scale=True)
# Cast from the TargetPolynomial class to bare Chebyshev coefficients if not using return_scale.
# pcoefs = pcoefs.coef

# Generate anonymous function (approx to sign) using pcoefs.
cos_fun = lambda x: np.polynomial.chebyshev.chebval(x, pcoefs)
sign_fun = lambda x: np.polynomial.chebyshev.chebval(x, pcoefs)

# Initialize definite parity coefficients, and slice out nontrivial ones.
parity = 1
bare_coefs = pcoefs
coef = bare_coefs[parity::2]

# Anonymous function for ideal polynomial from Chebyshev coefficients.
true_fun = lambda x: 0.9 * scipy.special.erf(x * 20)
true_fun = lambda x: scale * scipy.special.erf(x * 20)

# Optimize to the desired function using Newton solver.
(phases, err, total_iter, qsp_seq_opt) = sym_qsp_opt.newton_Solver(coef, parity, crit=1e-12)
print("phases: %s\nerror: %s\niter: %s\n"%(str(phases), str(err), str(total_iter)))

### TODO: note that current return here is of wrapped polynomial type, and not what the code above is expecting.

# Plot achieved versus desired function over samples.
num_samples = 400
samples = np.linspace(-1,1,num=num_samples)

# Compute real and imaginary parts of (0,0) matrix element.
re_vals = np.array(qsp_seq_opt.gen_response_re(samples))
im_vals = np.array(qsp_seq_opt.gen_response_im(samples))

# Map the desired function and achieved function over samples.
des_vals = np.array(list(map(true_fun, samples)))
cos_vals = np.array(list(map(cos_fun, samples)))
des_vals = np.array(list(map(true_fun, samples)))[:,0] # Note shape casting.
sign_vals = np.array(list(map(sign_fun, samples)))

# Generate simultaneous plots.
fig, axs = plt.subplots(2,sharex=True)
fig.suptitle('Approximating sign with QSP to machine precision')

# Standard plotting of relevant components.
axs[0].plot(samples, im_vals, 'r', label="QSP imag poly")
axs[0].plot(samples, cos_vals, 'b', label="Target poly")
axs[0].plot(samples, sign_vals, 'b', label="Target poly")
axs[0].plot(samples, des_vals, 'g', label="Ideal fun")
# axs[0].plot(samples, re_vals, 'y', label="QSP real poly") # Unimportant real component.

diff = np.abs(im_vals - cos_vals)
true_diff = np.abs(des_vals - cos_vals)
diff = np.abs(im_vals - sign_vals)
true_diff = np.abs(des_vals - sign_vals)
total_diff = np.abs(im_vals - des_vals)
axs[1].plot(samples, diff, 'r', label="Approx vs QSP")
axs[1].plot(samples, true_diff, 'g', label="Approx vs true")
