rand_bitstrings = random_bitstrings(n_qubits, 10_000)
yspace = xspace*(dim**2)*np.exp(-dim*xspace)
# plot both empirical and theoretical calculations
plt.figure(figsize=(9, 6))
plt.hist(rand_bitstrings, bins=50, density=True, label='Empirical')
plt.plot(xspace, yspace, label='Theoretical')
# plot the uniform distribution for reference
plt.axvline(x=1/dim, linestyle='dotted', color='r', label='Uniform Distribution')
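This fragment starts mid-script: it assumes n_qubits, dim, and xspace are already defined, and the figure is presumably finished with a legend after the last line shown. A minimal sketch of that surrounding setup, assuming it mirrors plot_pt.py further down the page (the register size and axis labels here are my guesses):

import numpy as np
import matplotlib.pyplot as plt

n_qubits = 4                         # assumed register size
dim = 2**n_qubits
xspace = np.linspace(0.0, 1.0, 100)  # probability axis for the theoretical curve

# ... plotting lines shown above ...

plt.xlabel('Probability of the sampled bitstring')
plt.ylabel('Probability density')
plt.legend()
plt.show()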
def random_bitstrings(n_qubits, n_programs):
    dim = 2**n_qubits
    # keep track of the probability of each (randomly) sampled bitstring
    probs_bitstring = []
    # simulate many Haar-random circuits
    for _ in range(n_programs):
        unitary = random_unitary(dim)
        # sample a bitstring according to the circuit's output distribution
        bitstring = np.random.choice(dim, p=[np.abs(unitary[b, 0])**2 for b in range(dim)])
        prob = np.abs(unitary[bitstring, 0])**2
        probs_bitstring.append(prob)
    return probs_bitstring
# run the noisy experiment
noisy_xeb = fidelity_xeb_noisy(n_qubits=6, trials=10**3, n_samples=10, prob_no_error=0.7)
print("Empirical FXEB of a noisy simulation: ", noisy_xeb)
def fidelity_xeb_noisy(n_qubits: int, trials: int, n_samples: int, prob_no_error: float):
    dim = 2**n_qubits
    # keep track of ideal output probabilities
    ideal_probs = []
    # build the depolarizing (Pauli-string) operators over the n-qubit space
    depolarizing_ops = []
    for x in itertools.product(paulis, repeat=n_qubits):
        op = functools.reduce(lambda a, b: np.kron(a, b), x)
        depolarizing_ops.append(op)
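The preview cuts off inside the loop that builds the Pauli-string operators. Below is a hedged, self-contained sketch of how the rest of the noisy simulation could plausibly proceed: with probability prob_no_error sample from the ideal output distribution, otherwise apply a uniformly random Pauli string (averaged over the full Pauli group this acts as a completely depolarizing channel), and always score samples against the ideal probabilities. The paulis definition and the name fidelity_xeb_noisy_sketch are my assumptions; random_unitary is the function from the random_unitary.py gist below.

import itertools
import functools
import numpy as np

# single-qubit Paulis (an assumed definition of the `paulis` used above)
paulis = [np.eye(2),
          np.array([[0, 1], [1, 0]]),
          np.array([[0, -1j], [1j, 0]]),
          np.array([[1, 0], [0, -1]])]

def fidelity_xeb_noisy_sketch(n_qubits, trials, n_samples, prob_no_error):
    dim = 2**n_qubits
    ideal_probs = []
    depolarizing_ops = [functools.reduce(np.kron, x)
                        for x in itertools.product(paulis, repeat=n_qubits)]
    for _ in range(trials):
        unitary = random_unitary(dim)
        ideal_state = unitary[:, 0]          # program applied to |0...0>
        if np.random.random() < prob_no_error:
            out_state = ideal_state
        else:
            # error branch: a uniformly random Pauli string; averaged over the
            # whole Pauli group this acts as a completely depolarizing channel
            op = depolarizing_ops[np.random.choice(len(depolarizing_ops))]
            out_state = op @ ideal_state
        sample_probs = np.abs(out_state)**2
        samples = np.random.choice(dim, size=n_samples, p=sample_probs)
        # score every sample against the *ideal* output probability
        ideal_probs.extend(np.abs(ideal_state[samples])**2)
    return dim * np.mean(ideal_probs) - 1

Under these assumptions the estimate should land close to prob_no_error, which is what makes FXEB usable as a fidelity proxy for noisy devices.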
dim = 2**n_qubits
def unif_dist(unitary, bitstring):
    return 1/dim  # all bitstrings have the same probability
unif_xeb = fidelity_xeb(n_qubits=n_qubits, trials=10, n_samples=10**5, sampler=unif_dist)
print("Empirical FXEB of a uniform sampler: ", unif_xeb)
# sample f_xeb using the same parameters as in the Google paper
n_qubits = 6
f_xeb = fidelity_xeb(n_qubits=n_qubits, trials=10, n_samples=10**5, sampler=simulate_probability)
print("Empirical FXEB: ", f_xeb)
willzeng / fidelity_xeb.py
Created November 5, 2019 17:41
Function for calculating the cross-entropy benchmarking fidelity
def fidelity_xeb(n_qubits: int, trials: int, n_samples: int, sampler: Callable[[np.ndarray, int], float]) -> float:
    dim = 2**n_qubits
    # keep track of the ideal simulated probabilities
    ideal_probs = []
    # loop over the random programs
    for _ in range(trials):
        unitary = random_unitary(dim)
        sample_probs = [sampler(unitary, bb) for bb in range(dim)]
        samples = np.random.choice(dim, size=n_samples, p=sample_probs)
        for sample in samples:
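The preview stops inside the sampling loop. Below is a hedged, self-contained sketch of how the estimator plausibly finishes, using the standard linear cross-entropy formula FXEB = dim * mean(ideal probability of each sample) - 1; the name fidelity_xeb_sketch is mine and random_unitary comes from the gist below, so treat this as a reconstruction rather than the original file.

from typing import Callable
import numpy as np

def fidelity_xeb_sketch(n_qubits: int, trials: int, n_samples: int,
                        sampler: Callable[[np.ndarray, int], float]) -> float:
    dim = 2**n_qubits
    ideal_probs = []
    for _ in range(trials):
        unitary = random_unitary(dim)
        # distribution we actually sample from (ideal, uniform, noisy, ...)
        sample_probs = [sampler(unitary, bb) for bb in range(dim)]
        samples = np.random.choice(dim, size=n_samples, p=sample_probs)
        for sample in samples:
            # score each sample with its *ideal* output probability
            ideal_probs.append(np.abs(unitary[sample, 0])**2)
    # linear cross-entropy benchmarking fidelity
    return dim * np.mean(ideal_probs) - 1

With the ideal sampler (simulate_probability) this should concentrate near 1, while the uniform sampler defined earlier should give a value near 0, which is the contrast the snippets above exercise.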
willzeng / plot_pt.py
Last active November 5, 2019 17:53
Plotting of an example Porter-Thomas distribution
n_qubits = 4
porter_thomas = quantum_sample_probability(n_qubits, 10_000)
# theoretical Porter-Thomas distribution
dim = 2**n_qubits
xspace = np.linspace(0.0, 1.0, 100)
yspace = dim * np.exp(-dim*xspace)
# plot both empirical and theoretical calculations
plt.figure(figsize=(9, 6))
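The preview stops right after the figure is created. A plausible finish, continuing the script above and mirroring the plotting fragment at the top of the page (my reconstruction, not necessarily the original file):

plt.hist(porter_thomas, bins=50, density=True, label='Empirical')
plt.plot(xspace, yspace, label='Theoretical Porter-Thomas')
# the uniform distribution assigns 1/dim to every bitstring
plt.axvline(x=1/dim, linestyle='dotted', color='r', label='Uniform Distribution')
plt.xlabel('Bitstring probability')
plt.ylabel('Probability density')
plt.legend()
plt.show()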
willzeng / porter_thomas.py
Created November 5, 2019 17:38
Functions to simulate the Porter-Thomas distribution
def simulate_probability(unitary: np.ndarray, bitstring: int) -> float:
    # simulates the probability of measuring `bitstring` when evolving from the ground state
    # according to the quantum program given by `unitary`
    return np.abs(unitary[bitstring, 0])**2

def quantum_sample_probability(n_qubits: int, trials: int) -> List:
    # returns the probabilities of a randomly chosen bitstring outcome over "trials" different
    # random quantum programs on n_qubits
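    # (the preview ends at the comments above; the body below is a hedged
    #  reconstruction consistent with that description, sampling one uniformly
    #  random bitstring per random program, and may differ from the original file)
    dim = 2**n_qubits
    probs = []
    for _ in range(trials):
        unitary = random_unitary(dim)
        # record the ideal probability of a uniformly chosen bitstring
        bitstring = np.random.randint(dim)
        probs.append(simulate_probability(unitary, bitstring))
    return probs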
willzeng / random_unitary.py
Created November 5, 2019 17:36
Generates a Haar-randomly sampled unitary matrix
def random_unitary(dim: int) -> np.ndarray:
    # follows the algorithm in https://arxiv.org/pdf/math-ph/0609050.pdf
    # returns a unitary of size dim x dim
    Z = np.array([np.random.normal(0, 1) + np.random.normal(0, 1) * 1j for _ in range(dim ** 2)]).reshape(dim, dim)
    Q, R = np.linalg.qr(Z)
    diag = np.diagonal(R)
    lamb = np.diag(diag) / np.absolute(diag)
    unitary = np.matmul(Q, lamb)
    # this condition asserts that the matrix is unitary
    assert np.allclose(unitary @ unitary.conj().T, np.eye(dim))
    return unitary
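A quick, hypothetical usage check of the generator above:

U = random_unitary(8)
print(U.shape)                                 # (8, 8)
print(np.allclose(U @ U.conj().T, np.eye(8)))  # True up to floating-point error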