README.md
#### 3. Find the interferometer that best satisfies the requirements
Note that the *first time* the optimizer is called, the various `numba` functions in the code are compiled.
Subsequent calls start immediately until you restart the IPython kernel.

```python
from bolt import Optimizer
opt = Optimizer(lr = 0.01, max_steps=500)
cov_matrix = opt(req)  # req from the previous step
print(f'The search took {opt.elapsed:.3f} seconds')

import matplotlib.pyplot as plt
plt.plot(opt.losses)
```
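
As a rough way to see the one-time compilation cost mentioned above, you can compare the elapsed times of two consecutive runs. A minimal sketch, assuming `opt` and `req` from the block above are still in scope:

```python
# First call: includes numba compilation. Second call: starts immediately.
cov_matrix = opt(req)
t_first = opt.elapsed
cov_matrix = opt(req)
t_second = opt.elapsed
print(f'first run: {t_first:.3f} s (includes compilation), second run: {t_second:.3f} s')
```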

#### 4. Find the interferometer that best classifies an array of input states

The discriminator takes an array of states as input and returns the best unitary it can find for classifying (distinguishing) them.

A. Photon-Number-Based Detection (PNRD)

```python
from bolt import State
from bolt.PNRD import PNRD_State_Discriminator
# 2 photons in 4 modes
# dual rail encoding

bs1 = State({(1,0,0,1):(1/2),(0,1,1,0):(1/2)})
bs2 = State({(1,0,0,1):(1/2),(0,1,1,0):(-1/2)})
bs3 = State({(1,0,1,0):(1/2),(0,1,0,1):(1/2)})
bs4 = State({(1,0,1,0):(1/2),(0,1,0,1):(-1/2)})

bell = [bs1, bs2, bs3, bs4]
opt = PNRD_State_Discriminator(lr=0.02, epsilon=1e-6, max_steps=400)  # lr: learning rate, epsilon: convergence tolerance
cov_matrix = opt(bell)
print(f'The search took {opt.elapsed:.3f} seconds')

import matplotlib.pyplot as plt
plt.plot(opt.losses);
```

B. Binary Detection (BPD)

```python
from bolt import State
from bolt.BPD import General_State_Discriminator  # BPD_parr can be imported instead; it is faster for larger photon numbers (>10), see the sketch after this block
# 2 photons in 4 modes
# dual rail encoding

bs1 = State({(1,0,0,1):(1/2),(0,1,1,0):(1/2)})
bs2 = State({(1,0,0,1):(1/2),(0,1,1,0):(-1/2)})
bs3 = State({(1,0,1,0):(1/2),(0,1,0,1):(1/2)})
bs4 = State({(1,0,1,0):(1/2),(0,1,0,1):(-1/2)})

bell = [bs1, bs2, bs3, bs4]
opt = General_State_Discriminator(lr=0.02, epsilon=1e-6, max_steps=400)  # lr: learning rate, epsilon: convergence tolerance
cov_matrix = opt(bell)
print(f'The search took {opt.elapsed:.3f} seconds')

import matplotlib.pyplot as plt
plt.plot(opt.losses);
```
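
For larger photon numbers, the import comment above points to `BPD_parr`. A hedged sketch of the swap, assuming it exposes the same constructor and call interface as `General_State_Discriminator` (not verified here):

```python
from bolt.BPD import BPD_parr  # parallelised variant; interface assumed to match General_State_Discriminator

opt = BPD_parr(lr=0.02, epsilon=1e-6, max_steps=400)
cov_matrix = opt(bell)  # `bell` as defined above
```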

The `General_State_Discriminator` class also provides a static method `discrim_states(n, l)`, where `n` is the number of photons and `l` is the number of modes. It returns a nested list in which each row groups the Fock states that produce identical BPD readouts [such as (0,1,0,3) and (0,2,0,2)], with the first element of each row being the readout pattern itself [in this case, (0,1,0,1)].
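
Since `discrim_states` is a static method, it can be inspected without running any optimization. A small sketch for the 2-photon, 4-mode case used above:

```python
from bolt.BPD import General_State_Discriminator

# group the 2-photon, 4-mode Fock states by the set of detectors they trigger
groups = General_State_Discriminator.discrim_states(n=2, l=4)
for row in groups:
    print(row[0], '<-', row[1:])  # readout pattern, followed by the states it cannot distinguish
```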

### Did you blink?

Let's increase the complexity (16 modes, 12 photons). It should still be reasonably fast (44 it/s on my laptop):
```python
from bolt import State, IOSpec, Requirements, Optimizer
# ... build the 16-mode, 12-photon State, IOSpec, a Requirements object req and an Optimizer opt (omitted here)
cov_matrix = opt(req)
```

### All possible output amplitudes
`Bolt` can generate the complete output state as well:

```python
from bolt.utils import all_outputs
from scipy.stats import unitary_group
# ... construct an input State state_in (omitted here)
V = unitary_group.rvs(state_in.num_modes)  # interferometer unitary
out,_ = all_outputs(state_in, V, grad=False)
```

`all_outputs` also has a gradient mode that returns the gradient of the output amplitudes with respect to the interferometer unitary; to use it, set `grad=True`.
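
A minimal sketch of the gradient call, reusing `state_in` and `V` from the block above; the exact shape of the returned gradients is an assumption here (per-output-state derivatives with respect to the entries of `V`):

```python
# out: dict mapping each output Fock state to its amplitude
# grads: the corresponding derivatives of each amplitude w.r.t. the interferometer unitary V
out, grads = all_outputs(state_in, V, grad=True)

some_state = next(iter(out))
print(some_state, out[some_state], grads[some_state].shape)
```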

### Fun Experiments

Note that at times the optimizer gets stuck in a local minimum. Run the optimization a few times to assess how often this happens.
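
A simple way to cope with local minima is to restart the optimizer a few times and keep the best result. A sketch, assuming the PNRD discriminator and the `bell` states from the examples above:

```python
from bolt.PNRD import PNRD_State_Discriminator

best_loss, best_V = float('inf'), None
for trial in range(5):  # a handful of random restarts
    opt = PNRD_State_Discriminator(lr=0.02, epsilon=1e-6, max_steps=400)
    V = opt(bell)
    if opt.losses[-1] < best_loss:
        best_loss, best_V = opt.losses[-1], V
print(f'best final loss over 5 runs: {best_loss:.4f}')
```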

#### Bell state analyzer
bolt/BPD.py (new file)
import time
import numpy as np
from numpy_ml.neural_nets.optimizers import Adam
from tqdm import trange

# from scipy.linalg import expm
from typing import Tuple, List
from .expm import expm
from .tree import Tree
from .liealgebra import L, dV_dlambdas
from .states import State,IOSpec,Requirements
from .utils import partition,pos,all_outputs

class General_State_Discriminator:
    """
    Interferometer optimizer for discriminating a set of input states with binary (click/no-click) detection.
    Create an instance by specifying:
    lr (float): learning rate
    epsilon (float): the optimization stops once the drop in loss per step falls below epsilon
    max_steps (int): stop at max_steps if the epsilon criterion is not met yet
    cov_matrix_init: optional initial matrix, used as the starting point when com=True
    com (bool): if True, start from cov_matrix_init instead of a random unitary
    natural (bool): if True, use natural-gradient updates on the unitary; otherwise use Adam on the Lie-algebra parameters

    Usage:
    >>> opt = General_State_Discriminator(lr=0.01, epsilon=1e-4)
    >>> V = opt(input_states)
    """
    def __init__(self, lr:float, epsilon:float = 1e-6, max_steps:int = 1000, cov_matrix_init=None, com:bool = False, natural:bool = True):
        self.epsilon = epsilon
        self.max_steps = max_steps
        self.losses = []
        self.probs = []
        self.elapsed = 0.0
        self.cov_matrix_init = cov_matrix_init
        self.natural = natural
        self.com = com  # if True, the optimization starts from the user-supplied cov_matrix_init
        self.lr = lr
        self.opt = Adam(lr=lr)

@staticmethod
def mse(x, y):
return 0.5*(x-y)**2
    @staticmethod
    def discrim_states(n:int, l:int):  # n = number of photons, l = number of modes
        # all readout patterns with at most one photon (one click) per mode, for 1..n photons
        b = []
        for i in range(1, n+1):
            for j in partition(i, (1,)*l):
                b.append(j)
        # all ways of distributing n photons over l modes
        c = partition(n, (n,)*l)
        # group the Fock states that trigger the same set of detectors;
        # the first element of each row is the readout pattern itself
        a = []
        for i in b:
            d = [i]
            for j in c:
                if pos(i) == pos(j):
                    d.append(j)
            a.append(d)
        return a
    @staticmethod
    def no_photons(x:State):
        # total photon number, read off the first Fock-basis key of the state
        first_key = next(iter(x))
        return sum(first_key)

    def __call__(self, st: List[State]):
        """
        The optimizer instance supports being called as a function on a list of input states.
        It records the list of losses (self.losses) and the elapsed time (self.elapsed).

        Arguments:
            st (List[State]): the input states to discriminate
        Returns:
            the optimized interferometer unitary
        """
        lengthst = len(st)
        num_modes = st[0].num_modes
        num_photons = self.no_photons(x=st[0])
        size = num_modes**2  # number of Lie-algebra parameters of a num_modes x num_modes unitary
        states = self.discrim_states(n=num_photons, l=num_modes)
tic = time.time()

        if self.com:
            V = self.cov_matrix_init  # start from the user-supplied matrix
        else:
            lambdas = np.random.normal(size=size, scale=0.01)
            V = expm(L(lambdas))  # random unitary close to the identity
        if self.natural:
            # real 2N x 2N representation of V, updated directly by the natural-gradient step
            A = np.block([[np.real(V), -np.imag(V)], [np.imag(V), np.real(V)]])
        self.losses = [1e6]  # sentinel so the convergence check has a previous value at the first step
try:
progress = trange(self.max_steps)
for step in progress:
grad_update = 0
loss = 0
                # op[i]: dict of output amplitudes for input state i;
                # op_g[i]: the corresponding gradients with respect to the interferometer unitary
                op = []
                op_g = []
                for i in range(lengthst):
                    u, v = all_outputs(st[i], V, grad=True)
                    op.append(u)
                    op_g.append(v)
                # loss: for each pair of distinct input states and each group of Fock states that
                # trigger the same detectors, penalise the product of the probabilities that the
                # two inputs produce that readout (overlapping readouts make the states indistinguishable)
                for i in range(lengthst):
                    for j in range(lengthst):
                        if j > i:
                            for r in states:  # this part can be parallelised for high mode numbers
                                len1 = len(r)
                                for i1 in range(1, len1):  # index 0 holds the readout pattern itself
                                    for i2 in range(1, len1):
                                        a = r[i1]
                                        b = r[i2]
                                        a1 = op[i].get(a)
                                        da1_dV = op_g[i].get(a)
                                        a2 = op[j].get(b)
                                        da2_dV = op_g[j].get(b)
                                        p1 = abs(a1)**2
                                        p2 = abs(a2)**2
                                        loss += p1*p2
                                        dL_da1 = np.conj(a1)*p2
                                        dL_da2 = np.conj(a2)*p1
                                        if self.natural:
                                            da1_dA = np.block([[da1_dV, -1j*da1_dV], [1j*da1_dV, da1_dV]])
                                            da2_dA = np.block([[da2_dV, -1j*da2_dV], [1j*da2_dV, da2_dV]])
                                            grad_update += 2*np.real((dL_da1 * da1_dA) + (dL_da2 * da2_dA))
                                        else:
                                            da1_dlambdas = np.sum(da1_dV * dV_dlambdas(lambdas), axis=(1, 2))
                                            da2_dlambdas = np.sum(da2_dV * dV_dlambdas(lambdas), axis=(1, 2))
                                            grad_update += 2*np.real((dL_da1 * da1_dlambdas) + (dL_da2 * da2_dlambdas))
self.losses.append(loss)
if self.natural:
Q = grad_update
D = 0.5*(Q - A @ Q.T @ A) # natural gradient
A = A @ expm(self.lr * D.T @ A)
V = A[:len(V), :len(V)] + 1j*A[len(V):, :len(V)]
else:
lambdas = self.opt.update(lambdas, grad_update, None, None)
V = expm(L(lambdas))
progress.set_description(f"{step}:loss = {loss:.4f}")
if abs(self.losses[-2] - self.losses[-1]) < self.epsilon:
break
except KeyboardInterrupt:
print('gracefully stopping optimization...')
self.losses.pop(0)
toc = time.time()
self.elapsed = toc-tic
return V