|
using YaoExtensions, Yao
using Test, Random
import Optim                     # module binding needed for `Optim.Options` / `Optim.minimizer`
using Optim: LBFGS, optimize
4 | 4 |
|
| 5 | +# port the `Matrix` function to Yao's AD. |
5 | 6 | include("zygote_patch.jl") |
6 | 7 |
|
"""
    loss(u, ansatz)

Elementwise L1 distance between the target matrix `u` and the dense
matrix realization of `ansatz`.
"""
function loss(u, ansatz)
    # Materialize the ansatz as a dense matrix, then sum absolute deviations.
    residual = u .- Matrix(ansatz)
    return sum(abs, residual)
end
11 | 12 |
|
"""
    learn_u4(u::AbstractMatrix; niter=100)

Learn a general two-qubit (U(4)) gate that approximates the target unitary
`u`, by minimizing the elementwise L1 distance between `u` and the matrix of
a `general_U4` ansatz. Optimization uses LBFGS for at most `niter`
iterations.

Returns the trained ansatz circuit, with its parameters set to the LBFGS
minimizer.
"""
function learn_u4(u::AbstractMatrix; niter=100)
    # `general_U4` starts with zero angles; a free global phase is attached so
    # the ansatz can match `u` exactly, not merely up to a phase.
    ansatz = general_U4() * put(2, 1=>phase(0.0))
    # Objective and in-place gradient for Optim: sync the circuit parameters
    # to the optimizer's current point `x` before evaluating.
    f(x) = (dispatch!(ansatz, x); loss(u, ansatz))
    g!(G, x) = (dispatch!(ansatz, x); G .= gradient(a->loss(u, a), ansatz)[1])
    res = optimize(f, g!, parameters(ansatz), LBFGS(),
                   Optim.Options(iterations=niter))
    # The last point evaluated by LBFGS is not necessarily the minimizer;
    # write the optimal parameters back into the circuit before returning.
    dispatch!(ansatz, Optim.minimizer(res))
    println("final loss = $(loss(u,ansatz))")
    return ansatz
end
23 | 27 |
|
# Demo: learn a random two-qubit unitary. The RNG is seeded so the run is
# reproducible.
using Random
Random.seed!(2)
u = rand_unitary(4)   # random 4x4 unitary target (YaoExtensions)
c = learn_u4(u)
0 commit comments