Upload model
unitary compilation, qc target, 3 qubits, 12 gates max
- config.yaml +77 -0
- model.pt +3 -0
config.yaml
ADDED
@@ -0,0 +1,77 @@
+target: genQC.pipeline.diffusion_pipeline_special.DiffusionPipeline_Compilation
+params:
+  scheduler:
+    target: genQC.scheduler.scheduler_ddim.DDIMScheduler
+    params:
+      device: cpu
+      num_train_timesteps: 1000
+      beta_start: 0.0001
+      beta_end: 0.02
+      beta_schedule: cos_alpha
+      input_perturbation: 0.1
+      eta: 1
+  model:
+    target: genQC.models.unet_qc.QC_Compilation_UNet
+    save_path: null
+    params:
+      model_features:
+      - 128
+      - 128
+      - 256
+      clr_dim: 8
+      num_clrs: 8
+      t_emb_size: 256
+      cond_emb_size: 512
+      num_heads:
+      - 8
+      - 8
+      - 2
+      num_res_blocks:
+      - 2
+      - 2
+      - 4
+      transformer_depths:
+      - 1
+      - 2
+      - 1
+      unitary_encoder_config:
+        cond_emb_size: 512
+        model_features:
+        - 2
+        - 32
+        - 64
+        - 512
+        num_heads: 8
+        transformer_depths:
+        - 2
+        - 2
+        dropout: 0.2
+  text_encoder:
+    target: genQC.models.frozen_open_clip.CachedFrozenOpenCLIPEmbedder
+    save_path: null
+    params:
+      arch: ViT-B-32
+      version: laion2b_s34b_b79k
+      device: cpu
+      max_length: 77
+      freeze: true
+      layer: penultimate
+  device: cpu
+  enable_guidance_train: true
+  guidance_train_p: 0.1
+  cached_text_enc: true
+add_config:
+  dataset:
+    params:
+      num_of_qubits: 3
+      min_gates: 2
+      max_gates: 12
+      gate_pool:
+      - h
+      - cx
+      - z
+      - x
+      - ccx
+      - swap
+      pad_constant: 7
+
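The config follows the target/params convention used throughout genQC: each `target` is a dotted import path and the sibling `params` mapping holds the keyword arguments for it. Below is a minimal sketch of how such a config can be read and resolved. The `load_target` helper is illustrative, not part of the genQC API, and the snippet assumes genQC and PyYAML are installed so the dotted paths import and the file parses.

```python
# Minimal sketch: read config.yaml and resolve its target/params entries.
# load_target is a hypothetical helper, not a genQC function.
import importlib
import yaml

def load_target(dotted_path: str):
    """Import an object from a dotted 'module.ClassName' string."""
    module_name, attr_name = dotted_path.rsplit(".", 1)
    return getattr(importlib.import_module(module_name), attr_name)

with open("config.yaml") as f:
    cfg = yaml.safe_load(f)

pipeline_cls = load_target(cfg["target"])             # DiffusionPipeline_Compilation
scheduler_cfg = cfg["params"]["scheduler"]             # same target/params pattern, one level down
dataset_cfg = cfg["add_config"]["dataset"]["params"]   # 3 qubits, 2-12 gates, gate pool

print(pipeline_cls)
print(dataset_cfg["num_of_qubits"], dataset_cfg["gate_pool"])
```

In practice the pipeline class receives the resolved scheduler, model, and text-encoder sub-configs as constructor inputs; the exact loader entry point inside genQC is not shown here.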
model.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55170daeba77c78a94b99472fa3c207a04532b192fe5d022b04fe8179f119d1c
+size 28839173
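model.pt is stored through Git LFS, so the file committed here is only a pointer; the sha256 oid and byte size above describe the actual weights. A small sketch for checking that a locally downloaded model.pt matches the pointer (the local path is an assumption):

```python
# Sketch: verify a downloaded model.pt against the Git LFS pointer above.
import hashlib
import os

EXPECTED_OID = "55170daeba77c78a94b99472fa3c207a04532b192fe5d022b04fe8179f119d1c"
EXPECTED_SIZE = 28839173

path = "model.pt"  # assumed local path of the fetched weights
size_ok = os.path.getsize(path) == EXPECTED_SIZE

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

print("size ok:", size_ok, "| sha256 ok:", sha.hexdigest() == EXPECTED_OID)
```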