Commit 773324a (verified) · 1 Parent(s): 19490b1
Committed by YufeiWeng

Model save
README.md CHANGED
```diff
@@ -1,9 +1,6 @@
 ---
-license: apache-2.0
-base_model: google/vit-base-patch16-224-in21k
+base_model: microsoft/dit-base-finetuned-rvlcdip
 tags:
-- image-classification
-- vision
 - generated_from_trainer
 metrics:
 - f1
@@ -17,10 +14,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # donut-base-beans
 
-This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on an unknown dataset.
+This model is a fine-tuned version of [microsoft/dit-base-finetuned-rvlcdip](https://huggingface.co/microsoft/dit-base-finetuned-rvlcdip) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.0410
-- F1: 0.9461
+- F1: 0.9705
+- Loss: 0.0391
 
 ## Model description
 
@@ -51,11 +48,11 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | F1     | Validation Loss |
 |:-------------:|:-----:|:----:|:------:|:---------------:|
-| 0.0663        | 1.0   | 1981 | 0.9705 | 0.0594          |
-| 0.0747        | 2.0   | 3962 | 0.9705 | 0.0410          |
-| 0.0676        | 3.0   | 5943 | 0.9705 | 0.0463          |
-| 0.0706        | 4.0   | 7924 | 0.9705 | 0.0445          |
-| 0.0595        | 5.0   | 9905 | 0.0460 | 0.9429          |
+| 0.0594        | 1.0   | 1981 | 0.9705 | 0.0440          |
+| 0.0678        | 2.0   | 3962 | 0.9705 | 0.0447          |
+| 0.0615        | 3.0   | 5943 | 0.9705 | 0.0416          |
+| 0.0685        | 4.0   | 7924 | 0.9705 | 0.0385          |
+| 0.0582        | 5.0   | 9905 | 0.9705 | 0.0391          |
 
 
 ### Framework versions
```
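The card now tracks a BEiT-based DiT backbone instead of ViT. For context, a minimal inference sketch against this checkpoint; the repo id `YufeiWeng/donut-base-beans` is an assumption inferred from the commit author and model name, and the input path is a placeholder:

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

repo_id = "YufeiWeng/donut-base-beans"  # assumed repo id, not confirmed by the diff
processor = AutoImageProcessor.from_pretrained(repo_id)           # resolves to BeitImageProcessor
model = AutoModelForImageClassification.from_pretrained(repo_id)  # resolves to BeitForImageClassification
model.eval()

image = Image.open("document.png").convert("RGB")  # placeholder input image
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])
```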
all_results.json CHANGED
```diff
@@ -1,13 +1,13 @@
 {
     "epoch": 5.0,
-    "eval_f1": 0.9460782887783499,
-    "eval_loss": 0.04101279005408287,
-    "eval_runtime": 559.6733,
-    "eval_samples_per_second": 368.54,
-    "eval_steps_per_second": 2.88,
-    "total_flos": 9.820471825285631e+19,
-    "train_loss": 0.011631221487809769,
-    "train_runtime": 2940.6816,
-    "train_samples_per_second": 430.955,
-    "train_steps_per_second": 3.368
+    "eval_f1": 0.9557601497125016,
+    "eval_loss": 0.038467586040496826,
+    "eval_runtime": 795.0811,
+    "eval_samples_per_second": 259.423,
+    "eval_steps_per_second": 2.027,
+    "total_flos": 9.82152667464321e+19,
+    "train_loss": 0.0,
+    "train_runtime": 0.074,
+    "train_samples_per_second": 17128131.172,
+    "train_steps_per_second": 133870.543
 }
```
config.json CHANGED
```diff
@@ -1,10 +1,15 @@
 {
-  "_name_or_path": "google/vit-base-patch16-224-in21k",
+  "_name_or_path": "microsoft/dit-base-finetuned-rvlcdip",
+  "add_fpn": false,
   "architectures": [
-    "ViTForImageClassification"
+    "BeitForImageClassification"
   ],
   "attention_probs_dropout_prob": 0.0,
-  "encoder_stride": 16,
+  "auxiliary_channels": 256,
+  "auxiliary_concat_input": false,
+  "auxiliary_loss_weight": 0.4,
+  "auxiliary_num_convs": 1,
+  "drop_path_rate": 0.1,
   "finetuning_task": "image-classification",
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.0,
@@ -19,13 +24,54 @@
     "Value(dtype='int64', id=None)": "0"
   },
   "layer_norm_eps": 1e-12,
-  "model_type": "vit",
+  "layer_scale_init_value": 0.1,
+  "model_type": "beit",
   "num_attention_heads": 12,
   "num_channels": 3,
   "num_hidden_layers": 12,
+  "out_features": [
+    "stage3",
+    "stage5",
+    "stage7",
+    "stage11"
+  ],
+  "out_indices": [
+    3,
+    5,
+    7,
+    11
+  ],
   "patch_size": 16,
-  "problem_type": "regression",
-  "qkv_bias": true,
+  "pool_scales": [
+    1,
+    2,
+    3,
+    6
+  ],
+  "reshape_hidden_states": true,
+  "semantic_loss_ignore_index": 255,
+  "stage_names": [
+    "stem",
+    "stage1",
+    "stage2",
+    "stage3",
+    "stage4",
+    "stage5",
+    "stage6",
+    "stage7",
+    "stage8",
+    "stage9",
+    "stage10",
+    "stage11",
+    "stage12"
+  ],
   "torch_dtype": "float32",
-  "transformers_version": "4.43.0.dev0"
+  "transformers_version": "4.43.0.dev0",
+  "use_absolute_position_embeddings": true,
+  "use_auxiliary_head": true,
+  "use_mask_token": false,
+  "use_mean_pooling": true,
+  "use_relative_position_bias": false,
+  "use_shared_relative_position_bias": false,
+  "vocab_size": 8192
 }
```
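With `model_type` switched from `vit` to `beit`, the Auto classes will now instantiate a BEiT model from this config. A shape-level sketch of rebuilding the architecture; values are copied from the diff, and any field not visible above keeps its `BeitConfig` default, so this is a sketch rather than an exact rebuild:

```python
from transformers import BeitConfig, BeitForImageClassification

# Values copied from the new config.json; unlisted fields keep BeitConfig defaults.
config = BeitConfig(
    hidden_act="gelu",
    hidden_dropout_prob=0.0,
    attention_probs_dropout_prob=0.0,
    layer_norm_eps=1e-12,
    layer_scale_init_value=0.1,
    drop_path_rate=0.1,
    num_hidden_layers=12,
    num_attention_heads=12,
    num_channels=3,
    patch_size=16,
    use_absolute_position_embeddings=True,
    use_mean_pooling=True,
    vocab_size=8192,
)
model = BeitForImageClassification(config)  # randomly initialized weights
print(f"{sum(p.numel() for p in model.parameters()):,} parameters")
```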
eval_results.json CHANGED
```diff
@@ -1,8 +1,8 @@
 {
     "epoch": 5.0,
-    "eval_f1": 0.9460782887783499,
-    "eval_loss": 0.04101279005408287,
-    "eval_runtime": 559.6733,
-    "eval_samples_per_second": 368.54,
-    "eval_steps_per_second": 2.88
+    "eval_f1": 0.9557601497125016,
+    "eval_loss": 0.038467586040496826,
+    "eval_runtime": 795.0811,
+    "eval_samples_per_second": 259.423,
+    "eval_steps_per_second": 2.027
 }
```
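`eval_f1` is produced by the trainer's `compute_metrics` hook; a sketch of reproducing such a score with the `evaluate` library. The arrays below are placeholders, and the averaging mode is an assumption since neither the card nor the diff states it:

```python
import evaluate

f1 = evaluate.load("f1")
predictions = [0, 1, 1, 0, 1]  # placeholder model predictions
references = [0, 1, 0, 0, 1]   # placeholder ground-truth labels
# average="weighted" is an assumption; "binary" or "macro" are equally plausible
print(f1.compute(predictions=predictions, references=references, average="weighted"))
```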
model.safetensors CHANGED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:572303f761b7a7387078fed0e6d4bb406e84528aa38cd5998d45fe55f1830673
-size 343220892
+oid sha256:e5e6ad99f326c79142618188e2f2311b61803f4a73856b3c1a458bcde5366aa2
+size 343258940
```
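This file is a git-LFS pointer, so only the hash and size change here; the ~343 MB of tensors live in LFS storage. A sketch of inspecting the real file with `safetensors`, assuming it has already been downloaded:

```python
from safetensors import safe_open

# Assumes model.safetensors has been fetched from LFS (e.g. via `git lfs pull`).
with safe_open("model.safetensors", framework="pt") as f:
    names = list(f.keys())
print(len(names), "tensors; first few:", names[:3])
```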
p_object.json CHANGED
The diff for this file is too large to render. See raw diff
 
prediction_reference.json CHANGED
The diff for this file is too large to render. See raw diff
 
preprocessor_config.json CHANGED
```diff
@@ -1,5 +1,11 @@
 {
+  "crop_size": {
+    "height": 224,
+    "width": 224
+  },
+  "do_center_crop": false,
   "do_normalize": true,
+  "do_reduce_labels": false,
   "do_rescale": true,
   "do_resize": true,
   "image_mean": [
@@ -7,7 +13,7 @@
     0.5,
     0.5
   ],
-  "image_processor_type": "ViTImageProcessor",
+  "image_processor_type": "BeitImageProcessor",
   "image_std": [
     0.5,
     0.5,
```
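The new config describes BEiT-style preprocessing: resize, rescale, and normalize with mean and std of 0.5, with center-cropping defined but disabled. A sketch constructing the equivalent processor by hand; the `size` field isn't visible in the diff, so the 224x224 value is an assumption, and the input path is a placeholder:

```python
from PIL import Image
from transformers import BeitImageProcessor

processor = BeitImageProcessor(
    do_resize=True,
    size={"height": 224, "width": 224},  # assumed; "size" isn't shown in the diff
    do_rescale=True,
    do_normalize=True,
    do_center_crop=False,                # crop_size is defined but unused
    do_reduce_labels=False,
    image_mean=[0.5, 0.5, 0.5],
    image_std=[0.5, 0.5, 0.5],
)
pixel_values = processor(Image.open("page.png").convert("RGB"),  # placeholder image
                         return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 224, 224]) given the assumed size
```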
train_results.json CHANGED
```diff
@@ -1,8 +1,8 @@
 {
     "epoch": 5.0,
-    "total_flos": 9.820471825285631e+19,
-    "train_loss": 0.011631221487809769,
-    "train_runtime": 2940.6816,
-    "train_samples_per_second": 430.955,
-    "train_steps_per_second": 3.368
+    "total_flos": 9.82152667464321e+19,
+    "train_loss": 0.0,
+    "train_runtime": 0.074,
+    "train_samples_per_second": 17128131.172,
+    "train_steps_per_second": 133870.543
 }
```
trainer_state.json CHANGED
The diff for this file is too large to render. See raw diff
 
training_args.bin CHANGED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e9d8436778df1e6f25ce1b6ddeaa31273eac5f992183149befb2bb90f6aa8b36
-size 5240
+oid sha256:fa4d5febffce511b6da18d708f0e85b9ddaebce0c9548a11dc40ef9e37284f67
+size 5368
```