HarborYuan committed
Commit 3992f72 · verified · 1 parent: 1b3e0bd

Upload folder using huggingface_hub

added_tokens.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "</box>": 92552,
+   "</img>": 92545,
+   "</p>": 92555,
+   "</quad>": 92548,
+   "</ref>": 92550,
+   "<IMG_CONTEXT>": 92546,
+   "<box>": 92551,
+   "<img>": 92544,
+   "<p>": 92554,
+   "<quad>": 92547,
+   "<ref>": 92549,
+   "[SEG]": 92553
+ }
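
These twelve ids (92544 through 92555) sit directly below the llm_config vocab_size of 92556 recorded in config.json below. A minimal sketch of checking them after loading the tokenizer; "path/to/this-repo" is a placeholder for a local clone or the Hub repo id, and trust_remote_code=True is assumed because the tokenizer class (tokenization_internlm2.InternLM2Tokenizer) ships with this repo:

```python
# Minimal sketch: confirm the ids in added_tokens.json after loading the
# tokenizer. "path/to/this-repo" is a placeholder, not a real repo id.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this-repo", trust_remote_code=True)

for token, expected in [("<img>", 92544), ("<IMG_CONTEXT>", 92546), ("[SEG]", 92553)]:
    assert tok.convert_tokens_to_ids(token) == expected, token
```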
config.json ADDED
@@ -0,0 +1,201 @@
+ {
+   "_commit_hash": null,
+   "architectures": [
+     "Sa2VAChatModel"
+   ],
+   "auto_map": {
+     "AutoConfig": "configuration_sa2va_chat.Sa2VAChatConfig",
+     "AutoModel": "modeling_sa2va_chat.Sa2VAChatModel",
+     "AutoModelForCausalLM": "modeling_sa2va_chat.Sa2VAChatModel"
+   },
+   "downsample_ratio": 0.5,
+   "dynamic_image_size": true,
+   "force_image_size": 448,
+   "hidden_size": 6144,
+   "llm_config": {
+     "_name_or_path": "internlm/internlm2-chat-20b",
+     "add_cross_attention": false,
+     "architectures": [
+       "InternLM2ForCausalLM"
+     ],
+     "attn_implementation": "flash_attention_2",
+     "auto_map": {
+       "AutoConfig": "configuration_internlm2.InternLM2Config",
+       "AutoModel": "modeling_internlm2.InternLM2ForCausalLM",
+       "AutoModelForCausalLM": "modeling_internlm2.InternLM2ForCausalLM"
+     },
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bias": false,
+     "bos_token_id": 1,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": 2,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "silu",
+     "hidden_size": 6144,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "initializer_range": 0.02,
+     "intermediate_size": 16384,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "max_position_embeddings": 32768,
+     "min_length": 0,
+     "model_type": "internlm2",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 48,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_hidden_layers": 48,
+     "num_key_value_heads": 8,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": 2,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "rms_norm_eps": 1e-05,
+     "rope_scaling": {
+       "factor": 3.0,
+       "type": "dynamic"
+     },
+     "rope_theta": 1000000,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": false,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": "bfloat16",
+     "torchscript": false,
+     "transformers_version": "4.44.0",
+     "typical_p": 1.0,
+     "use_bfloat16": true,
+     "use_cache": true,
+     "vocab_size": 92556
+   },
+   "max_dynamic_patch": 12,
+   "min_dynamic_patch": 1,
+   "model_type": "sa2va_chat",
+   "pad2square": false,
+   "ps_version": "v2",
+   "select_layer": -1,
+   "template": "internlm2_chat",
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": null,
+   "use_backbone_lora": 0,
+   "use_llm_lora": 0,
+   "use_thumbnail": true,
+   "vision_config": {
+     "_name_or_path": "",
+     "add_cross_attention": false,
+     "architectures": [
+       "InternVisionModel"
+     ],
+     "attention_dropout": 0.0,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "drop_path_rate": 0.0,
+     "dropout": 0.0,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": null,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "gelu",
+     "hidden_size": 3200,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "image_size": 448,
+     "initializer_factor": 0.1,
+     "initializer_range": 1e-10,
+     "intermediate_size": 12800,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-06,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "min_length": 0,
+     "model_type": "intern_vit_6b",
+     "no_repeat_ngram_size": 0,
+     "norm_type": "rms_norm",
+     "num_attention_heads": 25,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_channels": 3,
+     "num_hidden_layers": 45,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": null,
+     "patch_size": 14,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "qk_normalization": true,
+     "qkv_bias": false,
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": "bfloat16",
+     "torchscript": false,
+     "transformers_version": "4.44.0",
+     "typical_p": 1.0,
+     "use_bfloat16": true,
+     "use_flash_attn": true
+   }
+ }
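
The auto_map entries above route the Auto* loaders to the remote-code classes bundled with this repo, so loading requires trust_remote_code=True. A minimal sketch, with the repo path again a placeholder:

```python
# Minimal sketch: load the model through the auto_map defined above
# (configuration_sa2va_chat.Sa2VAChatConfig / modeling_sa2va_chat.Sa2VAChatModel).
# "path/to/this-repo" is a placeholder for a local clone or the Hub repo id.
import torch
from transformers import AutoModel

model = AutoModel.from_pretrained(
    "path/to/this-repo",
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" in the config
    trust_remote_code=True,
).eval()
```

As a sanity check on the vision settings: with force_image_size 448, patch_size 14, and downsample_ratio 0.5, each 448×448 tile yields (448 / 14)² × 0.5² = 256 image-context tokens, the usual InternVL-style pixel-shuffle arithmetic; max_dynamic_patch 12 plus use_thumbnail caps a single image at 13 tiles.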
generation_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "_from_model_config": true,
+   "transformers_version": "4.44.0"
+ }
model-00001-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:43bbb2f1ae6d94ebbe6869a4c45b93cb19a562fe06dfab87fa5340c02c9c2f3d
+ size 4988569440
model-00002-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0bed4eccf3cffcb860c1c41269c41527a6bef7c9ef6892be8f5d32f74886e075
+ size 4937253584
model-00003-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c4dc190cfa1b9ba838e0e9799833f89abdea0dbc265d09f394a69a390d5f725c
+ size 4982226456
model-00004-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:58fbbfc9d9c6ce4e9f735569a604be1f62ef338fc25457e9a9b3a23d4e85a089
+ size 4882320144
model-00005-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8f7b4d71f81d5a9c62728385812052b5c9347f027e789d7068c324d369a4081b
+ size 4831988488
model-00006-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dc624351bf98df16fd44bee1fa2ad34099a84338c567d577d1ab5e70c5e2d742
+ size 4680993424
model-00007-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39a0fb6fe0f0810fe0be97aa8531a715a42b07abfcc2c7959949cae20be42aae
+ size 4680993432
model-00008-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7423e836db2ee6e374399ae8cd51187cb8631f030690300a3adf7ecee3d7bdbf
+ size 4680993432
model-00009-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dc9ee2a805c01f9fb0d381b17600d49bc896813a50ae9d0d66548b79d15b7514
+ size 4680993432
model-00010-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:062667da1c93563b17322d678e5827e504c8375427fe6d9dc56b60623aeab06a
+ size 4680993432
model-00011-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:255a37255d9bc4624e3947aa5483155e1644cd87a592706d13deb09078fd1315
+ size 4680993432
model-00012-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:81e19802072ccad224b615cd3217f0f57e17d08346ff1f6ff06793383a03fc4b
+ size 4680993432
model-00013-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c436d924efd685a4a0a5b73131fdfa923c949f47fd78c290024b032dd1d03dc0
+ size 4680993432
model-00014-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59620ec286026e77791e23d0dc20a2c4a1c513605ef88ae35b760a321497e076
+ size 4680993432
model-00015-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:82e8b3689712cfd32418d7f7b3c065d987534cac150f34124b08e9f00074dbb2
+ size 4680993432
model-00016-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7e07890c5b27296b1e7c9a9f5160f739872e8f4296aaef86c018b5dc7cc85c68
+ size 4680993432
model-00017-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ffd4370967aca26c386bc52296997b51ef0cd91c93e699ca70ea5ca28e3c0a09
+ size 4680993432
model-00018-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f8aba5128247f402585acfe39ca2142549974bb190e8640d6367931491de1ad6
+ size 4680993432
model-00019-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:94ada3c509b84bc9fe0a740326c8b37d40b60ddb533db5e2270d81f563b3a0c2
+ size 2768365184
model-00020-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4b65477402b1a80c56e79d67bfc32eceb9a3cbfdfb229451197d0d1b9373deef
+ size 3795542160
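
Each shard entry above is a Git LFS pointer (version, oid, size), not the weights themselves. Summing the size fields gives the download footprint, roughly 92 GB across the 20 shards; a minimal sketch, assuming a checkout where the pointers have not yet been smudged into real files:

```python
# Minimal sketch: total the "size" fields of the LFS pointer files. Assumes
# the clone still contains pointers (e.g. cloned with GIT_LFS_SKIP_SMUDGE=1);
# after smudging, these paths hold the binary shards instead.
from pathlib import Path

total = 0
for ptr in sorted(Path(".").glob("model-*-of-00020.safetensors")):
    fields = dict(line.split(" ", 1) for line in ptr.read_text().splitlines())
    total += int(fields["size"])

print(f"{total / 1e9:.1f} GB across 20 shards")  # ~92.0 GB for this commit
```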
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,62 @@
+ {
+   "additional_special_tokens": [
+     "<img>",
+     "</img>",
+     "<IMG_CONTEXT>",
+     "<quad>",
+     "</quad>",
+     "<ref>",
+     "</ref>",
+     "<box>",
+     "</box>",
+     {
+       "content": "[SEG]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<p>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "</p>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     }
+   ],
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
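
Note that pad_token and eos_token share "</s>" above, so padded positions carry the EOS id. A minimal sketch (placeholder repo path):

```python
# Minimal sketch: per special_tokens_map.json, pad and eos are both "</s>".
# "path/to/this-repo" is a placeholder.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this-repo", trust_remote_code=True)
assert tok.pad_token == tok.eos_token == "</s>"
batch = tok(["a", "a longer sentence"], padding=True)  # pads with the eos id
```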
tokenization_internlm2.py ADDED
@@ -0,0 +1,235 @@
+ # Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
+ #
+ # This code is based on transformers/src/transformers/models/llama/tokenization_llama.py
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """Tokenization classes for InternLM."""
+ import os
+ from shutil import copyfile
+ from typing import Any, Dict, List, Optional, Tuple
+
+ import sentencepiece as spm
+ from transformers.tokenization_utils import PreTrainedTokenizer
+ from transformers.utils import logging
+
+ logger = logging.get_logger(__name__)
+
+ VOCAB_FILES_NAMES = {'vocab_file': './tokenizer.model'}
+
+ PRETRAINED_VOCAB_FILES_MAP = {}
+
+
+ # Modified from transformers.model.llama.tokenization_llama.LlamaTokenizer
+ class InternLM2Tokenizer(PreTrainedTokenizer):
+     """
+     Construct an InternLM2 tokenizer. Based on byte-level Byte-Pair-Encoding.
+
+     Args:
+         vocab_file (`str`):
+             Path to the vocabulary file.
+     """
+
+     vocab_files_names = VOCAB_FILES_NAMES
+     pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+     model_input_names = ['input_ids', 'attention_mask']
+     _auto_class = 'AutoTokenizer'
+
+     def __init__(
+         self,
+         vocab_file,
+         unk_token='<unk>',
+         bos_token='<s>',
+         eos_token='</s>',
+         pad_token='</s>',
+         sp_model_kwargs: Optional[Dict[str, Any]] = None,
+         add_bos_token=True,
+         add_eos_token=False,
+         decode_with_prefix_space=False,
+         clean_up_tokenization_spaces=False,
+         **kwargs,
+     ):
+         self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+         self.vocab_file = vocab_file
+         self.add_bos_token = add_bos_token
+         self.add_eos_token = add_eos_token
+         self.decode_with_prefix_space = decode_with_prefix_space
+         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+         self.sp_model.Load(vocab_file)
+         self._no_prefix_space_tokens = None
+         super().__init__(
+             bos_token=bos_token,
+             eos_token=eos_token,
+             unk_token=unk_token,
+             pad_token=pad_token,
+             clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+             **kwargs,
+         )
+
+     @property
+     def no_prefix_space_tokens(self):
+         if self._no_prefix_space_tokens is None:
+             vocab = self.convert_ids_to_tokens(list(range(self.vocab_size)))
+             self._no_prefix_space_tokens = {i for i, tok in enumerate(vocab) if not tok.startswith('▁')}
+         return self._no_prefix_space_tokens
+
+     @property
+     def vocab_size(self):
+         """Returns vocab size."""
+         return self.sp_model.get_piece_size()
+
+     @property
+     def bos_token_id(self) -> Optional[int]:
+         return self.sp_model.bos_id()
+
+     @property
+     def eos_token_id(self) -> Optional[int]:
+         return self.sp_model.eos_id()
+
+     def get_vocab(self):
+         """Returns vocab as a dict."""
+         vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+         vocab.update(self.added_tokens_encoder)
+         return vocab
+
+     def _tokenize(self, text):
+         """Returns a tokenized string."""
+         return self.sp_model.encode(text, out_type=str)
+
+     def _convert_token_to_id(self, token):
+         """Converts a token (str) to an id using the vocab."""
+         return self.sp_model.piece_to_id(token)
+
+     def _convert_id_to_token(self, index):
+         """Converts an index (integer) to a token (str) using the vocab."""
+         token = self.sp_model.IdToPiece(index)
+         return token
+
+     def _maybe_add_prefix_space(self, tokens, decoded):
+         if tokens and tokens[0] not in self.no_prefix_space_tokens:
+             return ' ' + decoded
+         else:
+             return decoded
+
+     def convert_tokens_to_string(self, tokens):
+         """Converts a sequence of tokens (strings) to a single string."""
+         current_sub_tokens = []
+         out_string = ''
+         prev_is_special = False
+         for token in tokens:
+             # make sure that special tokens are not decoded using sentencepiece model
+             if token in self.all_special_tokens:
+                 if not prev_is_special:
+                     out_string += ' '
+                 out_string += self.sp_model.decode(current_sub_tokens) + token
+                 prev_is_special = True
+                 current_sub_tokens = []
+             else:
+                 current_sub_tokens.append(token)
+                 prev_is_special = False
+         out_string += self.sp_model.decode(current_sub_tokens)
+         out_string = self.clean_up_tokenization(out_string)
+         out_string = self._maybe_add_prefix_space(tokens=tokens, decoded=out_string)
+         return out_string[1:]
+
+     def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
+         """
+         Save the vocabulary and special tokens file to a directory.
+
+         Args:
+             save_directory (`str`):
+                 The directory in which to save the vocabulary.
+
+         Returns:
+             `Tuple(str)`: Paths to the files saved.
+         """
+         if not os.path.isdir(save_directory):
+             logger.error(f'Vocabulary path ({save_directory}) should be a directory')
+             return
+         out_vocab_file = os.path.join(
+             save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
+         )
+
+         if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+             copyfile(self.vocab_file, out_vocab_file)
+         elif not os.path.isfile(self.vocab_file):
+             with open(out_vocab_file, 'wb') as fi:
+                 content_spiece_model = self.sp_model.serialized_model_proto()
+                 fi.write(content_spiece_model)
+
+         return (out_vocab_file,)
+
+     def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+         if self.add_bos_token:
+             bos_token_ids = [self.bos_token_id]
+         else:
+             bos_token_ids = []
+
+         output = bos_token_ids + token_ids_0
+
+         if token_ids_1 is not None:
+             output = output + token_ids_1
+
+         if self.add_eos_token:
+             output = output + [self.eos_token_id]
+
+         return output
+
+     def get_special_tokens_mask(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+     ) -> List[int]:
+         """
+         Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+         special tokens using the tokenizer `prepare_for_model` method.
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+             already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                 Whether or not the token list is already formatted with special tokens for the model.
+
+         Returns:
+             `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+         """
+         if already_has_special_tokens:
+             return super().get_special_tokens_mask(
+                 token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+             )
+
+         if token_ids_1 is None:
+             return [1] + ([0] * len(token_ids_0)) + [1]
+         return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
+
+     def create_token_type_ids_from_sequences(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+     ) -> List[int]:
+         """
+         Create a mask from the two sequences passed to be used in a sequence-pair classification task. InternLM2 does
+         not make use of token type ids, therefore a list of zeros is returned.
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+
+         Returns:
+             `List[int]`: List of zeros.
+         """
+         eos = [self.eos_token_id]
+
+         if token_ids_1 is None:
+             return len(token_ids_0 + eos) * [0]
+         return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
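
As defined above, encoding prepends BOS and appends no EOS by default (add_bos_token=True, add_eos_token=False). A minimal sketch exercising that behavior, with a placeholder repo path:

```python
# Minimal sketch: round-trip text through InternLM2Tokenizer's defaults.
# "path/to/this-repo" is a placeholder.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this-repo", trust_remote_code=True)

ids = tok("segment the red car")["input_ids"]
assert ids[0] == tok.bos_token_id   # BOS (<s>, id 1) is prepended
assert ids[-1] != tok.eos_token_id  # no EOS appended by default
print(tok.decode(ids, skip_special_tokens=True))
```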
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f868398fc4e05ee1e8aeba95ddf18ddcc45b8bce55d5093bead5bbf80429b48b
+ size 1477754
tokenizer_config.json ADDED
@@ -0,0 +1,201 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92538": {
+       "content": "<|plugin|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92539": {
+       "content": "<|interpreter|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92540": {
+       "content": "<|action_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92541": {
+       "content": "<|action_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92542": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92543": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92544": {
+       "content": "<img>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92545": {
+       "content": "</img>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92546": {
+       "content": "<IMG_CONTEXT>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92547": {
+       "content": "<quad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92548": {
+       "content": "</quad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92549": {
+       "content": "<ref>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92550": {
+       "content": "</ref>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92551": {
+       "content": "<box>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92552": {
+       "content": "</box>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92553": {
+       "content": "[SEG]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92554": {
+       "content": "<p>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "92555": {
+       "content": "</p>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<img>",
+     "</img>",
+     "<IMG_CONTEXT>",
+     "<quad>",
+     "</quad>",
+     "<ref>",
+     "</ref>",
+     "<box>",
+     "</box>",
+     "[SEG]",
+     "<p>",
+     "</p>"
+   ],
+   "auto_map": {
+     "AutoTokenizer": [
+       "tokenization_internlm2.InternLM2Tokenizer",
+       null
+     ]
+   },
+   "bos_token": "<s>",
+   "chat_template": "{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "model_max_length": 8192,
+   "pad_token": "</s>",
+   "padding_side": "right",
+   "tokenizer_class": "InternLM2Tokenizer",
+   "unk_token": "<unk>"
+ }
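
The chat_template above implements the internlm2_chat format: a BOS token, then each turn wrapped in <|im_start|>role ... <|im_end|>. A minimal sketch of rendering it, again with a placeholder repo path:

```python
# Minimal sketch: render the internlm2_chat template from tokenizer_config.json.
# "path/to/this-repo" is a placeholder.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this-repo", trust_remote_code=True)

messages = [{"role": "user", "content": "Describe the image."}]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# <s><|im_start|>user
# Describe the image.<|im_end|>
# <|im_start|>assistant
```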