colva_internvl2_4b / config.json
{
"_commit_hash": null,
"_name_or_path": "./OpenGVLab/InternVL2-4B",
"architectures": [
"InternVLChatModel"
],
"auto_map": {
"AutoConfig": "configuration_internvl_chat.InternVLChatConfig",
"AutoModel": "modeling_internvl_chat.InternVLChatModel",
"AutoModelForCausalLM": "modeling_internvl_chat.InternVLChatModel"
},
"downsample_ratio": 0.5,
"dynamic_image_size": true,
"force_image_size": 448,
"llm_config": {
"_attn_implementation_autoset": false,
"_name_or_path": "microsoft/Phi-3-mini-128k-instruct",
"add_cross_attention": false,
"architectures": [
"Phi3ForCausalLM"
],
"attention_dropout": 0.0,
"auto_map": {
"AutoConfig": "configuration_phi3.Phi3Config",
"AutoModelForCausalLM": "modeling_phi3.Phi3ForCausalLM"
},
"bad_words_ids": null,
"begin_suppress_tokens": null,
"bos_token_id": 1,
"chunk_size_feed_forward": 0,
"cross_attention_hidden_size": null,
"decoder_start_token_id": null,
"diversity_penalty": 0.0,
"do_sample": false,
"early_stopping": false,
"embd_pdrop": 0.0,
"encoder_no_repeat_ngram_size": 0,
"eos_token_id": 32000,
"exponential_decay_length_penalty": null,
"finetuning_task": null,
"forced_bos_token_id": null,
"forced_eos_token_id": null,
"hidden_act": "silu",
"hidden_size": 3072,
"id2label": {
"0": "LABEL_0",
"1": "LABEL_1"
},
"initializer_range": 0.02,
"intermediate_size": 8192,
"is_decoder": false,
"is_encoder_decoder": false,
"label2id": {
"LABEL_0": 0,
"LABEL_1": 1
},
"length_penalty": 1.0,
"max_length": 20,
"max_position_embeddings": 131072,
"min_length": 0,
"model_type": "phi3",
"no_repeat_ngram_size": 0,
"num_attention_heads": 32,
"num_beam_groups": 1,
"num_beams": 1,
"num_hidden_layers": 32,
"num_key_value_heads": 32,
"num_return_sequences": 1,
"original_max_position_embeddings": 4096,
"output_attentions": false,
"output_hidden_states": false,
"output_scores": false,
"pad_token_id": 32000,
"prefix": null,
"problem_type": null,
"pruned_heads": {},
"remove_invalid_values": false,
"repetition_penalty": 1.0,
"resid_pdrop": 0.0,
"return_dict": true,
"return_dict_in_generate": false,
"rms_norm_eps": 1e-05,
"rope_scaling": {
"long_factor": [
1.0299999713897705,
1.0499999523162842,
1.0499999523162842,
1.0799999237060547,
1.2299998998641968,
1.2299998998641968,
1.2999999523162842,
1.4499999284744263,
1.5999999046325684,
1.6499998569488525,
1.8999998569488525,
2.859999895095825,
3.68999981880188,
5.419999599456787,
5.489999771118164,
5.489999771118164,
9.09000015258789,
11.579999923706055,
15.65999984741211,
15.769999504089355,
15.789999961853027,
18.360000610351562,
21.989999771118164,
23.079999923706055,
30.009998321533203,
32.35000228881836,
32.590003967285156,
35.56000518798828,
39.95000457763672,
53.840003967285156,
56.20000457763672,
57.95000457763672,
59.29000473022461,
59.77000427246094,
59.920005798339844,
61.190006256103516,
61.96000671386719,
62.50000762939453,
63.3700065612793,
63.48000717163086,
63.48000717163086,
63.66000747680664,
63.850006103515625,
64.08000946044922,
64.760009765625,
64.80001068115234,
64.81001281738281,
64.81001281738281
],
"short_factor": [
1.05,
1.05,
1.05,
1.1,
1.1,
1.1500000000000001,
1.2000000000000002,
1.2500000000000002,
1.3000000000000003,
1.3500000000000003,
1.5000000000000004,
2.000000000000001,
2.000000000000001,
2.000000000000001,
2.000000000000001,
2.000000000000001,
2.000000000000001,
2.000000000000001,
2.000000000000001,
2.000000000000001,
2.000000000000001,
2.000000000000001,
2.000000000000001,
2.000000000000001,
2.000000000000001,
2.000000000000001,
2.000000000000001,
2.000000000000001,
2.000000000000001,
2.000000000000001,
2.000000000000001,
2.000000000000001,
2.0500000000000007,
2.0500000000000007,
2.0500000000000007,
2.1000000000000005,
2.1000000000000005,
2.1000000000000005,
2.1500000000000004,
2.1500000000000004,
2.3499999999999996,
2.549999999999999,
2.5999999999999988,
2.5999999999999988,
2.7499999999999982,
2.849999999999998,
2.849999999999998,
2.9499999999999975
],
"type": "su"
},
"rope_theta": 10000.0,
"sep_token_id": null,
"sliding_window": 262144,
"suppress_tokens": null,
"task_specific_params": null,
"temperature": 1.0,
"tf_legacy_loss": false,
"tie_encoder_decoder": false,
"tie_word_embeddings": false,
"tokenizer_class": null,
"top_k": 50,
"top_p": 1.0,
"torch_dtype": "bfloat16",
"torchscript": false,
"transformers_version": "4.47.1",
"typical_p": 1.0,
"use_bfloat16": true,
"use_cache": true,
"vocab_size": 32020
},
"max_dynamic_patch": 12,
"min_dynamic_patch": 1,
"model_type": "internvl_chat",
"ps_version": "v2",
"radio_config": {
"_attn_implementation_autoset": true,
"_name_or_path": "./nvidia/RADIO",
"adaptor_names": null,
"add_cross_attention": false,
"architectures": [
"RADIOModel"
],
"args": {
"aa": null,
"amp": false,
"amp_dtype": "float16",
"amp_impl": "native",
"aug_repeats": 0,
"aug_splits": 0,
"auto_loss_balance_mode": "manual",
"batch_size": 32,
"bn_eps": null,
"bn_momentum": null,
"cache_dir": null,
"channels_last": false,
"checkpoint_hist": 10,
"chk_keep_forever": 10,
"class_map": "",
"clip_grad": null,
"clip_mode": "norm",
"cls_token_per_teacher": true,
"coco_annotations_file": "/datasets/coco2017-adlsa/annotations/captions_val2017.json",
"coco_image_dir": "/datasets/coco2017-adlsa/val2017",
"color_jitter": 0.4,
"cooldown_epochs": 0,
"cpe_max_size": 2048,
"crd_loss": false,
"crd_loss_weight": 0.8,
"crop_pct": null,
"cutmix": 0.0,
"cutmix_minmax": null,
"data_dir": [
[
"/lustre/fsw/portfolios/llmservice/projects/llmservice_nlp_fm/datasets/captioning/datacomp/dc1b/stage2",
0.95
],
[
"/lustre/fsw/portfolios/llmservice/projects/llmservice_nlp_fm/datasets/segmentation/sam/stage1",
0.05
]
],
"dataset": "nvgpt4",
"dataset_download": false,
"debug_full_knn": false,
"decay_epochs": 90,
"decay_milestones": [
90,
180,
270
],
"decay_rate": 0.1,
"device": "cuda:0",
"dist_bn": "reduce",
"distributed": true,
"drop": 0.0,
"drop_block": null,
"drop_connect": null,
"drop_path": null,
"dtype": "bfloat16",
"epoch_repeats": 0.0,
"epochs": 50,
"eval": false,
"eval_metric": "knn_top1",
"eval_teacher": false,
"eval_teacher_only": false,
"eval_throughput": false,
"experiment": "checkpoints",
"fast_norm": false,
"feature_summarizer": "cls_token",
"feature_upscale_factor": null,
"force_new_wandb_id": false,
"force_spectral_reparam": false,
"fuser": "",
"gp": null,
"grad_accum_steps": 1,
"grad_checkpointing": false,
"head_init_bias": null,
"head_init_scale": null,
"hflip": 0.5,
"img_size": null,
"in_chans": null,
"initial_checkpoint": "",
"input_size": null,
"interpolation": "",
"layer_decay": null,
"local_rank": 0,
"log_interval": 50,
"log_mlflow": false,
"log_wandb": true,
"loss": "cosine",
"loss_auto_balance": false,
"lr": 0.001,
"lr_base": 0.1,
"lr_base_scale": "",
"lr_base_size": 256,
"lr_cycle_decay": 0.5,
"lr_cycle_limit": 1,
"lr_cycle_mul": 1.0,
"lr_k_decay": 1.0,
"lr_noise": null,
"lr_noise_pct": 0.67,
"lr_noise_std": 1.0,
"mean": null,
"mesa": {
"gaussian_kl": false,
"start_epoch": 100
},
"min_lr": 0,
"mixup": 0.0,
"mixup_mode": "batch",
"mixup_off_epoch": 0,
"mixup_prob": 1.0,
"mixup_switch_prob": 0.5,
"mlp_hidden_size": 1520,
"mlp_num_inner": 3,
"mlp_version": "v2",
"model": "vit_huge_patch16_224_mlpnorm",
"model_ema": {
"decay": 0.9998,
"force_cpu": false,
"power": false,
"power_stds": [
0.05,
0.1
],
"start_epoch": 0
},
"model_kwargs": {},
"model_norm": true,
"momentum": 0.9,
"no_aug": false,
"no_ddp_bb": false,
"no_prefetcher": false,
"no_resume_opt": false,
"num_classes": null,
"opt": "lamb",
"opt_betas": null,
"opt_eps": null,
"opt_kwargs": {
"filter_bias_and_bn": false
},
"output": "/lustre/fs6/portfolios/llmservice/users/mranzinger/output/evfm/ohem/3-13-24_vit-h-16_bf16_ep50",
"patience_epochs": 10,
"pin_mem": false,
"prefetcher": true,
"pretrained": false,
"rank": 0,
"ratio": [
0.75,
1.3333333333333333
],
"recount": 1,
"recovery_interval": 0,
"register_multiple": 16,
"remode": "pixel",
"reprob": 0.0,
"resplit": false,
"resume": "/lustre/fs6/portfolios/llmservice/users/mranzinger/output/evfm/ohem/3-13-24_vit-h-16_bf16_ep50/checkpoints/checkpoint-48.pth.tar",
"save_images": false,
"scale": [
0.5,
1.0
],
"sched": "cosine",
"sched_on_updates": true,
"seed": 42,
"smoothing": 0.1,
"spectral_reparam": false,
"split_bn": false,
"start_epoch": null,
"std": null,
"steps_per_epoch": 2000,
"sync_bn": false,
"synchronize_step": true,
"teachers": [
{
"amp": true,
"amp_dtype": "bfloat16",
"batch_size": 16,
"data_dir": [
[
"/lustre/fsw/portfolios/llmservice/projects/llmservice_nlp_fm/datasets/captioning/datacomp/dc1b/stage2",
0.95
],
[
"/lustre/fsw/portfolios/llmservice/projects/llmservice_nlp_fm/datasets/segmentation/sam/stage1",
0.05
]
],
"fd_loss_weight": 1.0,
"fd_normalize": false,
"feature_distillation": true,
"input_size": 378,
"match_pre_proj": false,
"model": "ViT-H-14-378-quickgelu",
"name": "clip",
"pretrained": "dfn5b",
"sample_rate": 16,
"student_resolution": 432,
"summary_loss_weight": 1.0,
"torchcompile": true,
"type": "open_clip",
"vitdet_prob": 0.05,
"vitdet_window_sizes": [
3,
9,
9,
9
]
},
{
"amp": false,
"amp_dtype": "bfloat16",
"batch_size": 16,
"fd_loss_weight": 0.8,
"fd_normalize": false,
"feature_distillation": true,
"input_size": 336,
"match_pre_proj": false,
"model": "ViT-L/14@336px",
"name": "openai_clip",
"pretrained": "openai",
"sample_rate": 16,
"summary_loss_weight": 0.8,
"torchcompile": true,
"type": "openai_clip",
"use_summary": false
},
{
"amp": true,
"amp_dtype": "bfloat16",
"batch_size": 16,
"fd_loss_weight": 2.0,
"fd_normalize": false,
"feature_distillation": true,
"input_size": 378,
"model": "dinov2_vitg14_reg",
"name": "dino_v2",
"sample_rate": 16,
"summary_loss_weight": 1.0,
"torchcompile": true,
"type": "dino_v2"
},
{
"amp": false,
"batch_size": 2,
"data_dir": [
[
"/lustre/fsw/portfolios/llmservice/projects/llmservice_nlp_fm/datasets/segmentation/sam/stage1",
0.4
]
],
"fd_loss_fn": "MSE",
"fd_loss_weight": 0.25,
"fd_normalize": false,
"fd_ohem": true,
"feature_distillation": true,
"input_size": 1024,
"model": "vit-h",
"name": "sam",
"sample_rate": 2,
"student_resolution": 1024,
"summary_loss_weight": 1e-05,
"type": "sam",
"use_summary": false,
"vitdet_prob": 0.99,
"vitdet_window_sizes": [
8,
16,
16
]
},
{
"amp": true,
"batch_size": 2,
"data_dir": [
[
"/lustre/fsw/portfolios/llmservice/projects/llmservice_nlp_fm/datasets/ocr/publaynet/webdataset",
0.4
],
[
"/lustre/fsw/portfolios/llmservice/projects/llmservice_nlp_fm/datasets/ocr/staging/arxiv/hocr",
0.4
],
[
"/lustre/fsw/portfolios/llmservice/projects/llmservice_nlp_fm/datasets/ocr/scene-text/scene-text/text_ocr/webdataset",
0.15
],
[
"/lustre/fsw/portfolios/llmservice/projects/llmservice_nlp_fm/datasets/ocr/scene-text/scene-text/hiertext/webdataset",
0.05
]
],
"fd_loss_fn": "MSE",
"fd_loss_weight": 0.13,
"fd_normalize": false,
"fd_ohem": true,
"fd_upsample_factor": 4,
"feature_distillation": true,
"input_size": 1024,
"model": "quality",
"name": "rtx-translate",
"sample_rate": 2,
"student_resolution": 1024,
"summary_loss_weight": 1e-05,
"type": "rtx_translate",
"use_summary": false,
"vitdet_prob": 0.99,
"vitdet_window_sizes": [
8,
16,
16
]
}
],
"torchcompile": null,
"torchscript": false,
"train_interpolation": "random",
"train_split": "train",
"tta": 0,
"use_coco": false,
"use_multi_epochs_loader": false,
"val_data_dir": "/lustre/fsw/portfolios/llmservice/projects/llmservice_nlp_fm/datasets/classification/imagenet-1k/webdataset",
"val_ema_only": false,
"val_img_size": 432,
"val_jobs_script": "run_validation_jobs_vit-h-16.sh",
"val_split": "val",
"validation_batch_size": 64,
"vflip": 0.0,
"wandb_entity": "",
"wandb_group": "ohem",
"wandb_job_type": "",
"wandb_name": "",
"wandb_project": "",
"warmup_epochs": 0.5,
"warmup_lr": 1e-05,
"warmup_prefix": false,
"weight_decay": 0.02,
"worker_seeding": "all",
"workers": 8,
"world_size": 128
},
"auto_map": {
"AutoConfig": "configuraion_radio.RADIOConfig",
"AutoModel": "modeling_radio.RADIOModel"
},
"bad_words_ids": null,
"begin_suppress_tokens": null,
"bos_token_id": null,
"chunk_size_feed_forward": 0,
"cross_attention_hidden_size": null,
"decoder_start_token_id": null,
"diversity_penalty": 0.0,
"do_sample": false,
"early_stopping": false,
"encoder_no_repeat_ngram_size": 0,
"eos_token_id": null,
"exponential_decay_length_penalty": null,
"finetuning_task": null,
"forced_bos_token_id": null,
"forced_eos_token_id": null,
"id2label": {
"0": "LABEL_0",
"1": "LABEL_1"
},
"is_decoder": false,
"is_encoder_decoder": false,
"label2id": {
"LABEL_0": 0,
"LABEL_1": 1
},
"length_penalty": 1.0,
"max_length": 20,
"max_resolution": 2048,
"min_length": 0,
"model_type": "",
"no_repeat_ngram_size": 0,
"num_beam_groups": 1,
"num_beams": 1,
"num_return_sequences": 1,
"output_attentions": false,
"output_hidden_states": false,
"output_scores": false,
"pad_token_id": null,
"patch_size": 16,
"preferred_resolution": [
432,
432
],
"prefix": null,
"problem_type": null,
"pruned_heads": {},
"remove_invalid_values": false,
"repetition_penalty": 1.0,
"return_dict": true,
"return_dict_in_generate": false,
"sep_token_id": null,
"suppress_tokens": null,
"task_specific_params": null,
"temperature": 1.0,
"tf_legacy_loss": false,
"tie_encoder_decoder": false,
"tie_word_embeddings": true,
"tokenizer_class": null,
"top_k": 50,
"top_p": 1.0,
"torch_dtype": "bfloat16",
"torchscript": false,
"transformers_version": "4.47.1",
"typical_p": 1.0,
"use_bfloat16": false,
"version": "radio_v2.1",
"vitdet_window_size": null
},
"select_layer": -1,
"template": "phi3-chat",
"torch_dtype": "bfloat16",
"transformers_version": null,
"use_backbone_lora": 0,
"use_llm_lora": 0,
"use_thumbnail": true,
"vision_config": {
"_attn_implementation_autoset": false,
"_name_or_path": "",
"add_cross_attention": false,
"architectures": [
"InternVisionModel"
],
"attention_dropout": 0.0,
"bad_words_ids": null,
"begin_suppress_tokens": null,
"bos_token_id": null,
"chunk_size_feed_forward": 0,
"cross_attention_hidden_size": null,
"decoder_start_token_id": null,
"diversity_penalty": 0.0,
"do_sample": false,
"drop_path_rate": 0.0,
"dropout": 0.0,
"early_stopping": false,
"encoder_no_repeat_ngram_size": 0,
"eos_token_id": null,
"exponential_decay_length_penalty": null,
"finetuning_task": null,
"forced_bos_token_id": null,
"forced_eos_token_id": null,
"hidden_act": "gelu",
"hidden_size": 1024,
"id2label": {
"0": "LABEL_0",
"1": "LABEL_1"
},
"image_size": 448,
"initializer_factor": 1.0,
"initializer_range": 0.02,
"intermediate_size": 4096,
"is_decoder": false,
"is_encoder_decoder": false,
"label2id": {
"LABEL_0": 0,
"LABEL_1": 1
},
"layer_norm_eps": 1e-06,
"length_penalty": 1.0,
"max_length": 20,
"min_length": 0,
"model_type": "intern_vit_6b",
"no_repeat_ngram_size": 0,
"norm_type": "layer_norm",
"num_attention_heads": 16,
"num_beam_groups": 1,
"num_beams": 1,
"num_channels": 3,
"num_hidden_layers": 24,
"num_return_sequences": 1,
"output_attentions": false,
"output_hidden_states": false,
"output_scores": false,
"pad_token_id": null,
"patch_size": 14,
"prefix": null,
"problem_type": null,
"pruned_heads": {},
"qk_normalization": false,
"qkv_bias": true,
"remove_invalid_values": false,
"repetition_penalty": 1.0,
"return_dict": true,
"return_dict_in_generate": false,
"sep_token_id": null,
"suppress_tokens": null,
"task_specific_params": null,
"temperature": 1.0,
"tf_legacy_loss": false,
"tie_encoder_decoder": false,
"tie_word_embeddings": true,
"tokenizer_class": null,
"top_k": 50,
"top_p": 1.0,
"torch_dtype": "bfloat16",
"torchscript": false,
"transformers_version": "4.47.1",
"typical_p": 1.0,
"use_bfloat16": true,
"use_flash_attn": true
}
}
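
For orientation: this is the InternVLChatConfig for a model that pairs a Phi-3-mini-128k-instruct language model (llm_config) with an InternViT vision tower (vision_config) and an NVIDIA RADIO encoder (radio_config); dynamic image tiling is governed by force_image_size, min_dynamic_patch and max_dynamic_patch, and conversations use the "phi3-chat" template. Below is a minimal loading sketch, assuming the repo ships the custom configuration_internvl_chat.py / modeling_internvl_chat.py files referenced in auto_map; the repo path passed to from_pretrained is illustrative, not verified.

# Hedged sketch: load this checkpoint via the custom classes declared in "auto_map".
# Assumes the modeling/configuration files live alongside config.json in the repo;
# "colva_internvl2_4b" is a placeholder local path or repo id.
import torch
from transformers import AutoConfig, AutoModel, AutoTokenizer

path = "colva_internvl2_4b"  # hypothetical repo id / local directory

config = AutoConfig.from_pretrained(path, trust_remote_code=True)
# A few fields from the JSON above that govern preprocessing:
print(config.force_image_size)   # 448  -> each image tile is 448x448
print(config.max_dynamic_patch)  # 12   -> at most 12 tiles per image
print(config.downsample_ratio)   # 0.5  -> vision-token downsampling before the LLM
print(config.template)           # "phi3-chat" conversation template

tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
model = AutoModel.from_pretrained(
    path,
    torch_dtype=torch.bfloat16,  # matches the top-level "torch_dtype": "bfloat16"
    trust_remote_code=True,      # required because of the "auto_map" entries
).eval()

The actual chat/inference call depends on the repo's remote code (InternVL2-style checkpoints typically expose a model.chat(...) helper), so consult the model card rather than relying on this sketch for the generation interface.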