{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.991137370753323,
  "eval_steps": 500,
  "global_step": 1014,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.29542097488921715,
      "grad_norm": 1.8936359882354736,
      "learning_rate": 2.5e-05,
      "loss": 2.2777,
      "step": 50
    },
    {
      "epoch": 0.5908419497784343,
      "grad_norm": 0.42867106199264526,
      "learning_rate": 5e-05,
      "loss": 0.9288,
      "step": 100
    },
    {
      "epoch": 0.8862629246676514,
      "grad_norm": 0.5983214378356934,
      "learning_rate": 4.726477024070022e-05,
      "loss": 0.6469,
      "step": 150
    },
    {
      "epoch": 1.1816838995568686,
      "grad_norm": 0.5473693013191223,
      "learning_rate": 4.452954048140044e-05,
      "loss": 0.5595,
      "step": 200
    },
    {
      "epoch": 1.4771048744460857,
      "grad_norm": 0.5971556901931763,
      "learning_rate": 4.179431072210066e-05,
      "loss": 0.537,
      "step": 250
    },
    {
      "epoch": 1.7725258493353029,
      "grad_norm": 0.6025377511978149,
      "learning_rate": 3.9059080962800876e-05,
      "loss": 0.5271,
      "step": 300
    },
    {
      "epoch": 2.06794682422452,
      "grad_norm": 0.9687663912773132,
      "learning_rate": 3.6323851203501094e-05,
      "loss": 0.5318,
      "step": 350
    },
    {
      "epoch": 2.363367799113737,
      "grad_norm": 0.673847496509552,
      "learning_rate": 3.358862144420131e-05,
      "loss": 0.5116,
      "step": 400
    },
    {
      "epoch": 2.658788774002954,
      "grad_norm": 0.7841825485229492,
      "learning_rate": 3.085339168490153e-05,
      "loss": 0.5114,
      "step": 450
    },
    {
      "epoch": 2.9542097488921715,
      "grad_norm": 0.8053774237632751,
      "learning_rate": 2.811816192560175e-05,
      "loss": 0.5049,
      "step": 500
    },
    {
      "epoch": 3.2496307237813884,
      "grad_norm": 0.8071188926696777,
      "learning_rate": 2.538293216630197e-05,
      "loss": 0.5045,
      "step": 550
    },
    {
      "epoch": 3.5450516986706058,
      "grad_norm": 0.8850335478782654,
      "learning_rate": 2.264770240700219e-05,
      "loss": 0.4958,
      "step": 600
    },
    {
      "epoch": 3.8404726735598227,
      "grad_norm": 1.001068115234375,
      "learning_rate": 1.9912472647702408e-05,
      "loss": 0.4927,
      "step": 650
    },
    {
      "epoch": 4.13589364844904,
      "grad_norm": 1.3853482007980347,
      "learning_rate": 1.7177242888402626e-05,
      "loss": 0.494,
      "step": 700
    },
    {
      "epoch": 4.431314623338257,
      "grad_norm": 1.1378982067108154,
      "learning_rate": 1.4442013129102846e-05,
      "loss": 0.4853,
      "step": 750
    },
    {
      "epoch": 4.726735598227474,
      "grad_norm": 1.11444890499115,
      "learning_rate": 1.1706783369803063e-05,
      "loss": 0.4797,
      "step": 800
    },
    {
      "epoch": 5.022156573116692,
      "grad_norm": 1.133715033531189,
      "learning_rate": 8.971553610503283e-06,
      "loss": 0.4822,
      "step": 850
    },
    {
      "epoch": 5.317577548005908,
      "grad_norm": 1.230751633644104,
      "learning_rate": 6.2363238512035015e-06,
      "loss": 0.475,
      "step": 900
    },
    {
      "epoch": 5.612998522895126,
      "grad_norm": 1.201663613319397,
      "learning_rate": 3.50109409190372e-06,
      "loss": 0.4729,
      "step": 950
    },
    {
      "epoch": 5.908419497784343,
      "grad_norm": 1.250101089477539,
      "learning_rate": 7.658643326039388e-07,
      "loss": 0.4689,
      "step": 1000
    }
  ],
  "logging_steps": 50,
  "max_steps": 1014,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.6016891541848064e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}