{
  "best_metric": 27.8203,
  "best_model_checkpoint": "./ko-en_mbartLarge_exp20p_batch64_linear/checkpoint-8000",
  "epoch": 11.138183083884442,
  "eval_steps": 4000,
  "global_step": 24000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.23,
      "learning_rate": 1.25e-05,
      "loss": 1.5426,
      "step": 500
    },
    {
      "epoch": 0.46,
      "learning_rate": 2.5e-05,
      "loss": 1.343,
      "step": 1000
    },
    {
      "epoch": 0.7,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 1.2768,
      "step": 1500
    },
    {
      "epoch": 0.93,
      "learning_rate": 5e-05,
      "loss": 1.242,
      "step": 2000
    },
    {
      "epoch": 1.16,
      "learning_rate": 4.970294676806084e-05,
      "loss": 1.1208,
      "step": 2500
    },
    {
      "epoch": 1.39,
      "learning_rate": 4.940589353612168e-05,
      "loss": 1.0718,
      "step": 3000
    },
    {
      "epoch": 1.62,
      "learning_rate": 4.910884030418251e-05,
      "loss": 1.061,
      "step": 3500
    },
    {
      "epoch": 1.86,
      "learning_rate": 4.881178707224335e-05,
      "loss": 1.0585,
      "step": 4000
    },
    {
      "epoch": 1.86,
      "eval_bleu": 27.494,
      "eval_gen_len": 18.9136,
      "eval_loss": 1.1434648036956787,
      "eval_runtime": 1314.8151,
      "eval_samples_per_second": 13.11,
      "eval_steps_per_second": 1.639,
      "step": 4000
    },
    {
      "epoch": 2.09,
      "learning_rate": 4.851473384030418e-05,
      "loss": 0.9558,
      "step": 4500
    },
    {
      "epoch": 2.32,
      "learning_rate": 4.8217680608365025e-05,
      "loss": 0.8186,
      "step": 5000
    },
    {
      "epoch": 2.55,
      "learning_rate": 4.792062737642586e-05,
      "loss": 0.8398,
      "step": 5500
    },
    {
      "epoch": 2.78,
      "learning_rate": 4.7623574144486695e-05,
      "loss": 0.8454,
      "step": 6000
    },
    {
      "epoch": 3.02,
      "learning_rate": 4.732652091254753e-05,
      "loss": 0.8416,
      "step": 6500
    },
    {
      "epoch": 3.25,
      "learning_rate": 4.702946768060837e-05,
      "loss": 0.6354,
      "step": 7000
    },
    {
      "epoch": 3.48,
      "learning_rate": 4.67324144486692e-05,
      "loss": 0.6545,
      "step": 7500
    },
    {
      "epoch": 3.71,
      "learning_rate": 4.643536121673004e-05,
      "loss": 0.6719,
      "step": 8000
    },
    {
      "epoch": 3.71,
      "eval_bleu": 27.8203,
      "eval_gen_len": 18.6815,
      "eval_loss": 1.204200267791748,
      "eval_runtime": 1295.2995,
      "eval_samples_per_second": 13.307,
      "eval_steps_per_second": 1.664,
      "step": 8000
    },
    {
      "epoch": 3.94,
      "learning_rate": 4.613830798479088e-05,
      "loss": 0.6824,
      "step": 8500
    },
    {
      "epoch": 4.18,
      "learning_rate": 4.584125475285171e-05,
      "loss": 0.5317,
      "step": 9000
    },
    {
      "epoch": 4.41,
      "learning_rate": 4.554420152091255e-05,
      "loss": 0.5004,
      "step": 9500
    },
    {
      "epoch": 4.64,
      "learning_rate": 4.524714828897338e-05,
      "loss": 0.5208,
      "step": 10000
    },
    {
      "epoch": 4.87,
      "learning_rate": 4.4950095057034226e-05,
      "loss": 0.5339,
      "step": 10500
    },
    {
      "epoch": 5.11,
      "learning_rate": 4.465304182509506e-05,
      "loss": 0.4608,
      "step": 11000
    },
    {
      "epoch": 5.34,
      "learning_rate": 4.4355988593155896e-05,
      "loss": 0.3783,
      "step": 11500
    },
    {
      "epoch": 5.57,
      "learning_rate": 4.405893536121673e-05,
      "loss": 0.3964,
      "step": 12000
    },
    {
      "epoch": 5.57,
      "eval_bleu": 27.1711,
      "eval_gen_len": 18.5184,
      "eval_loss": 1.4261032342910767,
      "eval_runtime": 1285.7174,
      "eval_samples_per_second": 13.407,
      "eval_steps_per_second": 1.676,
      "step": 12000
    },
    {
      "epoch": 5.8,
      "learning_rate": 4.376188212927757e-05,
      "loss": 0.4104,
      "step": 12500
    },
    {
      "epoch": 6.03,
      "learning_rate": 4.3464828897338404e-05,
      "loss": 0.4036,
      "step": 13000
    },
    {
      "epoch": 6.27,
      "learning_rate": 4.316777566539924e-05,
      "loss": 0.2859,
      "step": 13500
    },
    {
      "epoch": 6.5,
      "learning_rate": 4.2870722433460074e-05,
      "loss": 0.3004,
      "step": 14000
    },
    {
      "epoch": 6.73,
      "learning_rate": 4.257366920152092e-05,
      "loss": 0.3148,
      "step": 14500
    },
    {
      "epoch": 6.96,
      "learning_rate": 4.227661596958175e-05,
      "loss": 0.3262,
      "step": 15000
    },
    {
      "epoch": 7.19,
      "learning_rate": 4.197956273764259e-05,
      "loss": 0.234,
      "step": 15500
    },
    {
      "epoch": 7.43,
      "learning_rate": 4.168250950570343e-05,
      "loss": 0.2282,
      "step": 16000
    },
    {
      "epoch": 7.43,
      "eval_bleu": 26.1891,
      "eval_gen_len": 18.5479,
      "eval_loss": 1.7274521589279175,
      "eval_runtime": 1283.8263,
      "eval_samples_per_second": 13.426,
      "eval_steps_per_second": 1.679,
      "step": 16000
    },
    {
      "epoch": 7.66,
      "learning_rate": 4.138545627376426e-05,
      "loss": 0.2426,
      "step": 16500
    },
    {
      "epoch": 7.89,
      "learning_rate": 4.10884030418251e-05,
      "loss": 0.2507,
      "step": 17000
    },
    {
      "epoch": 8.12,
      "learning_rate": 4.079134980988593e-05,
      "loss": 0.2085,
      "step": 17500
    },
    {
      "epoch": 8.35,
      "learning_rate": 4.0494296577946774e-05,
      "loss": 0.1756,
      "step": 18000
    },
    {
      "epoch": 8.59,
      "learning_rate": 4.0197243346007605e-05,
      "loss": 0.1859,
      "step": 18500
    },
    {
      "epoch": 8.82,
      "learning_rate": 3.9900190114068444e-05,
      "loss": 0.1957,
      "step": 19000
    },
    {
      "epoch": 9.05,
      "learning_rate": 3.9603136882129275e-05,
      "loss": 0.1845,
      "step": 19500
    },
    {
      "epoch": 9.28,
      "learning_rate": 3.930608365019012e-05,
      "loss": 0.1338,
      "step": 20000
    },
    {
      "epoch": 9.28,
      "eval_bleu": 26.1533,
      "eval_gen_len": 18.505,
      "eval_loss": 1.9251435995101929,
      "eval_runtime": 1285.914,
      "eval_samples_per_second": 13.404,
      "eval_steps_per_second": 1.676,
      "step": 20000
    },
    {
      "epoch": 9.51,
      "learning_rate": 3.900903041825095e-05,
      "loss": 0.1447,
      "step": 20500
    },
    {
      "epoch": 9.75,
      "learning_rate": 3.871197718631179e-05,
      "loss": 0.1527,
      "step": 21000
    },
    {
      "epoch": 9.98,
      "learning_rate": 3.841492395437263e-05,
      "loss": 0.1584,
      "step": 21500
    },
    {
      "epoch": 10.21,
      "learning_rate": 3.811787072243346e-05,
      "loss": 0.1119,
      "step": 22000
    },
    {
      "epoch": 10.44,
      "learning_rate": 3.78208174904943e-05,
      "loss": 0.1127,
      "step": 22500
    },
    {
      "epoch": 10.67,
      "learning_rate": 3.752376425855513e-05,
      "loss": 0.1233,
      "step": 23000
    },
    {
      "epoch": 10.91,
      "learning_rate": 3.7226711026615975e-05,
      "loss": 0.1269,
      "step": 23500
    },
    {
      "epoch": 11.14,
      "learning_rate": 3.6929657794676806e-05,
      "loss": 0.1033,
      "step": 24000
    },
    {
      "epoch": 11.14,
      "eval_bleu": 26.0643,
      "eval_gen_len": 18.4275,
      "eval_loss": 2.0758345127105713,
      "eval_runtime": 1283.6981,
      "eval_samples_per_second": 13.428,
      "eval_steps_per_second": 1.679,
      "step": 24000
    },
    {
      "epoch": 11.14,
      "step": 24000,
      "total_flos": 3.328711323549696e+18,
      "train_loss": 0.5229005074501037,
      "train_runtime": 88579.3039,
      "train_samples_per_second": 62.272,
      "train_steps_per_second": 0.973
    }
  ],
  "logging_steps": 500,
  "max_steps": 86160,
  "num_train_epochs": 40,
  "save_steps": 4000,
  "total_flos": 3.328711323549696e+18,
  "trial_name": null,
  "trial_params": null
}