{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9855421686746988,
"eval_steps": 52,
"global_step": 414,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.004819277108433735,
"grad_norm": 22.358972549438477,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.5652,
"step": 1
},
{
"epoch": 0.004819277108433735,
"eval_loss": 1.4657918214797974,
"eval_runtime": 407.7477,
"eval_samples_per_second": 3.983,
"eval_steps_per_second": 0.996,
"step": 1
},
{
"epoch": 0.00963855421686747,
"grad_norm": 21.81947135925293,
"learning_rate": 4.000000000000001e-06,
"loss": 1.4671,
"step": 2
},
{
"epoch": 0.014457831325301205,
"grad_norm": 17.827760696411133,
"learning_rate": 6e-06,
"loss": 1.3721,
"step": 3
},
{
"epoch": 0.01927710843373494,
"grad_norm": 12.67513656616211,
"learning_rate": 8.000000000000001e-06,
"loss": 1.2023,
"step": 4
},
{
"epoch": 0.024096385542168676,
"grad_norm": 12.69899845123291,
"learning_rate": 1e-05,
"loss": 1.1047,
"step": 5
},
{
"epoch": 0.02891566265060241,
"grad_norm": 10.573343276977539,
"learning_rate": 1.2e-05,
"loss": 1.0638,
"step": 6
},
{
"epoch": 0.033734939759036145,
"grad_norm": 7.3869709968566895,
"learning_rate": 1.4e-05,
"loss": 1.0308,
"step": 7
},
{
"epoch": 0.03855421686746988,
"grad_norm": 8.336923599243164,
"learning_rate": 1.6000000000000003e-05,
"loss": 1.0038,
"step": 8
},
{
"epoch": 0.043373493975903614,
"grad_norm": 8.090559959411621,
"learning_rate": 1.8e-05,
"loss": 1.0536,
"step": 9
},
{
"epoch": 0.04819277108433735,
"grad_norm": 5.664218425750732,
"learning_rate": 2e-05,
"loss": 0.9744,
"step": 10
},
{
"epoch": 0.05301204819277108,
"grad_norm": 27.36557388305664,
"learning_rate": 1.9999697653579705e-05,
"loss": 1.0602,
"step": 11
},
{
"epoch": 0.05783132530120482,
"grad_norm": 13.406028747558594,
"learning_rate": 1.9998790632601496e-05,
"loss": 1.038,
"step": 12
},
{
"epoch": 0.06265060240963856,
"grad_norm": 5.966604709625244,
"learning_rate": 1.999727899191228e-05,
"loss": 0.9748,
"step": 13
},
{
"epoch": 0.06746987951807229,
"grad_norm": 5.611815452575684,
"learning_rate": 1.999516282291988e-05,
"loss": 0.9866,
"step": 14
},
{
"epoch": 0.07228915662650602,
"grad_norm": 5.1906819343566895,
"learning_rate": 1.9992442253587533e-05,
"loss": 0.9375,
"step": 15
},
{
"epoch": 0.07710843373493977,
"grad_norm": 5.197139739990234,
"learning_rate": 1.998911744842611e-05,
"loss": 0.9655,
"step": 16
},
{
"epoch": 0.0819277108433735,
"grad_norm": 3.9094479084014893,
"learning_rate": 1.99851886084842e-05,
"loss": 0.95,
"step": 17
},
{
"epoch": 0.08674698795180723,
"grad_norm": 5.180599212646484,
"learning_rate": 1.9980655971335944e-05,
"loss": 1.0035,
"step": 18
},
{
"epoch": 0.09156626506024096,
"grad_norm": 4.283572673797607,
"learning_rate": 1.9975519811066665e-05,
"loss": 0.9105,
"step": 19
},
{
"epoch": 0.0963855421686747,
"grad_norm": 3.92655086517334,
"learning_rate": 1.9969780438256295e-05,
"loss": 1.0165,
"step": 20
},
{
"epoch": 0.10120481927710843,
"grad_norm": 4.255762577056885,
"learning_rate": 1.99634381999606e-05,
"loss": 0.9621,
"step": 21
},
{
"epoch": 0.10602409638554217,
"grad_norm": 4.64040994644165,
"learning_rate": 1.995649347969019e-05,
"loss": 0.9977,
"step": 22
},
{
"epoch": 0.1108433734939759,
"grad_norm": 4.476572513580322,
"learning_rate": 1.9948946697387322e-05,
"loss": 1.0254,
"step": 23
},
{
"epoch": 0.11566265060240964,
"grad_norm": 4.513229846954346,
"learning_rate": 1.9940798309400527e-05,
"loss": 0.9874,
"step": 24
},
{
"epoch": 0.12048192771084337,
"grad_norm": 4.014408111572266,
"learning_rate": 1.993204880845699e-05,
"loss": 0.9334,
"step": 25
},
{
"epoch": 0.12530120481927712,
"grad_norm": 3.4205386638641357,
"learning_rate": 1.992269872363277e-05,
"loss": 0.8867,
"step": 26
},
{
"epoch": 0.13012048192771083,
"grad_norm": 3.855297088623047,
"learning_rate": 1.9912748620320796e-05,
"loss": 0.9767,
"step": 27
},
{
"epoch": 0.13493975903614458,
"grad_norm": 3.8717806339263916,
"learning_rate": 1.9902199100196697e-05,
"loss": 0.9989,
"step": 28
},
{
"epoch": 0.13975903614457832,
"grad_norm": 3.8353829383850098,
"learning_rate": 1.98910508011824e-05,
"loss": 0.9553,
"step": 29
},
{
"epoch": 0.14457831325301204,
"grad_norm": 3.8910715579986572,
"learning_rate": 1.987930439740757e-05,
"loss": 0.9659,
"step": 30
},
{
"epoch": 0.1493975903614458,
"grad_norm": 4.098902225494385,
"learning_rate": 1.9866960599168825e-05,
"loss": 0.9066,
"step": 31
},
{
"epoch": 0.15421686746987953,
"grad_norm": 3.609893321990967,
"learning_rate": 1.9854020152886816e-05,
"loss": 0.9287,
"step": 32
},
{
"epoch": 0.15903614457831325,
"grad_norm": 3.6637232303619385,
"learning_rate": 1.9840483841061058e-05,
"loss": 0.8945,
"step": 33
},
{
"epoch": 0.163855421686747,
"grad_norm": 3.991074562072754,
"learning_rate": 1.982635248222264e-05,
"loss": 0.8992,
"step": 34
},
{
"epoch": 0.1686746987951807,
"grad_norm": 4.303783416748047,
"learning_rate": 1.981162693088471e-05,
"loss": 0.9494,
"step": 35
},
{
"epoch": 0.17349397590361446,
"grad_norm": 4.269317150115967,
"learning_rate": 1.9796308077490817e-05,
"loss": 0.9754,
"step": 36
},
{
"epoch": 0.1783132530120482,
"grad_norm": 4.001611709594727,
"learning_rate": 1.978039684836106e-05,
"loss": 0.9202,
"step": 37
},
{
"epoch": 0.18313253012048192,
"grad_norm": 4.386765956878662,
"learning_rate": 1.976389420563607e-05,
"loss": 0.9723,
"step": 38
},
{
"epoch": 0.18795180722891566,
"grad_norm": 3.8781235218048096,
"learning_rate": 1.9746801147218844e-05,
"loss": 0.9458,
"step": 39
},
{
"epoch": 0.1927710843373494,
"grad_norm": 4.019350528717041,
"learning_rate": 1.9729118706714377e-05,
"loss": 0.9313,
"step": 40
},
{
"epoch": 0.19759036144578312,
"grad_norm": 4.535061836242676,
"learning_rate": 1.9710847953367193e-05,
"loss": 1.0018,
"step": 41
},
{
"epoch": 0.20240963855421687,
"grad_norm": 4.199214458465576,
"learning_rate": 1.9691989991996663e-05,
"loss": 1.0205,
"step": 42
},
{
"epoch": 0.20722891566265061,
"grad_norm": 3.905357599258423,
"learning_rate": 1.9672545962930214e-05,
"loss": 0.8749,
"step": 43
},
{
"epoch": 0.21204819277108433,
"grad_norm": 5.23562479019165,
"learning_rate": 1.9652517041934357e-05,
"loss": 0.9942,
"step": 44
},
{
"epoch": 0.21686746987951808,
"grad_norm": 4.048227310180664,
"learning_rate": 1.9631904440143614e-05,
"loss": 0.8923,
"step": 45
},
{
"epoch": 0.2216867469879518,
"grad_norm": 3.829437732696533,
"learning_rate": 1.9610709403987248e-05,
"loss": 0.9125,
"step": 46
},
{
"epoch": 0.22650602409638554,
"grad_norm": 3.8191184997558594,
"learning_rate": 1.9588933215113926e-05,
"loss": 0.8825,
"step": 47
},
{
"epoch": 0.23132530120481928,
"grad_norm": 3.622243642807007,
"learning_rate": 1.95665771903142e-05,
"loss": 0.9214,
"step": 48
},
{
"epoch": 0.236144578313253,
"grad_norm": 4.483166217803955,
"learning_rate": 1.954364268144088e-05,
"loss": 0.9027,
"step": 49
},
{
"epoch": 0.24096385542168675,
"grad_norm": 4.152469635009766,
"learning_rate": 1.95201310753273e-05,
"loss": 0.9439,
"step": 50
},
{
"epoch": 0.2457831325301205,
"grad_norm": 3.4553298950195312,
"learning_rate": 1.949604379370345e-05,
"loss": 0.9282,
"step": 51
},
{
"epoch": 0.25060240963855424,
"grad_norm": 3.4458351135253906,
"learning_rate": 1.9471382293110004e-05,
"loss": 0.8734,
"step": 52
},
{
"epoch": 0.25060240963855424,
"eval_loss": 0.9173946380615234,
"eval_runtime": 409.5887,
"eval_samples_per_second": 3.965,
"eval_steps_per_second": 0.991,
"step": 52
},
{
"epoch": 0.25542168674698795,
"grad_norm": 3.958357810974121,
"learning_rate": 1.9446148064810243e-05,
"loss": 0.9208,
"step": 53
},
{
"epoch": 0.26024096385542167,
"grad_norm": 3.440983295440674,
"learning_rate": 1.9420342634699893e-05,
"loss": 0.9079,
"step": 54
},
{
"epoch": 0.26506024096385544,
"grad_norm": 3.14013934135437,
"learning_rate": 1.9393967563214833e-05,
"loss": 0.8653,
"step": 55
},
{
"epoch": 0.26987951807228916,
"grad_norm": 3.123026132583618,
"learning_rate": 1.936702444523675e-05,
"loss": 0.8783,
"step": 56
},
{
"epoch": 0.2746987951807229,
"grad_norm": 3.9587645530700684,
"learning_rate": 1.9339514909996706e-05,
"loss": 0.9239,
"step": 57
},
{
"epoch": 0.27951807228915665,
"grad_norm": 4.004755973815918,
"learning_rate": 1.9311440620976597e-05,
"loss": 0.9284,
"step": 58
},
{
"epoch": 0.28433734939759037,
"grad_norm": 3.3966100215911865,
"learning_rate": 1.928280327580858e-05,
"loss": 0.9284,
"step": 59
},
{
"epoch": 0.2891566265060241,
"grad_norm": 3.665738105773926,
"learning_rate": 1.925360460617242e-05,
"loss": 0.9185,
"step": 60
},
{
"epoch": 0.29397590361445786,
"grad_norm": 3.625847339630127,
"learning_rate": 1.9223846377690754e-05,
"loss": 0.9208,
"step": 61
},
{
"epoch": 0.2987951807228916,
"grad_norm": 4.189015865325928,
"learning_rate": 1.9193530389822364e-05,
"loss": 0.9652,
"step": 62
},
{
"epoch": 0.3036144578313253,
"grad_norm": 4.030600070953369,
"learning_rate": 1.9162658475753328e-05,
"loss": 0.9312,
"step": 63
},
{
"epoch": 0.30843373493975906,
"grad_norm": 3.5443010330200195,
"learning_rate": 1.913123250228619e-05,
"loss": 0.9061,
"step": 64
},
{
"epoch": 0.3132530120481928,
"grad_norm": 4.126009464263916,
"learning_rate": 1.9099254369727062e-05,
"loss": 0.9663,
"step": 65
},
{
"epoch": 0.3180722891566265,
"grad_norm": 2.7072594165802,
"learning_rate": 1.9066726011770725e-05,
"loss": 0.8867,
"step": 66
},
{
"epoch": 0.3228915662650602,
"grad_norm": 3.835609197616577,
"learning_rate": 1.90336493953837e-05,
"loss": 0.8732,
"step": 67
},
{
"epoch": 0.327710843373494,
"grad_norm": 3.9530820846557617,
"learning_rate": 1.90000265206853e-05,
"loss": 0.9577,
"step": 68
},
{
"epoch": 0.3325301204819277,
"grad_norm": 3.488438129425049,
"learning_rate": 1.8965859420826685e-05,
"loss": 0.8677,
"step": 69
},
{
"epoch": 0.3373493975903614,
"grad_norm": 3.690389394760132,
"learning_rate": 1.8931150161867917e-05,
"loss": 0.9178,
"step": 70
},
{
"epoch": 0.3421686746987952,
"grad_norm": 3.0712807178497314,
"learning_rate": 1.889590084265304e-05,
"loss": 0.8756,
"step": 71
},
{
"epoch": 0.3469879518072289,
"grad_norm": 2.9820151329040527,
"learning_rate": 1.8860113594683148e-05,
"loss": 0.8897,
"step": 72
},
{
"epoch": 0.35180722891566263,
"grad_norm": 3.4041998386383057,
"learning_rate": 1.882379058198751e-05,
"loss": 0.9138,
"step": 73
},
{
"epoch": 0.3566265060240964,
"grad_norm": 3.2408714294433594,
"learning_rate": 1.878693400099269e-05,
"loss": 0.9034,
"step": 74
},
{
"epoch": 0.3614457831325301,
"grad_norm": 3.3161394596099854,
"learning_rate": 1.874954608038976e-05,
"loss": 0.9153,
"step": 75
},
{
"epoch": 0.36626506024096384,
"grad_norm": 3.4559402465820312,
"learning_rate": 1.8711629080999506e-05,
"loss": 0.9402,
"step": 76
},
{
"epoch": 0.3710843373493976,
"grad_norm": 3.232393264770508,
"learning_rate": 1.867318529563574e-05,
"loss": 0.934,
"step": 77
},
{
"epoch": 0.3759036144578313,
"grad_norm": 2.9000368118286133,
"learning_rate": 1.8634217048966638e-05,
"loss": 0.899,
"step": 78
},
{
"epoch": 0.38072289156626504,
"grad_norm": 3.0255680084228516,
"learning_rate": 1.8594726697374173e-05,
"loss": 0.8689,
"step": 79
},
{
"epoch": 0.3855421686746988,
"grad_norm": 3.2437119483947754,
"learning_rate": 1.855471662881164e-05,
"loss": 0.8719,
"step": 80
},
{
"epoch": 0.39036144578313253,
"grad_norm": 3.1790125370025635,
"learning_rate": 1.8514189262659235e-05,
"loss": 0.9318,
"step": 81
},
{
"epoch": 0.39518072289156625,
"grad_norm": 3.26911997795105,
"learning_rate": 1.8473147049577777e-05,
"loss": 0.8369,
"step": 82
},
{
"epoch": 0.4,
"grad_norm": 3.6041669845581055,
"learning_rate": 1.8431592471360506e-05,
"loss": 0.937,
"step": 83
},
{
"epoch": 0.40481927710843374,
"grad_norm": 3.253331422805786,
"learning_rate": 1.8389528040783014e-05,
"loss": 0.8296,
"step": 84
},
{
"epoch": 0.40963855421686746,
"grad_norm": 3.0919277667999268,
"learning_rate": 1.8346956301451303e-05,
"loss": 0.9584,
"step": 85
},
{
"epoch": 0.41445783132530123,
"grad_norm": 2.9307501316070557,
"learning_rate": 1.8303879827647977e-05,
"loss": 0.8596,
"step": 86
},
{
"epoch": 0.41927710843373495,
"grad_norm": 2.7852823734283447,
"learning_rate": 1.826030122417656e-05,
"loss": 0.8651,
"step": 87
},
{
"epoch": 0.42409638554216866,
"grad_norm": 3.0654289722442627,
"learning_rate": 1.821622312620401e-05,
"loss": 0.8692,
"step": 88
},
{
"epoch": 0.42891566265060244,
"grad_norm": 3.1769044399261475,
"learning_rate": 1.8171648199101347e-05,
"loss": 0.884,
"step": 89
},
{
"epoch": 0.43373493975903615,
"grad_norm": 3.5062806606292725,
"learning_rate": 1.8126579138282502e-05,
"loss": 0.8249,
"step": 90
},
{
"epoch": 0.43855421686746987,
"grad_norm": 3.4617791175842285,
"learning_rate": 1.8081018669041324e-05,
"loss": 0.8441,
"step": 91
},
{
"epoch": 0.4433734939759036,
"grad_norm": 2.880967140197754,
"learning_rate": 1.803496954638676e-05,
"loss": 0.8334,
"step": 92
},
{
"epoch": 0.44819277108433736,
"grad_norm": 3.091942071914673,
"learning_rate": 1.798843455487629e-05,
"loss": 0.8333,
"step": 93
},
{
"epoch": 0.4530120481927711,
"grad_norm": 3.1967666149139404,
"learning_rate": 1.7941416508447537e-05,
"loss": 0.8893,
"step": 94
},
{
"epoch": 0.4578313253012048,
"grad_norm": 2.6479837894439697,
"learning_rate": 1.7893918250248106e-05,
"loss": 0.8183,
"step": 95
},
{
"epoch": 0.46265060240963857,
"grad_norm": 3.1210310459136963,
"learning_rate": 1.784594265246366e-05,
"loss": 0.9058,
"step": 96
},
{
"epoch": 0.4674698795180723,
"grad_norm": 3.483987331390381,
"learning_rate": 1.7797492616144256e-05,
"loss": 0.8344,
"step": 97
},
{
"epoch": 0.472289156626506,
"grad_norm": 3.0755786895751953,
"learning_rate": 1.77485710710289e-05,
"loss": 0.8815,
"step": 98
},
{
"epoch": 0.4771084337349398,
"grad_norm": 3.1040232181549072,
"learning_rate": 1.7699180975368397e-05,
"loss": 0.8541,
"step": 99
},
{
"epoch": 0.4819277108433735,
"grad_norm": 2.973416805267334,
"learning_rate": 1.764932531574648e-05,
"loss": 0.8576,
"step": 100
},
{
"epoch": 0.4867469879518072,
"grad_norm": 2.6803696155548096,
"learning_rate": 1.759900710689918e-05,
"loss": 0.8481,
"step": 101
},
{
"epoch": 0.491566265060241,
"grad_norm": 2.7588868141174316,
"learning_rate": 1.7548229391532572e-05,
"loss": 0.8094,
"step": 102
},
{
"epoch": 0.4963855421686747,
"grad_norm": 3.0891292095184326,
"learning_rate": 1.7496995240138745e-05,
"loss": 0.8736,
"step": 103
},
{
"epoch": 0.5012048192771085,
"grad_norm": 2.803755521774292,
"learning_rate": 1.7445307750810153e-05,
"loss": 0.8493,
"step": 104
},
{
"epoch": 0.5012048192771085,
"eval_loss": 0.8754510283470154,
"eval_runtime": 409.6247,
"eval_samples_per_second": 3.965,
"eval_steps_per_second": 0.991,
"step": 104
},
{
"epoch": 0.5060240963855421,
"grad_norm": 3.214620590209961,
"learning_rate": 1.7393170049052274e-05,
"loss": 0.898,
"step": 105
},
{
"epoch": 0.5108433734939759,
"grad_norm": 2.732611656188965,
"learning_rate": 1.7340585287594605e-05,
"loss": 0.862,
"step": 106
},
{
"epoch": 0.5156626506024097,
"grad_norm": 3.1261470317840576,
"learning_rate": 1.728755664620002e-05,
"loss": 0.9223,
"step": 107
},
{
"epoch": 0.5204819277108433,
"grad_norm": 3.0365259647369385,
"learning_rate": 1.72340873314725e-05,
"loss": 0.8637,
"step": 108
},
{
"epoch": 0.5253012048192771,
"grad_norm": 3.233238458633423,
"learning_rate": 1.718018057666323e-05,
"loss": 0.8649,
"step": 109
},
{
"epoch": 0.5301204819277109,
"grad_norm": 2.6572329998016357,
"learning_rate": 1.7125839641475074e-05,
"loss": 0.8035,
"step": 110
},
{
"epoch": 0.5349397590361445,
"grad_norm": 2.850344181060791,
"learning_rate": 1.7071067811865477e-05,
"loss": 0.8206,
"step": 111
},
{
"epoch": 0.5397590361445783,
"grad_norm": 3.0765762329101562,
"learning_rate": 1.7015868399847768e-05,
"loss": 0.8926,
"step": 112
},
{
"epoch": 0.5445783132530121,
"grad_norm": 2.928662061691284,
"learning_rate": 1.6960244743290867e-05,
"loss": 0.8418,
"step": 113
},
{
"epoch": 0.5493975903614458,
"grad_norm": 2.8436033725738525,
"learning_rate": 1.690420020571747e-05,
"loss": 0.8105,
"step": 114
},
{
"epoch": 0.5542168674698795,
"grad_norm": 3.2781031131744385,
"learning_rate": 1.6847738176100632e-05,
"loss": 0.9141,
"step": 115
},
{
"epoch": 0.5590361445783133,
"grad_norm": 2.989654302597046,
"learning_rate": 1.6790862068658863e-05,
"loss": 0.9185,
"step": 116
},
{
"epoch": 0.563855421686747,
"grad_norm": 2.646963596343994,
"learning_rate": 1.673357532264966e-05,
"loss": 0.8768,
"step": 117
},
{
"epoch": 0.5686746987951807,
"grad_norm": 2.966282844543457,
"learning_rate": 1.667588140216154e-05,
"loss": 0.8473,
"step": 118
},
{
"epoch": 0.5734939759036145,
"grad_norm": 3.1524553298950195,
"learning_rate": 1.6617783795904564e-05,
"loss": 0.8382,
"step": 119
},
{
"epoch": 0.5783132530120482,
"grad_norm": 3.058159112930298,
"learning_rate": 1.65592860169994e-05,
"loss": 0.8664,
"step": 120
},
{
"epoch": 0.5831325301204819,
"grad_norm": 2.9778072834014893,
"learning_rate": 1.650039160276485e-05,
"loss": 0.8986,
"step": 121
},
{
"epoch": 0.5879518072289157,
"grad_norm": 3.4090819358825684,
"learning_rate": 1.644110411450398e-05,
"loss": 0.8697,
"step": 122
},
{
"epoch": 0.5927710843373494,
"grad_norm": 3.1264090538024902,
"learning_rate": 1.6381427137288756e-05,
"loss": 0.8611,
"step": 123
},
{
"epoch": 0.5975903614457831,
"grad_norm": 2.660439968109131,
"learning_rate": 1.6321364279743267e-05,
"loss": 0.7971,
"step": 124
},
{
"epoch": 0.6024096385542169,
"grad_norm": 3.0773823261260986,
"learning_rate": 1.6260919173825507e-05,
"loss": 0.8135,
"step": 125
},
{
"epoch": 0.6072289156626506,
"grad_norm": 2.897467613220215,
"learning_rate": 1.6200095474607753e-05,
"loss": 0.8868,
"step": 126
},
{
"epoch": 0.6120481927710844,
"grad_norm": 3.2068772315979004,
"learning_rate": 1.6138896860055555e-05,
"loss": 0.8584,
"step": 127
},
{
"epoch": 0.6168674698795181,
"grad_norm": 2.853588581085205,
"learning_rate": 1.6077327030805318e-05,
"loss": 0.8486,
"step": 128
},
{
"epoch": 0.6216867469879518,
"grad_norm": 2.908766984939575,
"learning_rate": 1.601538970994054e-05,
"loss": 0.8701,
"step": 129
},
{
"epoch": 0.6265060240963856,
"grad_norm": 3.316112518310547,
"learning_rate": 1.595308864276666e-05,
"loss": 0.8063,
"step": 130
},
{
"epoch": 0.6313253012048192,
"grad_norm": 2.69521427154541,
"learning_rate": 1.589042759658462e-05,
"loss": 0.787,
"step": 131
},
{
"epoch": 0.636144578313253,
"grad_norm": 2.609143018722534,
"learning_rate": 1.582741036046301e-05,
"loss": 0.8263,
"step": 132
},
{
"epoch": 0.6409638554216868,
"grad_norm": 3.1294214725494385,
"learning_rate": 1.5764040745008987e-05,
"loss": 0.8637,
"step": 133
},
{
"epoch": 0.6457831325301204,
"grad_norm": 2.8580658435821533,
"learning_rate": 1.570032258213783e-05,
"loss": 0.8542,
"step": 134
},
{
"epoch": 0.6506024096385542,
"grad_norm": 2.952737808227539,
"learning_rate": 1.5636259724841224e-05,
"loss": 0.7718,
"step": 135
},
{
"epoch": 0.655421686746988,
"grad_norm": 3.2111058235168457,
"learning_rate": 1.5571856046954284e-05,
"loss": 0.8604,
"step": 136
},
{
"epoch": 0.6602409638554216,
"grad_norm": 3.454385995864868,
"learning_rate": 1.550711544292131e-05,
"loss": 0.8955,
"step": 137
},
{
"epoch": 0.6650602409638554,
"grad_norm": 2.990410566329956,
"learning_rate": 1.5442041827560274e-05,
"loss": 0.8322,
"step": 138
},
{
"epoch": 0.6698795180722892,
"grad_norm": 2.85209059715271,
"learning_rate": 1.537663913582611e-05,
"loss": 0.8396,
"step": 139
},
{
"epoch": 0.6746987951807228,
"grad_norm": 2.8276262283325195,
"learning_rate": 1.531091132257275e-05,
"loss": 0.8183,
"step": 140
},
{
"epoch": 0.6795180722891566,
"grad_norm": 3.024148464202881,
"learning_rate": 1.5244862362314021e-05,
"loss": 0.8604,
"step": 141
},
{
"epoch": 0.6843373493975904,
"grad_norm": 2.975719451904297,
"learning_rate": 1.5178496248983254e-05,
"loss": 0.8631,
"step": 142
},
{
"epoch": 0.689156626506024,
"grad_norm": 2.9066529273986816,
"learning_rate": 1.511181699569181e-05,
"loss": 0.7869,
"step": 143
},
{
"epoch": 0.6939759036144578,
"grad_norm": 3.047508716583252,
"learning_rate": 1.50448286344864e-05,
"loss": 0.8827,
"step": 144
},
{
"epoch": 0.6987951807228916,
"grad_norm": 2.746082305908203,
"learning_rate": 1.4977535216105258e-05,
"loss": 0.9077,
"step": 145
},
{
"epoch": 0.7036144578313253,
"grad_norm": 2.787168025970459,
"learning_rate": 1.4909940809733223e-05,
"loss": 0.8554,
"step": 146
},
{
"epoch": 0.708433734939759,
"grad_norm": 2.918994903564453,
"learning_rate": 1.484204950275565e-05,
"loss": 0.84,
"step": 147
},
{
"epoch": 0.7132530120481928,
"grad_norm": 2.950530767440796,
"learning_rate": 1.477386540051127e-05,
"loss": 0.8038,
"step": 148
},
{
"epoch": 0.7180722891566265,
"grad_norm": 3.0625436305999756,
"learning_rate": 1.4705392626043931e-05,
"loss": 0.8355,
"step": 149
},
{
"epoch": 0.7228915662650602,
"grad_norm": 2.4308924674987793,
"learning_rate": 1.4636635319853274e-05,
"loss": 0.824,
"step": 150
},
{
"epoch": 0.727710843373494,
"grad_norm": 3.4381263256073,
"learning_rate": 1.4567597639644387e-05,
"loss": 0.9047,
"step": 151
},
{
"epoch": 0.7325301204819277,
"grad_norm": 2.490755319595337,
"learning_rate": 1.4498283760076362e-05,
"loss": 0.8172,
"step": 152
},
{
"epoch": 0.7373493975903614,
"grad_norm": 2.623565196990967,
"learning_rate": 1.4428697872509868e-05,
"loss": 0.8643,
"step": 153
},
{
"epoch": 0.7421686746987952,
"grad_norm": 2.729555606842041,
"learning_rate": 1.4358844184753713e-05,
"loss": 0.7942,
"step": 154
},
{
"epoch": 0.7469879518072289,
"grad_norm": 2.9937925338745117,
"learning_rate": 1.4288726920810381e-05,
"loss": 0.8734,
"step": 155
},
{
"epoch": 0.7518072289156627,
"grad_norm": 2.592301368713379,
"learning_rate": 1.4218350320620625e-05,
"loss": 0.8604,
"step": 156
},
{
"epoch": 0.7518072289156627,
"eval_loss": 0.8515476584434509,
"eval_runtime": 409.2403,
"eval_samples_per_second": 3.968,
"eval_steps_per_second": 0.992,
"step": 156
},
{
"epoch": 0.7566265060240964,
"grad_norm": 2.74635648727417,
"learning_rate": 1.4147718639807071e-05,
"loss": 0.8341,
"step": 157
},
{
"epoch": 0.7614457831325301,
"grad_norm": 2.956916332244873,
"learning_rate": 1.4076836149416889e-05,
"loss": 0.8565,
"step": 158
},
{
"epoch": 0.7662650602409639,
"grad_norm": 3.0407042503356934,
"learning_rate": 1.4005707135663529e-05,
"loss": 0.9829,
"step": 159
},
{
"epoch": 0.7710843373493976,
"grad_norm": 2.7380025386810303,
"learning_rate": 1.3934335899667526e-05,
"loss": 0.8517,
"step": 160
},
{
"epoch": 0.7759036144578313,
"grad_norm": 2.8586325645446777,
"learning_rate": 1.386272675719642e-05,
"loss": 0.8482,
"step": 161
},
{
"epoch": 0.7807228915662651,
"grad_norm": 3.482114315032959,
"learning_rate": 1.3790884038403796e-05,
"loss": 0.9079,
"step": 162
},
{
"epoch": 0.7855421686746988,
"grad_norm": 2.5210723876953125,
"learning_rate": 1.3718812087567414e-05,
"loss": 0.8087,
"step": 163
},
{
"epoch": 0.7903614457831325,
"grad_norm": 2.4037303924560547,
"learning_rate": 1.3646515262826551e-05,
"loss": 0.7977,
"step": 164
},
{
"epoch": 0.7951807228915663,
"grad_norm": 3.141289710998535,
"learning_rate": 1.357399793591844e-05,
"loss": 0.8443,
"step": 165
},
{
"epoch": 0.8,
"grad_norm": 2.783255100250244,
"learning_rate": 1.3501264491913909e-05,
"loss": 0.8331,
"step": 166
},
{
"epoch": 0.8048192771084337,
"grad_norm": 2.3387300968170166,
"learning_rate": 1.3428319328952254e-05,
"loss": 0.7909,
"step": 167
},
{
"epoch": 0.8096385542168675,
"grad_norm": 2.699117422103882,
"learning_rate": 1.335516685797525e-05,
"loss": 0.7948,
"step": 168
},
{
"epoch": 0.8144578313253013,
"grad_norm": 2.6115429401397705,
"learning_rate": 1.3281811502460448e-05,
"loss": 0.7995,
"step": 169
},
{
"epoch": 0.8192771084337349,
"grad_norm": 2.520070791244507,
"learning_rate": 1.3208257698153677e-05,
"loss": 0.8357,
"step": 170
},
{
"epoch": 0.8240963855421687,
"grad_norm": 2.7537450790405273,
"learning_rate": 1.3134509892800821e-05,
"loss": 0.822,
"step": 171
},
{
"epoch": 0.8289156626506025,
"grad_norm": 2.5548155307769775,
"learning_rate": 1.3060572545878875e-05,
"loss": 0.7743,
"step": 172
},
{
"epoch": 0.8337349397590361,
"grad_norm": 2.4238061904907227,
"learning_rate": 1.2986450128326267e-05,
"loss": 0.8116,
"step": 173
},
{
"epoch": 0.8385542168674699,
"grad_norm": 3.0254759788513184,
"learning_rate": 1.2912147122272523e-05,
"loss": 0.8264,
"step": 174
},
{
"epoch": 0.8433734939759037,
"grad_norm": 2.9106860160827637,
"learning_rate": 1.283766802076722e-05,
"loss": 0.849,
"step": 175
},
{
"epoch": 0.8481927710843373,
"grad_norm": 2.2837445735931396,
"learning_rate": 1.2763017327508304e-05,
"loss": 0.7899,
"step": 176
},
{
"epoch": 0.8530120481927711,
"grad_norm": 2.833048105239868,
"learning_rate": 1.2688199556569753e-05,
"loss": 0.7616,
"step": 177
},
{
"epoch": 0.8578313253012049,
"grad_norm": 2.5235068798065186,
"learning_rate": 1.2613219232128608e-05,
"loss": 0.8034,
"step": 178
},
{
"epoch": 0.8626506024096385,
"grad_norm": 3.159371852874756,
"learning_rate": 1.2538080888191408e-05,
"loss": 0.8073,
"step": 179
},
{
"epoch": 0.8674698795180723,
"grad_norm": 3.462261199951172,
"learning_rate": 1.2462789068320016e-05,
"loss": 0.8688,
"step": 180
},
{
"epoch": 0.8722891566265061,
"grad_norm": 2.8163440227508545,
"learning_rate": 1.2387348325356873e-05,
"loss": 0.8153,
"step": 181
},
{
"epoch": 0.8771084337349397,
"grad_norm": 2.5280866622924805,
"learning_rate": 1.23117632211497e-05,
"loss": 0.7912,
"step": 182
},
{
"epoch": 0.8819277108433735,
"grad_norm": 2.8253021240234375,
"learning_rate": 1.2236038326275628e-05,
"loss": 0.8437,
"step": 183
},
{
"epoch": 0.8867469879518072,
"grad_norm": 2.289564371109009,
"learning_rate": 1.2160178219764838e-05,
"loss": 0.8026,
"step": 184
},
{
"epoch": 0.891566265060241,
"grad_norm": 2.4745798110961914,
"learning_rate": 1.2084187488823657e-05,
"loss": 0.8489,
"step": 185
},
{
"epoch": 0.8963855421686747,
"grad_norm": 3.2900331020355225,
"learning_rate": 1.2008070728557186e-05,
"loss": 0.8812,
"step": 186
},
{
"epoch": 0.9012048192771084,
"grad_norm": 2.9827733039855957,
"learning_rate": 1.193183254169142e-05,
"loss": 0.8048,
"step": 187
},
{
"epoch": 0.9060240963855422,
"grad_norm": 2.4809494018554688,
"learning_rate": 1.1855477538294934e-05,
"loss": 0.8123,
"step": 188
},
{
"epoch": 0.9108433734939759,
"grad_norm": 2.5557541847229004,
"learning_rate": 1.177901033550012e-05,
"loss": 0.8306,
"step": 189
},
{
"epoch": 0.9156626506024096,
"grad_norm": 2.4625046253204346,
"learning_rate": 1.1702435557223988e-05,
"loss": 0.7583,
"step": 190
},
{
"epoch": 0.9204819277108434,
"grad_norm": 2.7508351802825928,
"learning_rate": 1.1625757833888552e-05,
"loss": 0.7602,
"step": 191
},
{
"epoch": 0.9253012048192771,
"grad_norm": 2.6322593688964844,
"learning_rate": 1.1548981802140849e-05,
"loss": 0.8308,
"step": 192
},
{
"epoch": 0.9301204819277108,
"grad_norm": 2.4383065700531006,
"learning_rate": 1.1472112104572547e-05,
"loss": 0.805,
"step": 193
},
{
"epoch": 0.9349397590361446,
"grad_norm": 2.955080270767212,
"learning_rate": 1.1395153389439232e-05,
"loss": 0.8125,
"step": 194
},
{
"epoch": 0.9397590361445783,
"grad_norm": 2.8347744941711426,
"learning_rate": 1.1318110310379303e-05,
"loss": 0.8082,
"step": 195
},
{
"epoch": 0.944578313253012,
"grad_norm": 3.7410006523132324,
"learning_rate": 1.1240987526132595e-05,
"loss": 0.7615,
"step": 196
},
{
"epoch": 0.9493975903614458,
"grad_norm": 2.831632137298584,
"learning_rate": 1.1163789700258656e-05,
"loss": 0.8224,
"step": 197
},
{
"epoch": 0.9542168674698795,
"grad_norm": 3.450864553451538,
"learning_rate": 1.1086521500854746e-05,
"loss": 0.8494,
"step": 198
},
{
"epoch": 0.9590361445783132,
"grad_norm": 2.5779266357421875,
"learning_rate": 1.1009187600273565e-05,
"loss": 0.7948,
"step": 199
},
{
"epoch": 0.963855421686747,
"grad_norm": 2.808676242828369,
"learning_rate": 1.0931792674840718e-05,
"loss": 0.8196,
"step": 200
},
{
"epoch": 0.9686746987951808,
"grad_norm": 2.3105671405792236,
"learning_rate": 1.0854341404571929e-05,
"loss": 0.7321,
"step": 201
},
{
"epoch": 0.9734939759036144,
"grad_norm": 2.495274543762207,
"learning_rate": 1.0776838472890065e-05,
"loss": 0.811,
"step": 202
},
{
"epoch": 0.9783132530120482,
"grad_norm": 2.6084389686584473,
"learning_rate": 1.0699288566341914e-05,
"loss": 0.7649,
"step": 203
},
{
"epoch": 0.983132530120482,
"grad_norm": 2.7586421966552734,
"learning_rate": 1.0621696374314807e-05,
"loss": 0.8226,
"step": 204
},
{
"epoch": 0.9879518072289156,
"grad_norm": 2.8034958839416504,
"learning_rate": 1.0544066588753044e-05,
"loss": 0.8318,
"step": 205
},
{
"epoch": 0.9927710843373494,
"grad_norm": 3.051496982574463,
"learning_rate": 1.0466403903874176e-05,
"loss": 0.8513,
"step": 206
},
{
"epoch": 0.9975903614457832,
"grad_norm": 2.5570504665374756,
"learning_rate": 1.0388713015885161e-05,
"loss": 0.786,
"step": 207
},
{
"epoch": 1.002409638554217,
"grad_norm": 2.5539052486419678,
"learning_rate": 1.031099862269837e-05,
"loss": 0.7932,
"step": 208
},
{
"epoch": 1.002409638554217,
"eval_loss": 0.8150397539138794,
"eval_runtime": 408.9584,
"eval_samples_per_second": 3.971,
"eval_steps_per_second": 0.993,
"step": 208
},
{
"epoch": 1.0072289156626506,
"grad_norm": 2.4404540061950684,
"learning_rate": 1.0233265423647523e-05,
"loss": 0.7868,
"step": 209
},
{
"epoch": 1.002409638554217,
"grad_norm": 2.5475072860717773,
"learning_rate": 1.0155518119203511e-05,
"loss": 0.6593,
"step": 210
},
{
"epoch": 1.0072289156626506,
"grad_norm": 2.1499063968658447,
"learning_rate": 1.0077761410690172e-05,
"loss": 0.5762,
"step": 211
},
{
"epoch": 1.0120481927710843,
"grad_norm": 2.4631576538085938,
"learning_rate": 1e-05,
"loss": 0.5896,
"step": 212
},
{
"epoch": 1.0168674698795181,
"grad_norm": 2.553774833679199,
"learning_rate": 9.92223858930983e-06,
"loss": 0.6091,
"step": 213
},
{
"epoch": 1.0216867469879518,
"grad_norm": 2.09922456741333,
"learning_rate": 9.844481880796492e-06,
"loss": 0.5918,
"step": 214
},
{
"epoch": 1.0265060240963855,
"grad_norm": 2.326040744781494,
"learning_rate": 9.766734576352478e-06,
"loss": 0.5406,
"step": 215
},
{
"epoch": 1.0313253012048194,
"grad_norm": 2.230414628982544,
"learning_rate": 9.689001377301634e-06,
"loss": 0.5337,
"step": 216
},
{
"epoch": 1.036144578313253,
"grad_norm": 2.1315929889678955,
"learning_rate": 9.61128698411484e-06,
"loss": 0.5076,
"step": 217
},
{
"epoch": 1.0409638554216867,
"grad_norm": 2.31592059135437,
"learning_rate": 9.533596096125826e-06,
"loss": 0.5451,
"step": 218
},
{
"epoch": 1.0457831325301206,
"grad_norm": 2.209798812866211,
"learning_rate": 9.45593341124696e-06,
"loss": 0.5725,
"step": 219
},
{
"epoch": 1.0506024096385542,
"grad_norm": 2.8493363857269287,
"learning_rate": 9.378303625685196e-06,
"loss": 0.5594,
"step": 220
},
{
"epoch": 1.0554216867469879,
"grad_norm": 2.948335647583008,
"learning_rate": 9.300711433658088e-06,
"loss": 0.4995,
"step": 221
},
{
"epoch": 1.0602409638554218,
"grad_norm": 2.581233263015747,
"learning_rate": 9.223161527109938e-06,
"loss": 0.5273,
"step": 222
},
{
"epoch": 1.0650602409638554,
"grad_norm": 2.436361312866211,
"learning_rate": 9.145658595428075e-06,
"loss": 0.5356,
"step": 223
},
{
"epoch": 1.069879518072289,
"grad_norm": 2.5706756114959717,
"learning_rate": 9.068207325159285e-06,
"loss": 0.5082,
"step": 224
},
{
"epoch": 1.074698795180723,
"grad_norm": 2.5452582836151123,
"learning_rate": 8.990812399726435e-06,
"loss": 0.5098,
"step": 225
},
{
"epoch": 1.0795180722891566,
"grad_norm": 2.2918717861175537,
"learning_rate": 8.913478499145255e-06,
"loss": 0.493,
"step": 226
},
{
"epoch": 1.0843373493975903,
"grad_norm": 2.837512969970703,
"learning_rate": 8.836210299741346e-06,
"loss": 0.5036,
"step": 227
},
{
"epoch": 1.0891566265060242,
"grad_norm": 2.7534472942352295,
"learning_rate": 8.759012473867407e-06,
"loss": 0.5333,
"step": 228
},
{
"epoch": 1.0939759036144578,
"grad_norm": 2.761195659637451,
"learning_rate": 8.681889689620699e-06,
"loss": 0.5186,
"step": 229
},
{
"epoch": 1.0987951807228915,
"grad_norm": 2.570779800415039,
"learning_rate": 8.604846610560771e-06,
"loss": 0.5496,
"step": 230
},
{
"epoch": 1.1036144578313254,
"grad_norm": 2.4266934394836426,
"learning_rate": 8.527887895427454e-06,
"loss": 0.5453,
"step": 231
},
{
"epoch": 1.108433734939759,
"grad_norm": 2.634979248046875,
"learning_rate": 8.451018197859153e-06,
"loss": 0.5121,
"step": 232
},
{
"epoch": 1.1132530120481927,
"grad_norm": 2.1403777599334717,
"learning_rate": 8.374242166111448e-06,
"loss": 0.5354,
"step": 233
},
{
"epoch": 1.1180722891566266,
"grad_norm": 2.7985663414001465,
"learning_rate": 8.297564442776014e-06,
"loss": 0.5565,
"step": 234
},
{
"epoch": 1.1228915662650603,
"grad_norm": 2.486628532409668,
"learning_rate": 8.22098966449988e-06,
"loss": 0.5428,
"step": 235
},
{
"epoch": 1.127710843373494,
"grad_norm": 2.3803107738494873,
"learning_rate": 8.144522461705067e-06,
"loss": 0.509,
"step": 236
},
{
"epoch": 1.1325301204819278,
"grad_norm": 2.7206227779388428,
"learning_rate": 8.068167458308582e-06,
"loss": 0.5339,
"step": 237
},
{
"epoch": 1.1373493975903615,
"grad_norm": 2.4364774227142334,
"learning_rate": 7.991929271442817e-06,
"loss": 0.5168,
"step": 238
},
{
"epoch": 1.1421686746987951,
"grad_norm": 2.6346094608306885,
"learning_rate": 7.915812511176348e-06,
"loss": 0.5861,
"step": 239
},
{
"epoch": 1.146987951807229,
"grad_norm": 2.683432102203369,
"learning_rate": 7.839821780235168e-06,
"loss": 0.5066,
"step": 240
},
{
"epoch": 1.1518072289156627,
"grad_norm": 2.444256544113159,
"learning_rate": 7.763961673724379e-06,
"loss": 0.498,
"step": 241
},
{
"epoch": 1.1566265060240963,
"grad_norm": 2.436868906021118,
"learning_rate": 7.688236778850307e-06,
"loss": 0.5013,
"step": 242
},
{
"epoch": 1.16144578313253,
"grad_norm": 2.420361042022705,
"learning_rate": 7.61265167464313e-06,
"loss": 0.506,
"step": 243
},
{
"epoch": 1.1662650602409639,
"grad_norm": 2.5880956649780273,
"learning_rate": 7.537210931679988e-06,
"loss": 0.5056,
"step": 244
},
{
"epoch": 1.1710843373493975,
"grad_norm": 3.139289140701294,
"learning_rate": 7.4619191118085955e-06,
"loss": 0.5481,
"step": 245
},
{
"epoch": 1.1759036144578312,
"grad_norm": 2.5460047721862793,
"learning_rate": 7.3867807678713965e-06,
"loss": 0.45,
"step": 246
},
{
"epoch": 1.180722891566265,
"grad_norm": 2.1692583560943604,
"learning_rate": 7.311800443430251e-06,
"loss": 0.4814,
"step": 247
},
{
"epoch": 1.1855421686746987,
"grad_norm": 2.748295307159424,
"learning_rate": 7.236982672491699e-06,
"loss": 0.5,
"step": 248
},
{
"epoch": 1.1903614457831324,
"grad_norm": 2.367530584335327,
"learning_rate": 7.162331979232784e-06,
"loss": 0.5303,
"step": 249
},
{
"epoch": 1.1951807228915663,
"grad_norm": 2.0665979385375977,
"learning_rate": 7.0878528777274814e-06,
"loss": 0.4724,
"step": 250
},
{
"epoch": 1.2,
"grad_norm": 2.257098913192749,
"learning_rate": 7.013549871673736e-06,
"loss": 0.5427,
"step": 251
},
{
"epoch": 1.2048192771084336,
"grad_norm": 2.7129740715026855,
"learning_rate": 6.939427454121128e-06,
"loss": 0.5241,
"step": 252
},
{
"epoch": 1.2096385542168675,
"grad_norm": 2.3564581871032715,
"learning_rate": 6.865490107199182e-06,
"loss": 0.5323,
"step": 253
},
{
"epoch": 1.2144578313253012,
"grad_norm": 2.4660191535949707,
"learning_rate": 6.791742301846325e-06,
"loss": 0.5461,
"step": 254
},
{
"epoch": 1.2192771084337348,
"grad_norm": 2.495473861694336,
"learning_rate": 6.718188497539554e-06,
"loss": 0.5202,
"step": 255
},
{
"epoch": 1.2240963855421687,
"grad_norm": 2.41900897026062,
"learning_rate": 6.644833142024752e-06,
"loss": 0.4786,
"step": 256
},
{
"epoch": 1.2289156626506024,
"grad_norm": 2.2576987743377686,
"learning_rate": 6.571680671047749e-06,
"loss": 0.5425,
"step": 257
},
{
"epoch": 1.233734939759036,
"grad_norm": 2.370042562484741,
"learning_rate": 6.498735508086094e-06,
"loss": 0.4975,
"step": 258
},
{
"epoch": 1.23855421686747,
"grad_norm": 2.3406431674957275,
"learning_rate": 6.426002064081565e-06,
"loss": 0.5147,
"step": 259
},
{
"epoch": 1.2433734939759036,
"grad_norm": 2.116950750350952,
"learning_rate": 6.35348473717345e-06,
"loss": 0.5234,
"step": 260
},
{
"epoch": 1.2433734939759036,
"eval_loss": 0.8304641842842102,
"eval_runtime": 409.2944,
"eval_samples_per_second": 3.968,
"eval_steps_per_second": 0.992,
"step": 260
},
{
"epoch": 1.2481927710843372,
"grad_norm": 2.3892273902893066,
"learning_rate": 6.281187912432587e-06,
"loss": 0.4892,
"step": 261
},
{
"epoch": 1.2530120481927711,
"grad_norm": 2.5329415798187256,
"learning_rate": 6.209115961596208e-06,
"loss": 0.5576,
"step": 262
},
{
"epoch": 1.2578313253012048,
"grad_norm": 2.706501007080078,
"learning_rate": 6.137273242803581e-06,
"loss": 0.5035,
"step": 263
},
{
"epoch": 1.2626506024096384,
"grad_norm": 2.6569039821624756,
"learning_rate": 6.065664100332478e-06,
"loss": 0.5538,
"step": 264
},
{
"epoch": 1.2674698795180723,
"grad_norm": 2.657332420349121,
"learning_rate": 5.994292864336473e-06,
"loss": 0.5215,
"step": 265
},
{
"epoch": 1.272289156626506,
"grad_norm": 2.228058099746704,
"learning_rate": 5.923163850583114e-06,
"loss": 0.5005,
"step": 266
},
{
"epoch": 1.2771084337349397,
"grad_norm": 2.5667221546173096,
"learning_rate": 5.852281360192933e-06,
"loss": 0.471,
"step": 267
},
{
"epoch": 1.2819277108433735,
"grad_norm": 2.6076254844665527,
"learning_rate": 5.781649679379379e-06,
"loss": 0.4565,
"step": 268
},
{
"epoch": 1.2867469879518072,
"grad_norm": 2.777376651763916,
"learning_rate": 5.711273079189621e-06,
"loss": 0.4975,
"step": 269
},
{
"epoch": 1.2915662650602409,
"grad_norm": 2.7435245513916016,
"learning_rate": 5.64115581524629e-06,
"loss": 0.4969,
"step": 270
},
{
"epoch": 1.2963855421686747,
"grad_norm": 2.2904560565948486,
"learning_rate": 5.571302127490133e-06,
"loss": 0.5133,
"step": 271
},
{
"epoch": 1.3012048192771084,
"grad_norm": 2.808234930038452,
"learning_rate": 5.501716239923642e-06,
"loss": 0.5225,
"step": 272
},
{
"epoch": 1.306024096385542,
"grad_norm": 2.489271879196167,
"learning_rate": 5.432402360355616e-06,
"loss": 0.5022,
"step": 273
},
{
"epoch": 1.310843373493976,
"grad_norm": 2.973238229751587,
"learning_rate": 5.3633646801467255e-06,
"loss": 0.5028,
"step": 274
},
{
"epoch": 1.3156626506024096,
"grad_norm": 2.9348108768463135,
"learning_rate": 5.294607373956071e-06,
"loss": 0.5381,
"step": 275
},
{
"epoch": 1.3204819277108433,
"grad_norm": 2.496760368347168,
"learning_rate": 5.226134599488728e-06,
"loss": 0.5031,
"step": 276
},
{
"epoch": 1.3253012048192772,
"grad_norm": 2.667630910873413,
"learning_rate": 5.15795049724435e-06,
"loss": 0.4783,
"step": 277
},
{
"epoch": 1.3301204819277108,
"grad_norm": 2.5155699253082275,
"learning_rate": 5.090059190266779e-06,
"loss": 0.5119,
"step": 278
},
{
"epoch": 1.3349397590361445,
"grad_norm": 2.7496049404144287,
"learning_rate": 5.022464783894743e-06,
"loss": 0.5757,
"step": 279
},
{
"epoch": 1.3397590361445784,
"grad_norm": 2.7545154094696045,
"learning_rate": 4.955171365513603e-06,
"loss": 0.516,
"step": 280
},
{
"epoch": 1.344578313253012,
"grad_norm": 2.573042631149292,
"learning_rate": 4.88818300430819e-06,
"loss": 0.5566,
"step": 281
},
{
"epoch": 1.3493975903614457,
"grad_norm": 2.389958381652832,
"learning_rate": 4.821503751016746e-06,
"loss": 0.4836,
"step": 282
},
{
"epoch": 1.3542168674698796,
"grad_norm": 2.508965015411377,
"learning_rate": 4.7551376376859794e-06,
"loss": 0.5694,
"step": 283
},
{
"epoch": 1.3590361445783132,
"grad_norm": 2.017404794692993,
"learning_rate": 4.689088677427249e-06,
"loss": 0.4656,
"step": 284
},
{
"epoch": 1.363855421686747,
"grad_norm": 3.2286880016326904,
"learning_rate": 4.623360864173893e-06,
"loss": 0.4912,
"step": 285
},
{
"epoch": 1.3686746987951808,
"grad_norm": 2.6164350509643555,
"learning_rate": 4.557958172439726e-06,
"loss": 0.5043,
"step": 286
},
{
"epoch": 1.3734939759036144,
"grad_norm": 2.4611318111419678,
"learning_rate": 4.492884557078688e-06,
"loss": 0.5335,
"step": 287
},
{
"epoch": 1.378313253012048,
"grad_norm": 2.9601011276245117,
"learning_rate": 4.4281439530457174e-06,
"loss": 0.4957,
"step": 288
},
{
"epoch": 1.383132530120482,
"grad_norm": 2.7370502948760986,
"learning_rate": 4.36374027515878e-06,
"loss": 0.4763,
"step": 289
},
{
"epoch": 1.3879518072289156,
"grad_norm": 2.5553810596466064,
"learning_rate": 4.299677417862174e-06,
"loss": 0.5059,
"step": 290
},
{
"epoch": 1.3927710843373493,
"grad_norm": 2.570694923400879,
"learning_rate": 4.2359592549910145e-06,
"loss": 0.5084,
"step": 291
},
{
"epoch": 1.3975903614457832,
"grad_norm": 2.36863112449646,
"learning_rate": 4.172589639536992e-06,
"loss": 0.4869,
"step": 292
},
{
"epoch": 1.4024096385542169,
"grad_norm": 2.5481040477752686,
"learning_rate": 4.109572403415386e-06,
"loss": 0.4549,
"step": 293
},
{
"epoch": 1.4072289156626505,
"grad_norm": 2.881804943084717,
"learning_rate": 4.046911357233343e-06,
"loss": 0.471,
"step": 294
},
{
"epoch": 1.4120481927710844,
"grad_norm": 2.4108259677886963,
"learning_rate": 3.984610290059467e-06,
"loss": 0.5182,
"step": 295
},
{
"epoch": 1.416867469879518,
"grad_norm": 2.496755599975586,
"learning_rate": 3.9226729691946865e-06,
"loss": 0.487,
"step": 296
},
{
"epoch": 1.4216867469879517,
"grad_norm": 2.440626859664917,
"learning_rate": 3.861103139944448e-06,
"loss": 0.4929,
"step": 297
},
{
"epoch": 1.4265060240963856,
"grad_norm": 2.4067342281341553,
"learning_rate": 3.799904525392251e-06,
"loss": 0.4646,
"step": 298
},
{
"epoch": 1.4313253012048193,
"grad_norm": 2.7123348712921143,
"learning_rate": 3.739080826174498e-06,
"loss": 0.4928,
"step": 299
},
{
"epoch": 1.436144578313253,
"grad_norm": 2.511476516723633,
"learning_rate": 3.6786357202567367e-06,
"loss": 0.5088,
"step": 300
},
{
"epoch": 1.4409638554216868,
"grad_norm": 2.1973259449005127,
"learning_rate": 3.618572862711247e-06,
"loss": 0.4619,
"step": 301
},
{
"epoch": 1.4457831325301205,
"grad_norm": 2.9753270149230957,
"learning_rate": 3.558895885496023e-06,
"loss": 0.4815,
"step": 302
},
{
"epoch": 1.4506024096385541,
"grad_norm": 2.129323959350586,
"learning_rate": 3.4996083972351514e-06,
"loss": 0.4618,
"step": 303
},
{
"epoch": 1.455421686746988,
"grad_norm": 2.362595319747925,
"learning_rate": 3.440713983000601e-06,
"loss": 0.4782,
"step": 304
},
{
"epoch": 1.4602409638554217,
"grad_norm": 2.060520648956299,
"learning_rate": 3.3822162040954355e-06,
"loss": 0.5068,
"step": 305
},
{
"epoch": 1.4650602409638553,
"grad_norm": 2.2528958320617676,
"learning_rate": 3.3241185978384636e-06,
"loss": 0.5308,
"step": 306
},
{
"epoch": 1.4698795180722892,
"grad_norm": 2.2133140563964844,
"learning_rate": 3.266424677350346e-06,
"loss": 0.5077,
"step": 307
},
{
"epoch": 1.4746987951807229,
"grad_norm": 2.223902463912964,
"learning_rate": 3.209137931341143e-06,
"loss": 0.4697,
"step": 308
},
{
"epoch": 1.4795180722891565,
"grad_norm": 2.8000707626342773,
"learning_rate": 3.1522618238993728e-06,
"loss": 0.4934,
"step": 309
},
{
"epoch": 1.4843373493975904,
"grad_norm": 2.3651952743530273,
"learning_rate": 3.0957997942825337e-06,
"loss": 0.5103,
"step": 310
},
{
"epoch": 1.489156626506024,
"grad_norm": 2.443889856338501,
"learning_rate": 3.039755256709134e-06,
"loss": 0.4617,
"step": 311
},
{
"epoch": 1.4939759036144578,
"grad_norm": 2.4047086238861084,
"learning_rate": 2.9841316001522345e-06,
"loss": 0.4875,
"step": 312
},
{
"epoch": 1.4939759036144578,
"eval_loss": 0.8194815516471863,
"eval_runtime": 409.5498,
"eval_samples_per_second": 3.965,
"eval_steps_per_second": 0.991,
"step": 312
},
{
"epoch": 1.4987951807228916,
"grad_norm": 2.333664655685425,
"learning_rate": 2.9289321881345257e-06,
"loss": 0.4577,
"step": 313
},
{
"epoch": 1.5036144578313253,
"grad_norm": 2.499919891357422,
"learning_rate": 2.8741603585249312e-06,
"loss": 0.4762,
"step": 314
},
{
"epoch": 1.508433734939759,
"grad_norm": 2.4571406841278076,
"learning_rate": 2.8198194233367747e-06,
"loss": 0.4675,
"step": 315
},
{
"epoch": 1.5132530120481928,
"grad_norm": 2.5920493602752686,
"learning_rate": 2.7659126685275028e-06,
"loss": 0.5091,
"step": 316
},
{
"epoch": 1.5180722891566265,
"grad_norm": 2.5612003803253174,
"learning_rate": 2.7124433537999838e-06,
"loss": 0.5184,
"step": 317
},
{
"epoch": 1.5228915662650602,
"grad_norm": 2.8769936561584473,
"learning_rate": 2.6594147124053983e-06,
"loss": 0.479,
"step": 318
},
{
"epoch": 1.527710843373494,
"grad_norm": 2.339310884475708,
"learning_rate": 2.6068299509477267e-06,
"loss": 0.4722,
"step": 319
},
{
"epoch": 1.5325301204819277,
"grad_norm": 2.3183751106262207,
"learning_rate": 2.5546922491898497e-06,
"loss": 0.5014,
"step": 320
},
{
"epoch": 1.5373493975903614,
"grad_norm": 2.3917415142059326,
"learning_rate": 2.5030047598612585e-06,
"loss": 0.4792,
"step": 321
},
{
"epoch": 1.5421686746987953,
"grad_norm": 2.690859079360962,
"learning_rate": 2.451770608467432e-06,
"loss": 0.4994,
"step": 322
},
{
"epoch": 1.546987951807229,
"grad_norm": 2.50500750541687,
"learning_rate": 2.400992893100822e-06,
"loss": 0.5582,
"step": 323
},
{
"epoch": 1.5518072289156626,
"grad_norm": 2.4828755855560303,
"learning_rate": 2.3506746842535244e-06,
"loss": 0.4803,
"step": 324
},
{
"epoch": 1.5566265060240965,
"grad_norm": 2.1874091625213623,
"learning_rate": 2.3008190246316033e-06,
"loss": 0.508,
"step": 325
},
{
"epoch": 1.5614457831325301,
"grad_norm": 2.585437774658203,
"learning_rate": 2.251428928971102e-06,
"loss": 0.4728,
"step": 326
},
{
"epoch": 1.5662650602409638,
"grad_norm": 2.979546070098877,
"learning_rate": 2.2025073838557454e-06,
"loss": 0.5259,
"step": 327
},
{
"epoch": 1.5710843373493977,
"grad_norm": 2.4768764972686768,
"learning_rate": 2.1540573475363402e-06,
"loss": 0.4947,
"step": 328
},
{
"epoch": 1.5759036144578313,
"grad_norm": 2.252917528152466,
"learning_rate": 2.106081749751897e-06,
"loss": 0.4853,
"step": 329
},
{
"epoch": 1.580722891566265,
"grad_norm": 2.5890207290649414,
"learning_rate": 2.058583491552465e-06,
"loss": 0.4351,
"step": 330
},
{
"epoch": 1.5855421686746989,
"grad_norm": 2.643648862838745,
"learning_rate": 2.011565445123711e-06,
"loss": 0.4515,
"step": 331
},
{
"epoch": 1.5903614457831325,
"grad_norm": 2.6879475116729736,
"learning_rate": 1.9650304536132426e-06,
"loss": 0.5402,
"step": 332
},
{
"epoch": 1.5951807228915662,
"grad_norm": 2.378323793411255,
"learning_rate": 1.918981330958678e-06,
"loss": 0.5162,
"step": 333
},
{
"epoch": 1.6,
"grad_norm": 2.823969602584839,
"learning_rate": 1.8734208617174986e-06,
"loss": 0.4819,
"step": 334
},
{
"epoch": 1.6048192771084338,
"grad_norm": 3.2256662845611572,
"learning_rate": 1.8283518008986566e-06,
"loss": 0.5214,
"step": 335
},
{
"epoch": 1.6096385542168674,
"grad_norm": 2.3168673515319824,
"learning_rate": 1.7837768737959937e-06,
"loss": 0.5203,
"step": 336
},
{
"epoch": 1.6144578313253013,
"grad_norm": 2.1533286571502686,
"learning_rate": 1.7396987758234418e-06,
"loss": 0.4727,
"step": 337
},
{
"epoch": 1.619277108433735,
"grad_norm": 2.7853829860687256,
"learning_rate": 1.6961201723520248e-06,
"loss": 0.436,
"step": 338
},
{
"epoch": 1.6240963855421686,
"grad_norm": 2.411254405975342,
"learning_rate": 1.6530436985486997e-06,
"loss": 0.4624,
"step": 339
},
{
"epoch": 1.6289156626506025,
"grad_norm": 2.624218225479126,
"learning_rate": 1.6104719592169905e-06,
"loss": 0.4959,
"step": 340
},
{
"epoch": 1.6337349397590362,
"grad_norm": 2.527923107147217,
"learning_rate": 1.5684075286394983e-06,
"loss": 0.4542,
"step": 341
},
{
"epoch": 1.6385542168674698,
"grad_norm": 2.568006992340088,
"learning_rate": 1.5268529504222262e-06,
"loss": 0.5073,
"step": 342
},
{
"epoch": 1.6433734939759037,
"grad_norm": 2.477860689163208,
"learning_rate": 1.485810737340767e-06,
"loss": 0.4342,
"step": 343
},
{
"epoch": 1.6481927710843374,
"grad_norm": 2.5466296672821045,
"learning_rate": 1.4452833711883629e-06,
"loss": 0.4665,
"step": 344
},
{
"epoch": 1.653012048192771,
"grad_norm": 3.1020963191986084,
"learning_rate": 1.405273302625828e-06,
"loss": 0.46,
"step": 345
},
{
"epoch": 1.657831325301205,
"grad_norm": 2.133432149887085,
"learning_rate": 1.3657829510333653e-06,
"loss": 0.4656,
"step": 346
},
{
"epoch": 1.6626506024096386,
"grad_norm": 2.116241216659546,
"learning_rate": 1.326814704364262e-06,
"loss": 0.4492,
"step": 347
},
{
"epoch": 1.6674698795180722,
"grad_norm": 2.3933207988739014,
"learning_rate": 1.2883709190004956e-06,
"loss": 0.4416,
"step": 348
},
{
"epoch": 1.6722891566265061,
"grad_norm": 2.776743173599243,
"learning_rate": 1.2504539196102438e-06,
"loss": 0.498,
"step": 349
},
{
"epoch": 1.6771084337349398,
"grad_norm": 2.7146615982055664,
"learning_rate": 1.2130659990073146e-06,
"loss": 0.4832,
"step": 350
},
{
"epoch": 1.6819277108433734,
"grad_norm": 2.236459255218506,
"learning_rate": 1.176209418012495e-06,
"loss": 0.4773,
"step": 351
},
{
"epoch": 1.6867469879518073,
"grad_norm": 2.7243292331695557,
"learning_rate": 1.1398864053168534e-06,
"loss": 0.5527,
"step": 352
},
{
"epoch": 1.691566265060241,
"grad_norm": 2.543335199356079,
"learning_rate": 1.1040991573469629e-06,
"loss": 0.4674,
"step": 353
},
{
"epoch": 1.6963855421686747,
"grad_norm": 2.4757049083709717,
"learning_rate": 1.0688498381320855e-06,
"loss": 0.4628,
"step": 354
},
{
"epoch": 1.7012048192771085,
"grad_norm": 2.4857890605926514,
"learning_rate": 1.0341405791733183e-06,
"loss": 0.4914,
"step": 355
},
{
"epoch": 1.7060240963855422,
"grad_norm": 2.2958590984344482,
"learning_rate": 9.999734793146998e-07,
"loss": 0.4194,
"step": 356
},
{
"epoch": 1.7108433734939759,
"grad_norm": 2.729526996612549,
"learning_rate": 9.663506046162986e-07,
"loss": 0.4839,
"step": 357
},
{
"epoch": 1.7156626506024097,
"grad_norm": 2.457387924194336,
"learning_rate": 9.332739882292752e-07,
"loss": 0.402,
"step": 358
},
{
"epoch": 1.7204819277108434,
"grad_norm": 2.145829439163208,
"learning_rate": 9.0074563027294e-07,
"loss": 0.4728,
"step": 359
},
{
"epoch": 1.725301204819277,
"grad_norm": 2.3702030181884766,
"learning_rate": 8.687674977138116e-07,
"loss": 0.484,
"step": 360
},
{
"epoch": 1.730120481927711,
"grad_norm": 2.976496934890747,
"learning_rate": 8.373415242466721e-07,
"loss": 0.4865,
"step": 361
},
{
"epoch": 1.7349397590361446,
"grad_norm": 2.717363119125366,
"learning_rate": 8.06469610177636e-07,
"loss": 0.434,
"step": 362
},
{
"epoch": 1.7397590361445783,
"grad_norm": 2.484947443008423,
"learning_rate": 7.761536223092459e-07,
"loss": 0.4643,
"step": 363
},
{
"epoch": 1.7445783132530122,
"grad_norm": 2.8820717334747314,
"learning_rate": 7.463953938275859e-07,
"loss": 0.518,
"step": 364
},
{
"epoch": 1.7445783132530122,
"eval_loss": 0.81573086977005,
"eval_runtime": 409.3624,
"eval_samples_per_second": 3.967,
"eval_steps_per_second": 0.992,
"step": 364
},
{
"epoch": 1.7493975903614458,
"grad_norm": 2.5007619857788086,
"learning_rate": 7.171967241914224e-07,
"loss": 0.5117,
"step": 365
},
{
"epoch": 1.7542168674698795,
"grad_norm": 2.8851635456085205,
"learning_rate": 6.885593790234057e-07,
"loss": 0.433,
"step": 366
},
{
"epoch": 1.7590361445783134,
"grad_norm": 2.6130123138427734,
"learning_rate": 6.604850900032956e-07,
"loss": 0.4695,
"step": 367
},
{
"epoch": 1.763855421686747,
"grad_norm": 2.500542402267456,
"learning_rate": 6.329755547632499e-07,
"loss": 0.5145,
"step": 368
},
{
"epoch": 1.7686746987951807,
"grad_norm": 2.8018507957458496,
"learning_rate": 6.0603243678517e-07,
"loss": 0.4638,
"step": 369
},
{
"epoch": 1.7734939759036146,
"grad_norm": 2.414989709854126,
"learning_rate": 5.796573653001091e-07,
"loss": 0.4589,
"step": 370
},
{
"epoch": 1.7783132530120482,
"grad_norm": 2.3818142414093018,
"learning_rate": 5.538519351897575e-07,
"loss": 0.4495,
"step": 371
},
{
"epoch": 1.783132530120482,
"grad_norm": 2.182438373565674,
"learning_rate": 5.286177068899989e-07,
"loss": 0.4818,
"step": 372
},
{
"epoch": 1.7879518072289158,
"grad_norm": 2.615506649017334,
"learning_rate": 5.039562062965508e-07,
"loss": 0.4734,
"step": 373
},
{
"epoch": 1.7927710843373494,
"grad_norm": 2.4921176433563232,
"learning_rate": 4.798689246727006e-07,
"loss": 0.4741,
"step": 374
},
{
"epoch": 1.797590361445783,
"grad_norm": 2.8143961429595947,
"learning_rate": 4.563573185591219e-07,
"loss": 0.4854,
"step": 375
},
{
"epoch": 1.802409638554217,
"grad_norm": 2.3554911613464355,
"learning_rate": 4.3342280968580287e-07,
"loss": 0.4648,
"step": 376
},
{
"epoch": 1.8072289156626506,
"grad_norm": 2.3002068996429443,
"learning_rate": 4.11066784886075e-07,
"loss": 0.5014,
"step": 377
},
{
"epoch": 1.8120481927710843,
"grad_norm": 2.49564266204834,
"learning_rate": 3.8929059601275463e-07,
"loss": 0.4487,
"step": 378
},
{
"epoch": 1.8168674698795182,
"grad_norm": 2.61617112159729,
"learning_rate": 3.6809555985639065e-07,
"loss": 0.5117,
"step": 379
},
{
"epoch": 1.8216867469879519,
"grad_norm": 2.6199090480804443,
"learning_rate": 3.474829580656436e-07,
"loss": 0.462,
"step": 380
},
{
"epoch": 1.8265060240963855,
"grad_norm": 2.755880355834961,
"learning_rate": 3.2745403706978876e-07,
"loss": 0.4853,
"step": 381
},
{
"epoch": 1.8313253012048194,
"grad_norm": 2.9723000526428223,
"learning_rate": 3.080100080033388e-07,
"loss": 0.4672,
"step": 382
},
{
"epoch": 1.836144578313253,
"grad_norm": 2.1068785190582275,
"learning_rate": 2.8915204663281014e-07,
"loss": 0.4219,
"step": 383
},
{
"epoch": 1.8409638554216867,
"grad_norm": 2.6642982959747314,
"learning_rate": 2.708812932856253e-07,
"loss": 0.5069,
"step": 384
},
{
"epoch": 1.8457831325301206,
"grad_norm": 2.3558390140533447,
"learning_rate": 2.5319885278115907e-07,
"loss": 0.5048,
"step": 385
},
{
"epoch": 1.8506024096385543,
"grad_norm": 2.335449457168579,
"learning_rate": 2.3610579436392999e-07,
"loss": 0.429,
"step": 386
},
{
"epoch": 1.855421686746988,
"grad_norm": 2.720520496368408,
"learning_rate": 2.1960315163894075e-07,
"loss": 0.4955,
"step": 387
},
{
"epoch": 1.8602409638554218,
"grad_norm": 2.581988573074341,
"learning_rate": 2.036919225091827e-07,
"loss": 0.4604,
"step": 388
},
{
"epoch": 1.8650602409638555,
"grad_norm": 2.5386810302734375,
"learning_rate": 1.8837306911529185e-07,
"loss": 0.4738,
"step": 389
},
{
"epoch": 1.8698795180722891,
"grad_norm": 2.541105031967163,
"learning_rate": 1.7364751777736334e-07,
"loss": 0.498,
"step": 390
},
{
"epoch": 1.874698795180723,
"grad_norm": 2.8123772144317627,
"learning_rate": 1.595161589389449e-07,
"loss": 0.443,
"step": 391
},
{
"epoch": 1.8795180722891565,
"grad_norm": 2.696023464202881,
"learning_rate": 1.459798471131868e-07,
"loss": 0.4207,
"step": 392
},
{
"epoch": 1.8843373493975903,
"grad_norm": 2.6544225215911865,
"learning_rate": 1.3303940083117527e-07,
"loss": 0.4832,
"step": 393
},
{
"epoch": 1.8891566265060242,
"grad_norm": 3.1610257625579834,
"learning_rate": 1.206956025924333e-07,
"loss": 0.5528,
"step": 394
},
{
"epoch": 1.8939759036144577,
"grad_norm": 2.7090446949005127,
"learning_rate": 1.0894919881760168e-07,
"loss": 0.4851,
"step": 395
},
{
"epoch": 1.8987951807228916,
"grad_norm": 2.694115400314331,
"learning_rate": 9.780089980330643e-08,
"loss": 0.5122,
"step": 396
},
{
"epoch": 1.9036144578313254,
"grad_norm": 2.570265531539917,
"learning_rate": 8.725137967920739e-08,
"loss": 0.4875,
"step": 397
},
{
"epoch": 1.9084337349397589,
"grad_norm": 2.4135944843292236,
"learning_rate": 7.730127636723539e-08,
"loss": 0.4679,
"step": 398
},
{
"epoch": 1.9132530120481928,
"grad_norm": 2.6173839569091797,
"learning_rate": 6.795119154301199e-08,
"loss": 0.4829,
"step": 399
},
{
"epoch": 1.9180722891566266,
"grad_norm": 2.9347667694091797,
"learning_rate": 5.920169059947412e-08,
"loss": 0.4665,
"step": 400
},
{
"epoch": 1.92289156626506,
"grad_norm": 2.5810000896453857,
"learning_rate": 5.105330261267916e-08,
"loss": 0.4994,
"step": 401
},
{
"epoch": 1.927710843373494,
"grad_norm": 2.610971450805664,
"learning_rate": 4.350652030981395e-08,
"loss": 0.4584,
"step": 402
},
{
"epoch": 1.9325301204819278,
"grad_norm": 2.3277273178100586,
"learning_rate": 3.6561800039403016e-08,
"loss": 0.4905,
"step": 403
},
{
"epoch": 1.9373493975903613,
"grad_norm": 2.5518689155578613,
"learning_rate": 3.0219561743707326e-08,
"loss": 0.4783,
"step": 404
},
{
"epoch": 1.9421686746987952,
"grad_norm": 2.45817232131958,
"learning_rate": 2.4480188933336812e-08,
"loss": 0.4618,
"step": 405
},
{
"epoch": 1.946987951807229,
"grad_norm": 2.610630750656128,
"learning_rate": 1.9344028664056715e-08,
"loss": 0.4827,
"step": 406
},
{
"epoch": 1.9518072289156625,
"grad_norm": 2.5346062183380127,
"learning_rate": 1.4811391515799911e-08,
"loss": 0.5063,
"step": 407
},
{
"epoch": 1.9566265060240964,
"grad_norm": 2.244114637374878,
"learning_rate": 1.0882551573891953e-08,
"loss": 0.465,
"step": 408
},
{
"epoch": 1.9614457831325303,
"grad_norm": 2.285471200942993,
"learning_rate": 7.557746412468758e-09,
"loss": 0.4649,
"step": 409
},
{
"epoch": 1.9662650602409637,
"grad_norm": 2.321476936340332,
"learning_rate": 4.837177080119215e-09,
"loss": 0.4664,
"step": 410
},
{
"epoch": 1.9710843373493976,
"grad_norm": 2.40159273147583,
"learning_rate": 2.7210080877237978e-09,
"loss": 0.497,
"step": 411
},
{
"epoch": 1.9759036144578315,
"grad_norm": 2.6286189556121826,
"learning_rate": 1.209367398504746e-09,
"loss": 0.5062,
"step": 412
},
{
"epoch": 1.980722891566265,
"grad_norm": 2.8081414699554443,
"learning_rate": 3.023464202944748e-10,
"loss": 0.503,
"step": 413
},
{
"epoch": 1.9855421686746988,
"grad_norm": 2.3685240745544434,
"learning_rate": 0.0,
"loss": 0.4999,
"step": 414
}
],
"logging_steps": 1,
"max_steps": 414,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 104,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.2766188277900247e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}