{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 1.0, |
|
"eval_steps": 50, |
|
"global_step": 436, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.022935779816513763, |
|
"grad_norm": 5.594593346811413, |
|
"learning_rate": 1.1363636363636363e-07, |
|
"logits/chosen": -2.619579315185547, |
|
"logits/rejected": -2.552640676498413, |
|
"logps/chosen": -265.4322814941406, |
|
"logps/rejected": -236.1514434814453, |
|
"loss": 0.1942, |
|
"rewards/accuracies": 0.42500001192092896, |
|
"rewards/chosen": 0.0001355366694042459, |
|
"rewards/margins": 0.0003500286547932774, |
|
"rewards/rejected": -0.00021449197083711624, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.045871559633027525, |
|
"grad_norm": 5.257491443462003, |
|
"learning_rate": 2.2727272727272726e-07, |
|
"logits/chosen": -2.6575067043304443, |
|
"logits/rejected": -2.575622081756592, |
|
"logps/chosen": -298.8291320800781, |
|
"logps/rejected": -274.30499267578125, |
|
"loss": 0.1958, |
|
"rewards/accuracies": 0.6000000238418579, |
|
"rewards/chosen": -0.0004384421627037227, |
|
"rewards/margins": 0.0015119289746508002, |
|
"rewards/rejected": -0.001950371079146862, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.06880733944954129, |
|
"grad_norm": 4.9882040114909, |
|
"learning_rate": 3.4090909090909085e-07, |
|
"logits/chosen": -2.6754746437072754, |
|
"logits/rejected": -2.60184383392334, |
|
"logps/chosen": -290.3965759277344, |
|
"logps/rejected": -234.4066619873047, |
|
"loss": 0.1923, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": 0.009362037293612957, |
|
"rewards/margins": 0.013609759509563446, |
|
"rewards/rejected": -0.004247722681611776, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.09174311926605505, |
|
"grad_norm": 5.21855757124415, |
|
"learning_rate": 4.545454545454545e-07, |
|
"logits/chosen": -2.6603915691375732, |
|
"logits/rejected": -2.610896348953247, |
|
"logps/chosen": -281.05230712890625, |
|
"logps/rejected": -267.76873779296875, |
|
"loss": 0.1836, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": 0.04116874188184738, |
|
"rewards/margins": 0.04092608764767647, |
|
"rewards/rejected": 0.00024265461252070963, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.11467889908256881, |
|
"grad_norm": 5.5623018036583085, |
|
"learning_rate": 4.997110275491701e-07, |
|
"logits/chosen": -2.619241714477539, |
|
"logits/rejected": -2.6116530895233154, |
|
"logps/chosen": -293.481689453125, |
|
"logps/rejected": -303.96014404296875, |
|
"loss": 0.1732, |
|
"rewards/accuracies": 0.6875, |
|
"rewards/chosen": 0.02769497036933899, |
|
"rewards/margins": 0.07545463740825653, |
|
"rewards/rejected": -0.047759659588336945, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.11467889908256881, |
|
"eval_logits/chosen": -2.5722098350524902, |
|
"eval_logits/rejected": -2.492361068725586, |
|
"eval_logps/chosen": -283.61468505859375, |
|
"eval_logps/rejected": -257.6287841796875, |
|
"eval_loss": 0.16397468745708466, |
|
"eval_rewards/accuracies": 0.7068965435028076, |
|
"eval_rewards/chosen": 0.014754011295735836, |
|
"eval_rewards/margins": 0.12581734359264374, |
|
"eval_rewards/rejected": -0.11106333881616592, |
|
"eval_runtime": 94.5224, |
|
"eval_samples_per_second": 19.234, |
|
"eval_steps_per_second": 0.307, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.13761467889908258, |
|
"grad_norm": 7.227949877380974, |
|
"learning_rate": 4.979475034558115e-07, |
|
"logits/chosen": -2.5612680912017822, |
|
"logits/rejected": -2.5027718544006348, |
|
"logps/chosen": -293.4398193359375, |
|
"logps/rejected": -273.11016845703125, |
|
"loss": 0.1593, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": -0.03955172747373581, |
|
"rewards/margins": 0.11598478257656097, |
|
"rewards/rejected": -0.155536487698555, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.16055045871559634, |
|
"grad_norm": 7.344770125290469, |
|
"learning_rate": 4.945923025551788e-07, |
|
"logits/chosen": -2.451911449432373, |
|
"logits/rejected": -2.4021246433258057, |
|
"logps/chosen": -327.6131591796875, |
|
"logps/rejected": -284.70257568359375, |
|
"loss": 0.1564, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": -0.00042213575216010213, |
|
"rewards/margins": 0.2325272113084793, |
|
"rewards/rejected": -0.23294934630393982, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.1834862385321101, |
|
"grad_norm": 8.03952466935575, |
|
"learning_rate": 4.896669632591651e-07, |
|
"logits/chosen": -2.312901020050049, |
|
"logits/rejected": -2.1888813972473145, |
|
"logps/chosen": -298.2128601074219, |
|
"logps/rejected": -291.5619201660156, |
|
"loss": 0.1475, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -0.1236300840973854, |
|
"rewards/margins": 0.2654283344745636, |
|
"rewards/rejected": -0.3890584111213684, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.20642201834862386, |
|
"grad_norm": 10.413197257520066, |
|
"learning_rate": 4.832031033425662e-07, |
|
"logits/chosen": -1.9742157459259033, |
|
"logits/rejected": -1.9053027629852295, |
|
"logps/chosen": -299.2644958496094, |
|
"logps/rejected": -292.5104064941406, |
|
"loss": 0.1436, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": -0.11671074479818344, |
|
"rewards/margins": 0.34038084745407104, |
|
"rewards/rejected": -0.4570915102958679, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.22935779816513763, |
|
"grad_norm": 8.350562582651383, |
|
"learning_rate": 4.752422169756047e-07, |
|
"logits/chosen": -1.6017181873321533, |
|
"logits/rejected": -1.5160282850265503, |
|
"logps/chosen": -289.7087097167969, |
|
"logps/rejected": -317.5405578613281, |
|
"loss": 0.1403, |
|
"rewards/accuracies": 0.668749988079071, |
|
"rewards/chosen": -0.17284031212329865, |
|
"rewards/margins": 0.28328073024749756, |
|
"rewards/rejected": -0.4561210572719574, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.22935779816513763, |
|
"eval_logits/chosen": -2.056002616882324, |
|
"eval_logits/rejected": -1.8343770503997803, |
|
"eval_logps/chosen": -298.7873229980469, |
|
"eval_logps/rejected": -293.1888427734375, |
|
"eval_loss": 0.1361575871706009, |
|
"eval_rewards/accuracies": 0.693965494632721, |
|
"eval_rewards/chosen": -0.13697203993797302, |
|
"eval_rewards/margins": 0.3296918570995331, |
|
"eval_rewards/rejected": -0.4666639268398285, |
|
"eval_runtime": 95.6937, |
|
"eval_samples_per_second": 18.998, |
|
"eval_steps_per_second": 0.303, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.25229357798165136, |
|
"grad_norm": 8.176356995673753, |
|
"learning_rate": 4.658354083558188e-07, |
|
"logits/chosen": -2.1571879386901855, |
|
"logits/rejected": -1.9664814472198486, |
|
"logps/chosen": -285.4144287109375, |
|
"logps/rejected": -280.06732177734375, |
|
"loss": 0.1397, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": -0.2022629678249359, |
|
"rewards/margins": 0.2610785961151123, |
|
"rewards/rejected": -0.46334153413772583, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.27522935779816515, |
|
"grad_norm": 11.47061183691114, |
|
"learning_rate": 4.550430636492389e-07, |
|
"logits/chosen": -1.4951406717300415, |
|
"logits/rejected": -1.340619444847107, |
|
"logps/chosen": -316.77838134765625, |
|
"logps/rejected": -329.01495361328125, |
|
"loss": 0.1386, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": -0.4095904231071472, |
|
"rewards/margins": 0.3659195899963379, |
|
"rewards/rejected": -0.7755100131034851, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.2981651376146789, |
|
"grad_norm": 12.47670192079271, |
|
"learning_rate": 4.429344633468004e-07, |
|
"logits/chosen": -1.376880168914795, |
|
"logits/rejected": -1.1415055990219116, |
|
"logps/chosen": -303.68109130859375, |
|
"logps/rejected": -326.821533203125, |
|
"loss": 0.1383, |
|
"rewards/accuracies": 0.6937500238418579, |
|
"rewards/chosen": -0.47500982880592346, |
|
"rewards/margins": 0.3670799136161804, |
|
"rewards/rejected": -0.8420897722244263, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.3211009174311927, |
|
"grad_norm": 10.567717397857333, |
|
"learning_rate": 4.2958733752443187e-07, |
|
"logits/chosen": -1.6405372619628906, |
|
"logits/rejected": -1.487768530845642, |
|
"logps/chosen": -310.8406677246094, |
|
"logps/rejected": -290.4248962402344, |
|
"loss": 0.1328, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -0.3465554118156433, |
|
"rewards/margins": 0.31472834944725037, |
|
"rewards/rejected": -0.6612837910652161, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.3440366972477064, |
|
"grad_norm": 10.103632857127238, |
|
"learning_rate": 4.150873668617898e-07, |
|
"logits/chosen": -1.5787580013275146, |
|
"logits/rejected": -1.270272970199585, |
|
"logps/chosen": -316.16461181640625, |
|
"logps/rejected": -323.0017395019531, |
|
"loss": 0.1324, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -0.49621015787124634, |
|
"rewards/margins": 0.4019313454627991, |
|
"rewards/rejected": -0.8981413841247559, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.3440366972477064, |
|
"eval_logits/chosen": -1.6554096937179565, |
|
"eval_logits/rejected": -1.2887206077575684, |
|
"eval_logps/chosen": -332.78277587890625, |
|
"eval_logps/rejected": -341.6122741699219, |
|
"eval_loss": 0.12861265242099762, |
|
"eval_rewards/accuracies": 0.7370689511299133, |
|
"eval_rewards/chosen": -0.47692668437957764, |
|
"eval_rewards/margins": 0.47397124767303467, |
|
"eval_rewards/rejected": -0.9508979916572571, |
|
"eval_runtime": 94.5745, |
|
"eval_samples_per_second": 19.223, |
|
"eval_steps_per_second": 0.307, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.3669724770642202, |
|
"grad_norm": 12.610771394953652, |
|
"learning_rate": 3.9952763262280397e-07, |
|
"logits/chosen": -1.6438308954238892, |
|
"logits/rejected": -1.378089427947998, |
|
"logps/chosen": -342.8047790527344, |
|
"logps/rejected": -378.4410705566406, |
|
"loss": 0.125, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -0.46253666281700134, |
|
"rewards/margins": 0.4621877670288086, |
|
"rewards/rejected": -0.9247244000434875, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.38990825688073394, |
|
"grad_norm": 7.3876281210799375, |
|
"learning_rate": 3.8300801912883414e-07, |
|
"logits/chosen": -1.8106731176376343, |
|
"logits/rejected": -1.5751054286956787, |
|
"logps/chosen": -300.5574035644531, |
|
"logps/rejected": -343.215576171875, |
|
"loss": 0.12, |
|
"rewards/accuracies": 0.6937500238418579, |
|
"rewards/chosen": -0.28956499695777893, |
|
"rewards/margins": 0.4419015049934387, |
|
"rewards/rejected": -0.73146653175354, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.41284403669724773, |
|
"grad_norm": 9.731458591388959, |
|
"learning_rate": 3.6563457256020884e-07, |
|
"logits/chosen": -1.6891987323760986, |
|
"logits/rejected": -1.344957709312439, |
|
"logps/chosen": -351.0888671875, |
|
"logps/rejected": -328.3114013671875, |
|
"loss": 0.129, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -0.47730937600135803, |
|
"rewards/margins": 0.42085570096969604, |
|
"rewards/rejected": -0.8981650471687317, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.43577981651376146, |
|
"grad_norm": 10.618153958266253, |
|
"learning_rate": 3.475188202022617e-07, |
|
"logits/chosen": -1.5754601955413818, |
|
"logits/rejected": -1.322837471961975, |
|
"logps/chosen": -299.17999267578125, |
|
"logps/rejected": -353.1037292480469, |
|
"loss": 0.1248, |
|
"rewards/accuracies": 0.768750011920929, |
|
"rewards/chosen": -0.41065484285354614, |
|
"rewards/margins": 0.4769380986690521, |
|
"rewards/rejected": -0.8875927925109863, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.45871559633027525, |
|
"grad_norm": 8.299502199250934, |
|
"learning_rate": 3.287770545059052e-07, |
|
"logits/chosen": -1.7796523571014404, |
|
"logits/rejected": -1.4286386966705322, |
|
"logps/chosen": -312.58306884765625, |
|
"logps/rejected": -324.02142333984375, |
|
"loss": 0.1249, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": -0.3430142104625702, |
|
"rewards/margins": 0.4586402475833893, |
|
"rewards/rejected": -0.8016544580459595, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.45871559633027525, |
|
"eval_logits/chosen": -1.8577966690063477, |
|
"eval_logits/rejected": -1.4797794818878174, |
|
"eval_logps/chosen": -314.017578125, |
|
"eval_logps/rejected": -322.6352233886719, |
|
"eval_loss": 0.12169274687767029, |
|
"eval_rewards/accuracies": 0.7241379022598267, |
|
"eval_rewards/chosen": -0.2892747223377228, |
|
"eval_rewards/margins": 0.4718529284000397, |
|
"eval_rewards/rejected": -0.7611277103424072, |
|
"eval_runtime": 95.5757, |
|
"eval_samples_per_second": 19.022, |
|
"eval_steps_per_second": 0.303, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.481651376146789, |
|
"grad_norm": 8.358424851083543, |
|
"learning_rate": 3.0952958655864954e-07, |
|
"logits/chosen": -1.7499780654907227, |
|
"logits/rejected": -1.4417035579681396, |
|
"logps/chosen": -303.82586669921875, |
|
"logps/rejected": -331.75396728515625, |
|
"loss": 0.1201, |
|
"rewards/accuracies": 0.7749999761581421, |
|
"rewards/chosen": -0.30521491169929504, |
|
"rewards/margins": 0.46890267729759216, |
|
"rewards/rejected": -0.7741175293922424, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.5045871559633027, |
|
"grad_norm": 9.67017691007605, |
|
"learning_rate": 2.898999737583448e-07, |
|
"logits/chosen": -1.6984879970550537, |
|
"logits/rejected": -1.103266954421997, |
|
"logps/chosen": -371.213623046875, |
|
"logps/rejected": -387.9047546386719, |
|
"loss": 0.1164, |
|
"rewards/accuracies": 0.78125, |
|
"rewards/chosen": -0.4853332042694092, |
|
"rewards/margins": 0.5440121293067932, |
|
"rewards/rejected": -1.0293452739715576, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.5275229357798165, |
|
"grad_norm": 9.051099459635976, |
|
"learning_rate": 2.7001422664752333e-07, |
|
"logits/chosen": -1.5834382772445679, |
|
"logits/rejected": -1.3102777004241943, |
|
"logps/chosen": -317.35888671875, |
|
"logps/rejected": -356.3561096191406, |
|
"loss": 0.1216, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -0.4941536784172058, |
|
"rewards/margins": 0.4132082462310791, |
|
"rewards/rejected": -0.9073619842529297, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.5504587155963303, |
|
"grad_norm": 8.670604935851552, |
|
"learning_rate": 2.5e-07, |
|
"logits/chosen": -1.79386305809021, |
|
"logits/rejected": -1.4528472423553467, |
|
"logps/chosen": -328.12200927734375, |
|
"logps/rejected": -345.81500244140625, |
|
"loss": 0.1217, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": -0.40590184926986694, |
|
"rewards/margins": 0.43874797224998474, |
|
"rewards/rejected": -0.8446499109268188, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.573394495412844, |
|
"grad_norm": 8.325091240643497, |
|
"learning_rate": 2.2998577335247667e-07, |
|
"logits/chosen": -1.7739427089691162, |
|
"logits/rejected": -1.3005108833312988, |
|
"logps/chosen": -344.14129638671875, |
|
"logps/rejected": -352.2928771972656, |
|
"loss": 0.1189, |
|
"rewards/accuracies": 0.7875000238418579, |
|
"rewards/chosen": -0.4034748673439026, |
|
"rewards/margins": 0.515015721321106, |
|
"rewards/rejected": -0.9184905886650085, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.573394495412844, |
|
"eval_logits/chosen": -1.6726821660995483, |
|
"eval_logits/rejected": -1.2226585149765015, |
|
"eval_logps/chosen": -327.7221374511719, |
|
"eval_logps/rejected": -344.0638427734375, |
|
"eval_loss": 0.11745046824216843, |
|
"eval_rewards/accuracies": 0.7629310488700867, |
|
"eval_rewards/chosen": -0.4263203740119934, |
|
"eval_rewards/margins": 0.5490937232971191, |
|
"eval_rewards/rejected": -0.9754140973091125, |
|
"eval_runtime": 95.1095, |
|
"eval_samples_per_second": 19.115, |
|
"eval_steps_per_second": 0.305, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.5963302752293578, |
|
"grad_norm": 8.867617693309038, |
|
"learning_rate": 2.1010002624165524e-07, |
|
"logits/chosen": -1.6618998050689697, |
|
"logits/rejected": -1.3614200353622437, |
|
"logps/chosen": -315.7143249511719, |
|
"logps/rejected": -377.2685546875, |
|
"loss": 0.117, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -0.44999808073043823, |
|
"rewards/margins": 0.5787504315376282, |
|
"rewards/rejected": -1.0287485122680664, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.6192660550458715, |
|
"grad_norm": 8.935506104756715, |
|
"learning_rate": 1.9047041344135043e-07, |
|
"logits/chosen": -1.6130893230438232, |
|
"logits/rejected": -1.4423035383224487, |
|
"logps/chosen": -308.60711669921875, |
|
"logps/rejected": -346.1998291015625, |
|
"loss": 0.1177, |
|
"rewards/accuracies": 0.793749988079071, |
|
"rewards/chosen": -0.4254485070705414, |
|
"rewards/margins": 0.5180791020393372, |
|
"rewards/rejected": -0.9435275793075562, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.6422018348623854, |
|
"grad_norm": 9.329726294195847, |
|
"learning_rate": 1.7122294549409482e-07, |
|
"logits/chosen": -1.6924329996109009, |
|
"logits/rejected": -1.2535055875778198, |
|
"logps/chosen": -311.0262451171875, |
|
"logps/rejected": -363.9076232910156, |
|
"loss": 0.1216, |
|
"rewards/accuracies": 0.793749988079071, |
|
"rewards/chosen": -0.3802736699581146, |
|
"rewards/margins": 0.6208445429801941, |
|
"rewards/rejected": -1.0011183023452759, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.6651376146788991, |
|
"grad_norm": 11.266324554606376, |
|
"learning_rate": 1.524811797977383e-07, |
|
"logits/chosen": -1.6415001153945923, |
|
"logits/rejected": -1.2690132856369019, |
|
"logps/chosen": -327.433837890625, |
|
"logps/rejected": -346.80560302734375, |
|
"loss": 0.1149, |
|
"rewards/accuracies": 0.768750011920929, |
|
"rewards/chosen": -0.3970295786857605, |
|
"rewards/margins": 0.5318586826324463, |
|
"rewards/rejected": -0.928888201713562, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.6880733944954128, |
|
"grad_norm": 9.171141301041647, |
|
"learning_rate": 1.3436542743979125e-07, |
|
"logits/chosen": -1.6442855596542358, |
|
"logits/rejected": -1.4366796016693115, |
|
"logps/chosen": -349.1554870605469, |
|
"logps/rejected": -343.85443115234375, |
|
"loss": 0.1252, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -0.4320616126060486, |
|
"rewards/margins": 0.4178841710090637, |
|
"rewards/rejected": -0.8499458432197571, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.6880733944954128, |
|
"eval_logits/chosen": -1.663388729095459, |
|
"eval_logits/rejected": -1.1890878677368164, |
|
"eval_logps/chosen": -328.0691223144531, |
|
"eval_logps/rejected": -345.0453796386719, |
|
"eval_loss": 0.11537355184555054, |
|
"eval_rewards/accuracies": 0.7543103694915771, |
|
"eval_rewards/chosen": -0.4297903776168823, |
|
"eval_rewards/margins": 0.5554393529891968, |
|
"eval_rewards/rejected": -0.9852296710014343, |
|
"eval_runtime": 94.7636, |
|
"eval_samples_per_second": 19.185, |
|
"eval_steps_per_second": 0.306, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.7110091743119266, |
|
"grad_norm": 8.786286019785855, |
|
"learning_rate": 1.1699198087116588e-07, |
|
"logits/chosen": -1.6734371185302734, |
|
"logits/rejected": -1.3057066202163696, |
|
"logps/chosen": -325.54986572265625, |
|
"logps/rejected": -359.43572998046875, |
|
"loss": 0.123, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -0.47584667801856995, |
|
"rewards/margins": 0.4540133476257324, |
|
"rewards/rejected": -0.92985999584198, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 0.7339449541284404, |
|
"grad_norm": 9.586519281194345, |
|
"learning_rate": 1.00472367377196e-07, |
|
"logits/chosen": -1.605583906173706, |
|
"logits/rejected": -1.1544862985610962, |
|
"logps/chosen": -318.45654296875, |
|
"logps/rejected": -339.86163330078125, |
|
"loss": 0.1189, |
|
"rewards/accuracies": 0.768750011920929, |
|
"rewards/chosen": -0.4422459006309509, |
|
"rewards/margins": 0.611468493938446, |
|
"rewards/rejected": -1.053714394569397, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 0.7568807339449541, |
|
"grad_norm": 9.694294177484474, |
|
"learning_rate": 8.49126331382102e-08, |
|
"logits/chosen": -1.499674677848816, |
|
"logits/rejected": -1.1590025424957275, |
|
"logps/chosen": -327.3614807128906, |
|
"logps/rejected": -342.82550048828125, |
|
"loss": 0.1187, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -0.5283932685852051, |
|
"rewards/margins": 0.4288608431816101, |
|
"rewards/rejected": -0.9572542309761047, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 0.7798165137614679, |
|
"grad_norm": 10.420796259880595, |
|
"learning_rate": 7.041266247556812e-08, |
|
"logits/chosen": -1.587501883506775, |
|
"logits/rejected": -1.274778127670288, |
|
"logps/chosen": -338.21112060546875, |
|
"logps/rejected": -354.18890380859375, |
|
"loss": 0.1144, |
|
"rewards/accuracies": 0.7749999761581421, |
|
"rewards/chosen": -0.5021848678588867, |
|
"rewards/margins": 0.439863920211792, |
|
"rewards/rejected": -0.9420488476753235, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 0.8027522935779816, |
|
"grad_norm": 9.40520686958908, |
|
"learning_rate": 5.706553665319955e-08, |
|
"logits/chosen": -1.567800521850586, |
|
"logits/rejected": -1.0267796516418457, |
|
"logps/chosen": -327.0886535644531, |
|
"logps/rejected": -339.1602478027344, |
|
"loss": 0.1226, |
|
"rewards/accuracies": 0.768750011920929, |
|
"rewards/chosen": -0.5259757041931152, |
|
"rewards/margins": 0.48901230096817017, |
|
"rewards/rejected": -1.0149879455566406, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.8027522935779816, |
|
"eval_logits/chosen": -1.5758812427520752, |
|
"eval_logits/rejected": -1.0590085983276367, |
|
"eval_logps/chosen": -333.0171203613281, |
|
"eval_logps/rejected": -349.79791259765625, |
|
"eval_loss": 0.11367923766374588, |
|
"eval_rewards/accuracies": 0.7543103694915771, |
|
"eval_rewards/chosen": -0.47927024960517883, |
|
"eval_rewards/margins": 0.5534845590591431, |
|
"eval_rewards/rejected": -1.032754898071289, |
|
"eval_runtime": 94.1796, |
|
"eval_samples_per_second": 19.304, |
|
"eval_steps_per_second": 0.308, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.8256880733944955, |
|
"grad_norm": 7.926122004749844, |
|
"learning_rate": 4.4956936350761005e-08, |
|
"logits/chosen": -1.530390977859497, |
|
"logits/rejected": -1.2470009326934814, |
|
"logps/chosen": -297.60394287109375, |
|
"logps/rejected": -349.6514587402344, |
|
"loss": 0.1145, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": -0.45283880829811096, |
|
"rewards/margins": 0.45830464363098145, |
|
"rewards/rejected": -0.91114342212677, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 0.8486238532110092, |
|
"grad_norm": 8.501039777999448, |
|
"learning_rate": 3.416459164418123e-08, |
|
"logits/chosen": -1.637953519821167, |
|
"logits/rejected": -1.3401291370391846, |
|
"logps/chosen": -346.0074462890625, |
|
"logps/rejected": -358.09075927734375, |
|
"loss": 0.1125, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.46422967314720154, |
|
"rewards/margins": 0.4909321367740631, |
|
"rewards/rejected": -0.9551618695259094, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 0.8715596330275229, |
|
"grad_norm": 10.294163163028395, |
|
"learning_rate": 2.475778302439524e-08, |
|
"logits/chosen": -1.6482412815093994, |
|
"logits/rejected": -1.1318132877349854, |
|
"logps/chosen": -337.7255859375, |
|
"logps/rejected": -354.24090576171875, |
|
"loss": 0.118, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": -0.4134751260280609, |
|
"rewards/margins": 0.5523717403411865, |
|
"rewards/rejected": -0.965846836566925, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 0.8944954128440367, |
|
"grad_norm": 9.29566160345761, |
|
"learning_rate": 1.6796896657433805e-08, |
|
"logits/chosen": -1.5356476306915283, |
|
"logits/rejected": -1.1930160522460938, |
|
"logps/chosen": -296.85870361328125, |
|
"logps/rejected": -320.10577392578125, |
|
"loss": 0.1227, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -0.4490123391151428, |
|
"rewards/margins": 0.4589834213256836, |
|
"rewards/rejected": -0.9079957008361816, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 0.9174311926605505, |
|
"grad_norm": 10.238826336953899, |
|
"learning_rate": 1.0333036740834855e-08, |
|
"logits/chosen": -1.455672264099121, |
|
"logits/rejected": -1.2008806467056274, |
|
"logps/chosen": -265.4068298339844, |
|
"logps/rejected": -317.6711120605469, |
|
"loss": 0.1206, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -0.4084719121456146, |
|
"rewards/margins": 0.4760831892490387, |
|
"rewards/rejected": -0.8845551609992981, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.9174311926605505, |
|
"eval_logits/chosen": -1.608026385307312, |
|
"eval_logits/rejected": -1.1351321935653687, |
|
"eval_logps/chosen": -328.656494140625, |
|
"eval_logps/rejected": -344.9654846191406, |
|
"eval_loss": 0.11354311555624008, |
|
"eval_rewards/accuracies": 0.75, |
|
"eval_rewards/chosen": -0.43566375970840454, |
|
"eval_rewards/margins": 0.5487664937973022, |
|
"eval_rewards/rejected": -0.984430193901062, |
|
"eval_runtime": 96.0499, |
|
"eval_samples_per_second": 18.928, |
|
"eval_steps_per_second": 0.302, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.9403669724770642, |
|
"grad_norm": 10.26611326963808, |
|
"learning_rate": 5.4076974448211685e-09, |
|
"logits/chosen": -1.4635480642318726, |
|
"logits/rejected": -1.029957890510559, |
|
"logps/chosen": -317.2907409667969, |
|
"logps/rejected": -339.1365051269531, |
|
"loss": 0.1172, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.4633886218070984, |
|
"rewards/margins": 0.5475090742111206, |
|
"rewards/rejected": -1.0108975172042847, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 0.963302752293578, |
|
"grad_norm": 11.907377094577166, |
|
"learning_rate": 2.052496544188487e-09, |
|
"logits/chosen": -1.477085828781128, |
|
"logits/rejected": -0.9949935078620911, |
|
"logps/chosen": -308.0985412597656, |
|
"logps/rejected": -351.2358093261719, |
|
"loss": 0.1158, |
|
"rewards/accuracies": 0.7749999761581421, |
|
"rewards/chosen": -0.4865953028202057, |
|
"rewards/margins": 0.5669893026351929, |
|
"rewards/rejected": -1.0535845756530762, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 0.9862385321100917, |
|
"grad_norm": 9.339603996948918, |
|
"learning_rate": 2.889724508297886e-10, |
|
"logits/chosen": -1.5852940082550049, |
|
"logits/rejected": -1.0293024778366089, |
|
"logps/chosen": -347.1506042480469, |
|
"logps/rejected": -338.89019775390625, |
|
"loss": 0.1161, |
|
"rewards/accuracies": 0.7437499761581421, |
|
"rewards/chosen": -0.439751535654068, |
|
"rewards/margins": 0.5087853074073792, |
|
"rewards/rejected": -0.94853675365448, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"step": 436, |
|
"total_flos": 0.0, |
|
"train_loss": 0.13275344205012016, |
|
"train_runtime": 11631.0591, |
|
"train_samples_per_second": 4.794, |
|
"train_steps_per_second": 0.037 |
|
} |
|
], |
|
"logging_steps": 10, |
|
"max_steps": 436, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 1, |
|
"save_steps": 100, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": true |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 0.0, |
|
"train_batch_size": 8, |
|
"trial_name": null, |
|
"trial_params": null |
|
}