Nexspear committed on
Commit 82c4fc4 · verified · 1 Parent(s): 7d5a6e6

Training in progress, step 200, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4e43d6d5883c6e405ab763c54b77b09403de689500e3c5a4449a17c605cb8fb1
+oid sha256:6e6951f5b6175d14b7d614bbce6929619d9cf30d6d20d54326aa56b20ddbeb4b
 size 59933632
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1864703aea5625377248a418a56cf5af58afba9add15606aef291917de466f34
+oid sha256:6c306a3827a197b1ca828f33365b5de56fe798f71b75b5a49bb3a17dbdeda00b
 size 31822948
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0c89db017fa9ddc5e9dc27f58a34fa74d457c810061d28b6ffc48e288f1a4f35
+oid sha256:eeb8c4dc6cb76a5543d18a9fb3156b407bc6d7f8294b806ca5fe4cfa45f8f56a
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d8ce05761f46e7cf72fb17a02e3a0ca15c9d25ce3babf590eeb40568923b8bac
+oid sha256:d2d754412c61116546142914503e7369d0cc35d3c380a07e5218f595d76b6d96
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.05413208228076507,
+  "epoch": 0.07217610970768676,
   "eval_steps": 50,
-  "global_step": 150,
+  "global_step": 200,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -249,6 +249,84 @@
       "eval_samples_per_second": 14.979,
       "eval_steps_per_second": 7.496,
       "step": 150
+    },
+    {
+      "epoch": 0.05593648502345724,
+      "grad_norm": 0.39626944065093994,
+      "learning_rate": 1.3213804466343421e-05,
+      "loss": 0.1199,
+      "step": 155
+    },
+    {
+      "epoch": 0.057740887766149405,
+      "grad_norm": 0.5094817280769348,
+      "learning_rate": 1.0542974530180327e-05,
+      "loss": 0.1201,
+      "step": 160
+    },
+    {
+      "epoch": 0.059545290508841574,
+      "grad_norm": 0.5557631254196167,
+      "learning_rate": 8.141676086873572e-06,
+      "loss": 0.1154,
+      "step": 165
+    },
+    {
+      "epoch": 0.06134969325153374,
+      "grad_norm": 0.6784803867340088,
+      "learning_rate": 6.026312439675552e-06,
+      "loss": 0.1254,
+      "step": 170
+    },
+    {
+      "epoch": 0.06315409599422592,
+      "grad_norm": 0.3143410086631775,
+      "learning_rate": 4.2113336672471245e-06,
+      "loss": 0.115,
+      "step": 175
+    },
+    {
+      "epoch": 0.06495849873691809,
+      "grad_norm": 0.5348233580589294,
+      "learning_rate": 2.7091379149682685e-06,
+      "loss": 0.1222,
+      "step": 180
+    },
+    {
+      "epoch": 0.06676290147961025,
+      "grad_norm": 0.29956620931625366,
+      "learning_rate": 1.5299867030334814e-06,
+      "loss": 0.1101,
+      "step": 185
+    },
+    {
+      "epoch": 0.06856730422230242,
+      "grad_norm": 0.3928939998149872,
+      "learning_rate": 6.819348298638839e-07,
+      "loss": 0.1205,
+      "step": 190
+    },
+    {
+      "epoch": 0.07037170696499459,
+      "grad_norm": 0.42990320920944214,
+      "learning_rate": 1.7077534966650766e-07,
+      "loss": 0.1136,
+      "step": 195
+    },
+    {
+      "epoch": 0.07217610970768676,
+      "grad_norm": 0.6331201791763306,
+      "learning_rate": 0.0,
+      "loss": 0.124,
+      "step": 200
+    },
+    {
+      "epoch": 0.07217610970768676,
+      "eval_loss": 0.11012110859155655,
+      "eval_runtime": 77.7727,
+      "eval_samples_per_second": 15.005,
+      "eval_steps_per_second": 7.509,
+      "step": 200
     }
   ],
   "logging_steps": 5,
@@ -263,12 +341,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 2.773278842997965e+16,
+  "total_flos": 3.698847801724109e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null