Nexspear committed on
Commit 969f89b · verified · 1 Parent(s): b306708

Training in progress, step 200, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:56a1b6d87dc1fb270b46a64ac51ae0eb9fc677537735cfc5bf2f5fa946ab4142
+oid sha256:7d45b2c1b8953404021c16605d0de80ad936228c735cfdc030863eb91831aa3d
 size 25271744
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:66abdcbc9aabede0b74f3f4083c19a92c63f8d286830fecf31ba5d242d8b795e
+oid sha256:0a20dcb5e4784b182a9e3577482fb2fb9ab3a2eea4445e1c75e61ae5197019fe
 size 13685516
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d535cf080674396fb72928f6111bb7f64589019b0899774b6c6db72a95e900e4
+oid sha256:185c4fed5976485f851475b83d32c75768629ff69422fd0d3d27a11ccafde888
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d8ce05761f46e7cf72fb17a02e3a0ca15c9d25ce3babf590eeb40568923b8bac
+oid sha256:d2d754412c61116546142914503e7369d0cc35d3c380a07e5218f595d76b6d96
 size 1064
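
Each of the four files above is tracked with Git LFS, so the diff only shows the pointer text changing: the "oid sha256:" line (the hash of the new object) carries the update while the recorded size stays the same. As a minimal sketch (not part of this commit), a locally downloaded copy of a checkpoint file could be checked against the oid and size from its pointer; the local path in the example is an assumption about where the repository was cloned:

import hashlib
import os

def matches_lfs_pointer(path, expected_oid, expected_size):
    # Compare a local file against the sha256 oid and byte size from its LFS pointer.
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# New adapter weights from this commit (local path is illustrative):
print(matches_lfs_pointer(
    "last-checkpoint/adapter_model.safetensors",
    "7d45b2c1b8953404021c16605d0de80ad936228c735cfdc030863eb91831aa3d",
    25271744,
))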
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.010239781551326906,
+  "epoch": 0.013653042068435874,
   "eval_steps": 50,
-  "global_step": 150,
+  "global_step": 200,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -249,6 +249,84 @@
       "eval_samples_per_second": 42.028,
       "eval_steps_per_second": 21.014,
       "step": 150
+    },
+    {
+      "epoch": 0.010581107603037801,
+      "grad_norm": 2.8252055644989014,
+      "learning_rate": 1.3213804466343421e-05,
+      "loss": 2.1986,
+      "step": 155
+    },
+    {
+      "epoch": 0.010922433654748698,
+      "grad_norm": 2.882528066635132,
+      "learning_rate": 1.0542974530180327e-05,
+      "loss": 2.1574,
+      "step": 160
+    },
+    {
+      "epoch": 0.011263759706459596,
+      "grad_norm": 2.57167911529541,
+      "learning_rate": 8.141676086873572e-06,
+      "loss": 2.2536,
+      "step": 165
+    },
+    {
+      "epoch": 0.011605085758170493,
+      "grad_norm": 2.8177096843719482,
+      "learning_rate": 6.026312439675552e-06,
+      "loss": 2.1281,
+      "step": 170
+    },
+    {
+      "epoch": 0.01194641180988139,
+      "grad_norm": 2.2039341926574707,
+      "learning_rate": 4.2113336672471245e-06,
+      "loss": 2.0143,
+      "step": 175
+    },
+    {
+      "epoch": 0.012287737861592285,
+      "grad_norm": 2.465358018875122,
+      "learning_rate": 2.7091379149682685e-06,
+      "loss": 2.1856,
+      "step": 180
+    },
+    {
+      "epoch": 0.012629063913303182,
+      "grad_norm": 2.2475521564483643,
+      "learning_rate": 1.5299867030334814e-06,
+      "loss": 2.1777,
+      "step": 185
+    },
+    {
+      "epoch": 0.01297038996501408,
+      "grad_norm": 2.3308393955230713,
+      "learning_rate": 6.819348298638839e-07,
+      "loss": 2.3441,
+      "step": 190
+    },
+    {
+      "epoch": 0.013311716016724977,
+      "grad_norm": 2.915706157684326,
+      "learning_rate": 1.7077534966650766e-07,
+      "loss": 2.3743,
+      "step": 195
+    },
+    {
+      "epoch": 0.013653042068435874,
+      "grad_norm": 3.283146381378174,
+      "learning_rate": 0.0,
+      "loss": 2.1816,
+      "step": 200
+    },
+    {
+      "epoch": 0.013653042068435874,
+      "eval_loss": 2.2533507347106934,
+      "eval_runtime": 147.1216,
+      "eval_samples_per_second": 41.924,
+      "eval_steps_per_second": 20.962,
+      "step": 200
     }
   ],
   "logging_steps": 5,
@@ -263,12 +341,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
      "attributes": {}
     }
   },
-  "total_flos": 7673759347507200.0,
+  "total_flos": 1.02316791300096e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null