diff --git a/checkpoint-1014/README.md b/checkpoint-1014/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e47b03ac2b3ec63bb9b693d5ea09a59bed58eec6
--- /dev/null
+++ b/checkpoint-1014/README.md
@@ -0,0 +1,202 @@
+---
+base_model: meta-llama/Llama-2-13b-chat-hf
+library_name: peft
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.13.2
\ No newline at end of file
diff --git a/checkpoint-1014/adapter_config.json b/checkpoint-1014/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..adaaeb24374ba5c7059503a0ebd5378b39206f06
--- /dev/null
+++ b/checkpoint-1014/adapter_config.json
@@ -0,0 +1,29 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-chat-hf",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/checkpoint-1014/adapter_model.safetensors b/checkpoint-1014/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..94134fa0407efa5343cc6905f38d5bb1ebee52e1
--- /dev/null
+++ b/checkpoint-1014/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6ae1ac7a68861e6947090919c8e22ecf4456e25ecf846db0253434e2618f7c8a
+size 26235704
diff --git a/checkpoint-1014/optimizer.pt b/checkpoint-1014/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..52f0c492c85307869dcf747bc9d7c2d1f2dcb423
--- /dev/null
+++ b/checkpoint-1014/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8bfada63e6d908026a46974dc1fcd8fdfd894bb780b41207cadb800b2d0323b2
+size 52563258
diff --git a/checkpoint-1014/rng_state.pth b/checkpoint-1014/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..5d968263657a30eea713bfe7c3ac18de2db73bca
--- /dev/null
+++ b/checkpoint-1014/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6b4c4f0166ef9ca74027402d439827bc8ed2a02c96ba6ba9e51d0a9c0f291412
+size 14244
diff --git a/checkpoint-1014/scheduler.pt b/checkpoint-1014/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8c15d820812361823eaae11e84a9769fc4b30d88
--- /dev/null
+++ b/checkpoint-1014/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:78fd9b36c5271bbf8da930a0ab7ea570971f4031660dcb029549ddb0db3d5123
+size 1064
diff --git a/checkpoint-1014/trainer_state.json b/checkpoint-1014/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..69877af17fe610fcf5071c6c0fc024e83423d0b5
--- /dev/null
+++ b/checkpoint-1014/trainer_state.json
@@ -0,0 +1,173 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 5.991137370753323,
+ "eval_steps": 500,
+ "global_step": 1014,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.29542097488921715,
+ "grad_norm": 1.8936359882354736,
+ "learning_rate": 2.5e-05,
+ "loss": 2.2777,
+ "step": 50
+ },
+ {
+ "epoch": 0.5908419497784343,
+ "grad_norm": 0.42867106199264526,
+ "learning_rate": 5e-05,
+ "loss": 0.9288,
+ "step": 100
+ },
+ {
+ "epoch": 0.8862629246676514,
+ "grad_norm": 0.5983214378356934,
+ "learning_rate": 4.726477024070022e-05,
+ "loss": 0.6469,
+ "step": 150
+ },
+ {
+ "epoch": 1.1816838995568686,
+ "grad_norm": 0.5473693013191223,
+ "learning_rate": 4.452954048140044e-05,
+ "loss": 0.5595,
+ "step": 200
+ },
+ {
+ "epoch": 1.4771048744460857,
+ "grad_norm": 0.5971556901931763,
+ "learning_rate": 4.179431072210066e-05,
+ "loss": 0.537,
+ "step": 250
+ },
+ {
+ "epoch": 1.7725258493353029,
+ "grad_norm": 0.6025377511978149,
+ "learning_rate": 3.9059080962800876e-05,
+ "loss": 0.5271,
+ "step": 300
+ },
+ {
+ "epoch": 2.06794682422452,
+ "grad_norm": 0.9687663912773132,
+ "learning_rate": 3.6323851203501094e-05,
+ "loss": 0.5318,
+ "step": 350
+ },
+ {
+ "epoch": 2.363367799113737,
+ "grad_norm": 0.673847496509552,
+ "learning_rate": 3.358862144420131e-05,
+ "loss": 0.5116,
+ "step": 400
+ },
+ {
+ "epoch": 2.658788774002954,
+ "grad_norm": 0.7841825485229492,
+ "learning_rate": 3.085339168490153e-05,
+ "loss": 0.5114,
+ "step": 450
+ },
+ {
+ "epoch": 2.9542097488921715,
+ "grad_norm": 0.8053774237632751,
+ "learning_rate": 2.811816192560175e-05,
+ "loss": 0.5049,
+ "step": 500
+ },
+ {
+ "epoch": 3.2496307237813884,
+ "grad_norm": 0.8071188926696777,
+ "learning_rate": 2.538293216630197e-05,
+ "loss": 0.5045,
+ "step": 550
+ },
+ {
+ "epoch": 3.5450516986706058,
+ "grad_norm": 0.8850335478782654,
+ "learning_rate": 2.264770240700219e-05,
+ "loss": 0.4958,
+ "step": 600
+ },
+ {
+ "epoch": 3.8404726735598227,
+ "grad_norm": 1.001068115234375,
+ "learning_rate": 1.9912472647702408e-05,
+ "loss": 0.4927,
+ "step": 650
+ },
+ {
+ "epoch": 4.13589364844904,
+ "grad_norm": 1.3853482007980347,
+ "learning_rate": 1.7177242888402626e-05,
+ "loss": 0.494,
+ "step": 700
+ },
+ {
+ "epoch": 4.431314623338257,
+ "grad_norm": 1.1378982067108154,
+ "learning_rate": 1.4442013129102846e-05,
+ "loss": 0.4853,
+ "step": 750
+ },
+ {
+ "epoch": 4.726735598227474,
+ "grad_norm": 1.11444890499115,
+ "learning_rate": 1.1706783369803063e-05,
+ "loss": 0.4797,
+ "step": 800
+ },
+ {
+ "epoch": 5.022156573116692,
+ "grad_norm": 1.133715033531189,
+ "learning_rate": 8.971553610503283e-06,
+ "loss": 0.4822,
+ "step": 850
+ },
+ {
+ "epoch": 5.317577548005908,
+ "grad_norm": 1.230751633644104,
+ "learning_rate": 6.2363238512035015e-06,
+ "loss": 0.475,
+ "step": 900
+ },
+ {
+ "epoch": 5.612998522895126,
+ "grad_norm": 1.201663613319397,
+ "learning_rate": 3.50109409190372e-06,
+ "loss": 0.4729,
+ "step": 950
+ },
+ {
+ "epoch": 5.908419497784343,
+ "grad_norm": 1.250101089477539,
+ "learning_rate": 7.658643326039388e-07,
+ "loss": 0.4689,
+ "step": 1000
+ }
+ ],
+ "logging_steps": 50,
+ "max_steps": 1014,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 6,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 1.6016891541848064e+17,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-1014/training_args.bin b/checkpoint-1014/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..3d1f2c67181e3caa74f4776a0508fef4a135f353
--- /dev/null
+++ b/checkpoint-1014/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:872c4fe8114f54151594e6613a66fece0ce49b15bbe3e4b8349db079199d124b
+size 5240
diff --git a/checkpoint-169/README.md b/checkpoint-169/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e47b03ac2b3ec63bb9b693d5ea09a59bed58eec6
--- /dev/null
+++ b/checkpoint-169/README.md
@@ -0,0 +1,202 @@
+---
+base_model: meta-llama/Llama-2-13b-chat-hf
+library_name: peft
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.13.2
\ No newline at end of file
diff --git a/checkpoint-169/adapter_config.json b/checkpoint-169/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..adaaeb24374ba5c7059503a0ebd5378b39206f06
--- /dev/null
+++ b/checkpoint-169/adapter_config.json
@@ -0,0 +1,29 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-chat-hf",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/checkpoint-169/adapter_model.safetensors b/checkpoint-169/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..9d8cb28f8ec6354607b5d1e247d335f3d155173b
--- /dev/null
+++ b/checkpoint-169/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4442ce7239959c5cf44de288137afb3b22e858a2ca8490cf27cb05f81d24430b
+size 26235704
diff --git a/checkpoint-169/optimizer.pt b/checkpoint-169/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..27514b619c8218fa0f654481fad5d623774a3974
--- /dev/null
+++ b/checkpoint-169/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5ae42fdd02ee22b2ff706df2281ac0555d2bf45388a9416c1b10bb11b50b7ef5
+size 52563258
diff --git a/checkpoint-169/rng_state.pth b/checkpoint-169/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..8c3d7eaea723ec61d2e0b66cbd197d16544f0d37
--- /dev/null
+++ b/checkpoint-169/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:671cb42ff173690da931c0dc9e5393050001d4311b9fa17e7f4b9742fc264993
+size 14244
diff --git a/checkpoint-169/scheduler.pt b/checkpoint-169/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ef22707cae3a3afe61900df410ff4cfb7caab5cf
--- /dev/null
+++ b/checkpoint-169/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a6b9a90250d286d551e591117be4e8a759da87ed937533dfac8a7df81db37c5a
+size 1064
diff --git a/checkpoint-169/trainer_state.json b/checkpoint-169/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..78f1dd8a4a982d2220818f4209c0cbdfd823979b
--- /dev/null
+++ b/checkpoint-169/trainer_state.json
@@ -0,0 +1,54 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.9985228951255539,
+ "eval_steps": 500,
+ "global_step": 169,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.29542097488921715,
+ "grad_norm": 1.8936359882354736,
+ "learning_rate": 2.5e-05,
+ "loss": 2.2777,
+ "step": 50
+ },
+ {
+ "epoch": 0.5908419497784343,
+ "grad_norm": 0.42867106199264526,
+ "learning_rate": 5e-05,
+ "loss": 0.9288,
+ "step": 100
+ },
+ {
+ "epoch": 0.8862629246676514,
+ "grad_norm": 0.5983214378356934,
+ "learning_rate": 4.726477024070022e-05,
+ "loss": 0.6469,
+ "step": 150
+ }
+ ],
+ "logging_steps": 50,
+ "max_steps": 1014,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 6,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 2.673267488980992e+16,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-169/training_args.bin b/checkpoint-169/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..3d1f2c67181e3caa74f4776a0508fef4a135f353
--- /dev/null
+++ b/checkpoint-169/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:872c4fe8114f54151594e6613a66fece0ce49b15bbe3e4b8349db079199d124b
+size 5240
diff --git a/checkpoint-338/README.md b/checkpoint-338/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e47b03ac2b3ec63bb9b693d5ea09a59bed58eec6
--- /dev/null
+++ b/checkpoint-338/README.md
@@ -0,0 +1,202 @@
+---
+base_model: meta-llama/Llama-2-13b-chat-hf
+library_name: peft
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.13.2
\ No newline at end of file
diff --git a/checkpoint-338/adapter_config.json b/checkpoint-338/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..adaaeb24374ba5c7059503a0ebd5378b39206f06
--- /dev/null
+++ b/checkpoint-338/adapter_config.json
@@ -0,0 +1,29 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-chat-hf",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/checkpoint-338/adapter_model.safetensors b/checkpoint-338/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d099aaeaf979fd21e1635a5118517b48a60962bb
--- /dev/null
+++ b/checkpoint-338/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c86dd9d6ba6f8d9da217d7e361785e3cef1fb1f287d6b1ebcda4a722919c1dd2
+size 26235704
diff --git a/checkpoint-338/optimizer.pt b/checkpoint-338/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..0420130e940e65999060715868f53f81a990576a
--- /dev/null
+++ b/checkpoint-338/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:77f727734afcb878c0ffd387c20c154790abde5abeab450350c41c16fd522ba0
+size 52563258
diff --git a/checkpoint-338/rng_state.pth b/checkpoint-338/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..28a15ce2f3f6b96a48280686bd9a851a0d33ae3e
--- /dev/null
+++ b/checkpoint-338/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4a407ae22d91865b0c7b2f37be1ef15d17cbdabfdef79af88be1868bd0b57a9e
+size 14244
diff --git a/checkpoint-338/scheduler.pt b/checkpoint-338/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..af352f337b969ed7e51b47fd8a09e0b7bbd50dab
--- /dev/null
+++ b/checkpoint-338/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0e843bba70c519c5360d713ee493af48cef233f40f1d4fe81285921c728ccf34
+size 1064
diff --git a/checkpoint-338/trainer_state.json b/checkpoint-338/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..4e113f96abcdc7bdb9f362e4cc94000a6a3ccdfe
--- /dev/null
+++ b/checkpoint-338/trainer_state.json
@@ -0,0 +1,75 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.9970457902511078,
+ "eval_steps": 500,
+ "global_step": 338,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.29542097488921715,
+ "grad_norm": 1.8936359882354736,
+ "learning_rate": 2.5e-05,
+ "loss": 2.2777,
+ "step": 50
+ },
+ {
+ "epoch": 0.5908419497784343,
+ "grad_norm": 0.42867106199264526,
+ "learning_rate": 5e-05,
+ "loss": 0.9288,
+ "step": 100
+ },
+ {
+ "epoch": 0.8862629246676514,
+ "grad_norm": 0.5983214378356934,
+ "learning_rate": 4.726477024070022e-05,
+ "loss": 0.6469,
+ "step": 150
+ },
+ {
+ "epoch": 1.1816838995568686,
+ "grad_norm": 0.5473693013191223,
+ "learning_rate": 4.452954048140044e-05,
+ "loss": 0.5595,
+ "step": 200
+ },
+ {
+ "epoch": 1.4771048744460857,
+ "grad_norm": 0.5971556901931763,
+ "learning_rate": 4.179431072210066e-05,
+ "loss": 0.537,
+ "step": 250
+ },
+ {
+ "epoch": 1.7725258493353029,
+ "grad_norm": 0.6025377511978149,
+ "learning_rate": 3.9059080962800876e-05,
+ "loss": 0.5271,
+ "step": 300
+ }
+ ],
+ "logging_steps": 50,
+ "max_steps": 1014,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 6,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 5.346534977961984e+16,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-338/training_args.bin b/checkpoint-338/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..3d1f2c67181e3caa74f4776a0508fef4a135f353
--- /dev/null
+++ b/checkpoint-338/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:872c4fe8114f54151594e6613a66fece0ce49b15bbe3e4b8349db079199d124b
+size 5240
diff --git a/checkpoint-507/README.md b/checkpoint-507/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e47b03ac2b3ec63bb9b693d5ea09a59bed58eec6
--- /dev/null
+++ b/checkpoint-507/README.md
@@ -0,0 +1,202 @@
+---
+base_model: meta-llama/Llama-2-13b-chat-hf
+library_name: peft
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.13.2
\ No newline at end of file
diff --git a/checkpoint-507/adapter_config.json b/checkpoint-507/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..adaaeb24374ba5c7059503a0ebd5378b39206f06
--- /dev/null
+++ b/checkpoint-507/adapter_config.json
@@ -0,0 +1,29 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-chat-hf",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/checkpoint-507/adapter_model.safetensors b/checkpoint-507/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..3fcb3918c2f33013c31a6d64b943de01d80f08bc
--- /dev/null
+++ b/checkpoint-507/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:105b1809d373e480a43a8c836cac57b1048c9cf8ec42b840066043ac701947e0
+size 26235704
diff --git a/checkpoint-507/optimizer.pt b/checkpoint-507/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..21a2a0330d55d5d3ae0eb83fc81ee82388d88dff
--- /dev/null
+++ b/checkpoint-507/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b36b369fd0c82332dbb174bf83d477b286ee9c38c440dd2494fcb0be52dae8f0
+size 52563258
diff --git a/checkpoint-507/rng_state.pth b/checkpoint-507/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..518eebd4ed0e965d0147b7f42843fa71967f44ac
--- /dev/null
+++ b/checkpoint-507/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e835266e3a87be80aca307cad0a349799b85090ac5a34a85e773d1c8dc2560a
+size 14244
diff --git a/checkpoint-507/scheduler.pt b/checkpoint-507/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..429c00f61c940436c48d3c586a7762becdfc310c
--- /dev/null
+++ b/checkpoint-507/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e3fd4253769ea0aeaa0685369d70bcfb875725f58354f5399b1fa66d41b3ed04
+size 1064
diff --git a/checkpoint-507/trainer_state.json b/checkpoint-507/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..6b075373c5a73525a1c484ca45ef290444b043e8
--- /dev/null
+++ b/checkpoint-507/trainer_state.json
@@ -0,0 +1,103 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 2.9955686853766617,
+ "eval_steps": 500,
+ "global_step": 507,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.29542097488921715,
+ "grad_norm": 1.8936359882354736,
+ "learning_rate": 2.5e-05,
+ "loss": 2.2777,
+ "step": 50
+ },
+ {
+ "epoch": 0.5908419497784343,
+ "grad_norm": 0.42867106199264526,
+ "learning_rate": 5e-05,
+ "loss": 0.9288,
+ "step": 100
+ },
+ {
+ "epoch": 0.8862629246676514,
+ "grad_norm": 0.5983214378356934,
+ "learning_rate": 4.726477024070022e-05,
+ "loss": 0.6469,
+ "step": 150
+ },
+ {
+ "epoch": 1.1816838995568686,
+ "grad_norm": 0.5473693013191223,
+ "learning_rate": 4.452954048140044e-05,
+ "loss": 0.5595,
+ "step": 200
+ },
+ {
+ "epoch": 1.4771048744460857,
+ "grad_norm": 0.5971556901931763,
+ "learning_rate": 4.179431072210066e-05,
+ "loss": 0.537,
+ "step": 250
+ },
+ {
+ "epoch": 1.7725258493353029,
+ "grad_norm": 0.6025377511978149,
+ "learning_rate": 3.9059080962800876e-05,
+ "loss": 0.5271,
+ "step": 300
+ },
+ {
+ "epoch": 2.06794682422452,
+ "grad_norm": 0.9687663912773132,
+ "learning_rate": 3.6323851203501094e-05,
+ "loss": 0.5318,
+ "step": 350
+ },
+ {
+ "epoch": 2.363367799113737,
+ "grad_norm": 0.673847496509552,
+ "learning_rate": 3.358862144420131e-05,
+ "loss": 0.5116,
+ "step": 400
+ },
+ {
+ "epoch": 2.658788774002954,
+ "grad_norm": 0.7841825485229492,
+ "learning_rate": 3.085339168490153e-05,
+ "loss": 0.5114,
+ "step": 450
+ },
+ {
+ "epoch": 2.9542097488921715,
+ "grad_norm": 0.8053774237632751,
+ "learning_rate": 2.811816192560175e-05,
+ "loss": 0.5049,
+ "step": 500
+ }
+ ],
+ "logging_steps": 50,
+ "max_steps": 1014,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 6,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 8.019802466942976e+16,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-507/training_args.bin b/checkpoint-507/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..3d1f2c67181e3caa74f4776a0508fef4a135f353
--- /dev/null
+++ b/checkpoint-507/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:872c4fe8114f54151594e6613a66fece0ce49b15bbe3e4b8349db079199d124b
+size 5240
diff --git a/checkpoint-677/README.md b/checkpoint-677/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e47b03ac2b3ec63bb9b693d5ea09a59bed58eec6
--- /dev/null
+++ b/checkpoint-677/README.md
@@ -0,0 +1,202 @@
+---
+base_model: meta-llama/Llama-2-13b-chat-hf
+library_name: peft
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+
+### Framework versions
+
+- PEFT 0.13.2
\ No newline at end of file
diff --git a/checkpoint-677/adapter_config.json b/checkpoint-677/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..adaaeb24374ba5c7059503a0ebd5378b39206f06
--- /dev/null
+++ b/checkpoint-677/adapter_config.json
@@ -0,0 +1,29 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-chat-hf",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/checkpoint-677/adapter_model.safetensors b/checkpoint-677/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..972be45384dfd0c8218745b054db88c2c9de4aad
--- /dev/null
+++ b/checkpoint-677/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:89f884c44ff5bab7cf5f8a5b7ac9a97207dbcde03c976c18ebfdea64c1b4aa58
+size 26235704
diff --git a/checkpoint-677/optimizer.pt b/checkpoint-677/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f7f87ae6d6fc92f42eca0d6ad14c8d3bac94771a
--- /dev/null
+++ b/checkpoint-677/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dd5c10c0c1e581c3e4ae00273250b6185c4bd043d4b8c7114559095d0d564987
+size 52563258
diff --git a/checkpoint-677/rng_state.pth b/checkpoint-677/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..346707572282eff6e90aeff25f77cd75d6fde795
--- /dev/null
+++ b/checkpoint-677/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e6356d528bf3866558bd9dd722baf34c195d10152de27bd868852994ed582b79
+size 14244
diff --git a/checkpoint-677/scheduler.pt b/checkpoint-677/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..708df19cb736accef75470dddd81e209a8dd2ee1
--- /dev/null
+++ b/checkpoint-677/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ddaadb35f74d04ab3c980cafc198834d2c4f383d5a8137825aa635ea88bf0035
+size 1064
diff --git a/checkpoint-677/trainer_state.json b/checkpoint-677/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..7f31fb6ef3a0af6f4aacb0347c2696814599686e
--- /dev/null
+++ b/checkpoint-677/trainer_state.json
@@ -0,0 +1,124 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 4.0,
+ "eval_steps": 500,
+ "global_step": 677,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.29542097488921715,
+ "grad_norm": 1.8936359882354736,
+ "learning_rate": 2.5e-05,
+ "loss": 2.2777,
+ "step": 50
+ },
+ {
+ "epoch": 0.5908419497784343,
+ "grad_norm": 0.42867106199264526,
+ "learning_rate": 5e-05,
+ "loss": 0.9288,
+ "step": 100
+ },
+ {
+ "epoch": 0.8862629246676514,
+ "grad_norm": 0.5983214378356934,
+ "learning_rate": 4.726477024070022e-05,
+ "loss": 0.6469,
+ "step": 150
+ },
+ {
+ "epoch": 1.1816838995568686,
+ "grad_norm": 0.5473693013191223,
+ "learning_rate": 4.452954048140044e-05,
+ "loss": 0.5595,
+ "step": 200
+ },
+ {
+ "epoch": 1.4771048744460857,
+ "grad_norm": 0.5971556901931763,
+ "learning_rate": 4.179431072210066e-05,
+ "loss": 0.537,
+ "step": 250
+ },
+ {
+ "epoch": 1.7725258493353029,
+ "grad_norm": 0.6025377511978149,
+ "learning_rate": 3.9059080962800876e-05,
+ "loss": 0.5271,
+ "step": 300
+ },
+ {
+ "epoch": 2.06794682422452,
+ "grad_norm": 0.9687663912773132,
+ "learning_rate": 3.6323851203501094e-05,
+ "loss": 0.5318,
+ "step": 350
+ },
+ {
+ "epoch": 2.363367799113737,
+ "grad_norm": 0.673847496509552,
+ "learning_rate": 3.358862144420131e-05,
+ "loss": 0.5116,
+ "step": 400
+ },
+ {
+ "epoch": 2.658788774002954,
+ "grad_norm": 0.7841825485229492,
+ "learning_rate": 3.085339168490153e-05,
+ "loss": 0.5114,
+ "step": 450
+ },
+ {
+ "epoch": 2.9542097488921715,
+ "grad_norm": 0.8053774237632751,
+ "learning_rate": 2.811816192560175e-05,
+ "loss": 0.5049,
+ "step": 500
+ },
+ {
+ "epoch": 3.2496307237813884,
+ "grad_norm": 0.8071188926696777,
+ "learning_rate": 2.538293216630197e-05,
+ "loss": 0.5045,
+ "step": 550
+ },
+ {
+ "epoch": 3.5450516986706058,
+ "grad_norm": 0.8850335478782654,
+ "learning_rate": 2.264770240700219e-05,
+ "loss": 0.4958,
+ "step": 600
+ },
+ {
+ "epoch": 3.8404726735598227,
+ "grad_norm": 1.001068115234375,
+ "learning_rate": 1.9912472647702408e-05,
+ "loss": 0.4927,
+ "step": 650
+ }
+ ],
+ "logging_steps": 50,
+ "max_steps": 1014,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 6,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 1.0693069955923968e+17,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-677/training_args.bin b/checkpoint-677/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..3d1f2c67181e3caa74f4776a0508fef4a135f353
--- /dev/null
+++ b/checkpoint-677/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:872c4fe8114f54151594e6613a66fece0ce49b15bbe3e4b8349db079199d124b
+size 5240
diff --git a/checkpoint-846/README.md b/checkpoint-846/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e47b03ac2b3ec63bb9b693d5ea09a59bed58eec6
--- /dev/null
+++ b/checkpoint-846/README.md
@@ -0,0 +1,202 @@
+---
+base_model: meta-llama/Llama-2-13b-chat-hf
+library_name: peft
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+
+### Framework versions
+
+- PEFT 0.13.2
\ No newline at end of file
diff --git a/checkpoint-846/adapter_config.json b/checkpoint-846/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..adaaeb24374ba5c7059503a0ebd5378b39206f06
--- /dev/null
+++ b/checkpoint-846/adapter_config.json
@@ -0,0 +1,29 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-chat-hf",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/checkpoint-846/adapter_model.safetensors b/checkpoint-846/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..a146c2ef159e50f704ec9b621969a2d4868b91da
--- /dev/null
+++ b/checkpoint-846/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:64645135aeab25cd34ad4ecd076c0ba22b6163a5b68aefec2ae5ed4f94330732
+size 26235704
diff --git a/checkpoint-846/optimizer.pt b/checkpoint-846/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f0ba7e3425bc77d7ec3a510bf8e1ac58f050f8c8
--- /dev/null
+++ b/checkpoint-846/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:557aea934707478782a3d41e5d844fa67ee75ba69e5cdc12bf045baf8652adb4
+size 52563258
diff --git a/checkpoint-846/rng_state.pth b/checkpoint-846/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..421edff8f71c55b5598ad5b7f1a12958255a7b35
--- /dev/null
+++ b/checkpoint-846/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:20c2eb11d2b683c85ee0e7da692f4b5b279e4d46d161a2bd6b46f8b649eb6709
+size 14244
diff --git a/checkpoint-846/scheduler.pt b/checkpoint-846/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ea2ea53af1469458e15d348d539cda3f453ffea2
--- /dev/null
+++ b/checkpoint-846/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab8f16729e3a13f7c0b70a540f3fba6c55f3abffc4be248e85d11207b1e0846a
+size 1064
diff --git a/checkpoint-846/trainer_state.json b/checkpoint-846/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..a3afcb4d8472a204d21a1a4ef4efa9955da0e2da
--- /dev/null
+++ b/checkpoint-846/trainer_state.json
@@ -0,0 +1,145 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 4.998522895125554,
+ "eval_steps": 500,
+ "global_step": 846,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.29542097488921715,
+ "grad_norm": 1.8936359882354736,
+ "learning_rate": 2.5e-05,
+ "loss": 2.2777,
+ "step": 50
+ },
+ {
+ "epoch": 0.5908419497784343,
+ "grad_norm": 0.42867106199264526,
+ "learning_rate": 5e-05,
+ "loss": 0.9288,
+ "step": 100
+ },
+ {
+ "epoch": 0.8862629246676514,
+ "grad_norm": 0.5983214378356934,
+ "learning_rate": 4.726477024070022e-05,
+ "loss": 0.6469,
+ "step": 150
+ },
+ {
+ "epoch": 1.1816838995568686,
+ "grad_norm": 0.5473693013191223,
+ "learning_rate": 4.452954048140044e-05,
+ "loss": 0.5595,
+ "step": 200
+ },
+ {
+ "epoch": 1.4771048744460857,
+ "grad_norm": 0.5971556901931763,
+ "learning_rate": 4.179431072210066e-05,
+ "loss": 0.537,
+ "step": 250
+ },
+ {
+ "epoch": 1.7725258493353029,
+ "grad_norm": 0.6025377511978149,
+ "learning_rate": 3.9059080962800876e-05,
+ "loss": 0.5271,
+ "step": 300
+ },
+ {
+ "epoch": 2.06794682422452,
+ "grad_norm": 0.9687663912773132,
+ "learning_rate": 3.6323851203501094e-05,
+ "loss": 0.5318,
+ "step": 350
+ },
+ {
+ "epoch": 2.363367799113737,
+ "grad_norm": 0.673847496509552,
+ "learning_rate": 3.358862144420131e-05,
+ "loss": 0.5116,
+ "step": 400
+ },
+ {
+ "epoch": 2.658788774002954,
+ "grad_norm": 0.7841825485229492,
+ "learning_rate": 3.085339168490153e-05,
+ "loss": 0.5114,
+ "step": 450
+ },
+ {
+ "epoch": 2.9542097488921715,
+ "grad_norm": 0.8053774237632751,
+ "learning_rate": 2.811816192560175e-05,
+ "loss": 0.5049,
+ "step": 500
+ },
+ {
+ "epoch": 3.2496307237813884,
+ "grad_norm": 0.8071188926696777,
+ "learning_rate": 2.538293216630197e-05,
+ "loss": 0.5045,
+ "step": 550
+ },
+ {
+ "epoch": 3.5450516986706058,
+ "grad_norm": 0.8850335478782654,
+ "learning_rate": 2.264770240700219e-05,
+ "loss": 0.4958,
+ "step": 600
+ },
+ {
+ "epoch": 3.8404726735598227,
+ "grad_norm": 1.001068115234375,
+ "learning_rate": 1.9912472647702408e-05,
+ "loss": 0.4927,
+ "step": 650
+ },
+ {
+ "epoch": 4.13589364844904,
+ "grad_norm": 1.3853482007980347,
+ "learning_rate": 1.7177242888402626e-05,
+ "loss": 0.494,
+ "step": 700
+ },
+ {
+ "epoch": 4.431314623338257,
+ "grad_norm": 1.1378982067108154,
+ "learning_rate": 1.4442013129102846e-05,
+ "loss": 0.4853,
+ "step": 750
+ },
+ {
+ "epoch": 4.726735598227474,
+ "grad_norm": 1.11444890499115,
+ "learning_rate": 1.1706783369803063e-05,
+ "loss": 0.4797,
+ "step": 800
+ }
+ ],
+ "logging_steps": 50,
+ "max_steps": 1014,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 6,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 1.336633744490496e+17,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-846/training_args.bin b/checkpoint-846/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..3d1f2c67181e3caa74f4776a0508fef4a135f353
--- /dev/null
+++ b/checkpoint-846/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:872c4fe8114f54151594e6613a66fece0ce49b15bbe3e4b8349db079199d124b
+size 5240
diff --git a/special_tokens_map.json b/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "</s>",
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/tokenizer.model b/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/tokenizer_config.json b/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..80c6aa4e7dd57e0e78805f1ddec9b62f2132fa5d
--- /dev/null
+++ b/tokenizer_config.json
@@ -0,0 +1,44 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": true,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content.strip() + ' ' + eos_token }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "</s>",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false
+}