Update README.md
README.md (changed)
@@ -12,7 +12,7 @@ license: llama3
 import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
-BASE_MODEL = "
+BASE_MODEL = "sh2orc/llama-3-korean-8b"
 
 model = AutoModelForCausalLM.from_pretrained(BASE_MODEL,
     torch_dtype=torch.bfloat16,
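For reference, the README snippet this hunk edits loads the model with Transformers roughly as sketched below. Only the imports, the `BASE_MODEL` line, the bfloat16 `from_pretrained` call, and the final `print` pattern are visible in the diff; the tokenizer call, `device_map`, prompt, and generation settings are assumptions.

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

BASE_MODEL = "sh2orc/llama-3-korean-8b"

# The bfloat16 load appears in the hunk; device_map is an assumed completion.
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)
model = AutoModelForCausalLM.from_pretrained(
    BASE_MODEL,
    torch_dtype=torch.bfloat16,
    device_map="auto",  # assumption: not shown in the diff context
)

pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

prompt = "대한민국의 수도는 어디인가요?"  # placeholder prompt, not from the diff
outputs = pipe(prompt, max_new_tokens=128)
print(outputs[0]["generated_text"][len(prompt):])  # matches the README's print pattern
```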
@@ -72,7 +72,7 @@ print(outputs[0]['generated_text'][len(prompt):])
 from vllm import LLM, SamplingParams
 from transformers import AutoTokenizer, pipeline
 
-BASE_MODEL = "sh2orc/llama-3-korean-8b
+BASE_MODEL = "sh2orc/llama-3-korean-8b"
 
 llm = LLM(model=BASE_MODEL)
 
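The second hunk sits in the README's vLLM section, where the commit adds the missing closing quote to the same model ID. A minimal sketch of how that snippet is typically completed is shown below; only the import, `BASE_MODEL`, and `LLM(model=BASE_MODEL)` lines appear in the diff, and the `SamplingParams` values and prompt are assumptions.

```python
from vllm import LLM, SamplingParams

BASE_MODEL = "sh2orc/llama-3-korean-8b"

# Load the model with vLLM (this line appears in the README hunk).
llm = LLM(model=BASE_MODEL)

# Sampling settings here are illustrative assumptions, not from the diff.
sampling_params = SamplingParams(temperature=0.7, top_p=0.9, max_tokens=128)

prompt = "대한민국의 수도는 어디인가요?"  # placeholder prompt
outputs = llm.generate([prompt], sampling_params)
print(outputs[0].outputs[0].text)
```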