Commit cc2e351
Parent(s): 248e66b
Create README.md
README.md ADDED
---
library_name: transformers
pipeline_tag: text-generation
language:
- ar
---

# 1p46G-gemma-fp-dedup-rehydr-ar-350BT-seed-6/transformers/107000

Tokenizer: `google/gemma-7b`

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Initialize the tokenizer and encode the test prompt
TEST_PROMPT = "الزرادشتية هي ديانة انتشرت في بلاد"  # "Zoroastrianism is a religion that spread in the lands of..."
save_path = "nouamanetazi/hf-ar-107000"
tokenizer = AutoTokenizer.from_pretrained(save_path)
input_ids = tokenizer(TEST_PROMPT, return_tensors="pt")["input_ids"].cuda()  # google/gemma-7b tokenizer
print("Input prompt:", tokenizer.batch_decode(input_ids)[0])

# Load the model on GPU in bfloat16 and generate a continuation
model = AutoModelForCausalLM.from_pretrained(save_path, torch_dtype=torch.bfloat16, device_map="cuda")
outputs = model.generate(input_ids, max_new_tokens=100)
print("Generated text:", tokenizer.batch_decode(outputs)[0])
```
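
As an alternative to the manual `generate` call above, the checkpoint can also be used through the `text-generation` pipeline declared in the front matter. This is a minimal sketch, not part of the original card; it assumes the `nouamanetazi/hf-ar-107000` checkpoint loads with the standard `transformers` pipeline API.

```python
import torch
from transformers import pipeline

# Minimal sketch (assumption, not from the original card):
# run the same checkpoint through the text-generation pipeline.
generator = pipeline(
    "text-generation",
    model="nouamanetazi/hf-ar-107000",
    torch_dtype=torch.bfloat16,
    device_map="cuda",
)

# Continue the same Arabic test prompt used above
result = generator("الزرادشتية هي ديانة انتشرت في بلاد", max_new_tokens=100)
print(result[0]["generated_text"])
```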