Update README.md
README.md CHANGED
@@ -69,6 +69,23 @@ model_output=model.generate(inp, use_cache=True, num_beams=4, max_length=20, min
 decoded_output=tokenizer.decode(model_output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
 
 print(decoded_output) # I am happy
+
+inp = tokenizer("मैं [MASK] हूँ </s> <2hi>", add_special_tokens=False, return_tensors="pt", padding=True).input_ids
+
+model_output=model.generate(inp, use_cache=True, num_beams=4, max_length=20, min_length=1, early_stopping=True, pad_token_id=pad_id, bos_token_id=bos_id, eos_token_id=eos_id, decoder_start_token_id=tokenizer._convert_token_to_id_with_added_voc("<2en>"))
+
+decoded_output=tokenizer.decode(model_output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
+
+print(decoded_output) # मैं जानता हूँ
+
+inp = tokenizer("मला [MASK] पाहिजे </s> <2mr>", add_special_tokens=False, return_tensors="pt", padding=True).input_ids
+
+model_output=model.generate(inp, use_cache=True, num_beams=4, max_length=20, min_length=1, early_stopping=True, pad_token_id=pad_id, bos_token_id=bos_id, eos_token_id=eos_id, decoder_start_token_id=tokenizer._convert_token_to_id_with_added_voc("<2en>"))
+
+decoded_output=tokenizer.decode(model_output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
+
+print(decoded_output) # मला ओळखलं पाहिजे
+
 ```
 
 # Notes:
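
The new snippets reuse `tokenizer`, `model`, `pad_id`, `bos_id`, and `eos_id` defined earlier in the README. For readers who want to try the mask-filling calls in isolation, the sketch below wraps them in a small helper. It is illustrative only: the `ai4bharat/IndicBART` checkpoint name and the `AutoTokenizer`/`AutoModelForSeq2SeqLM` loading calls are assumptions inferred from the language tags (`<2hi>`, `<2mr>`, `<2en>`), and `fill_mask` is a hypothetical helper, not part of the README.

```python
# Minimal, self-contained sketch of the mask-filling usage added in this diff.
# ASSUMPTION: the checkpoint name and loading code below are inferred, not taken
# from this README; adjust them to match the model the README actually loads.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "ai4bharat/IndicBART", do_lower_case=False, use_fast=False, keep_accents=True
)
model = AutoModelForSeq2SeqLM.from_pretrained("ai4bharat/IndicBART")
model.eval()  # disable dropout for generation

# Special-token ids passed to generate(), as in the snippets above.
bos_id = tokenizer._convert_token_to_id_with_added_voc("<s>")
eos_id = tokenizer._convert_token_to_id_with_added_voc("</s>")
pad_id = tokenizer._convert_token_to_id_with_added_voc("<pad>")

def fill_mask(text, decoder_start_tag):
    """Beam-search generation for an input of the form 'sentence </s> <2xx>'.

    `decoder_start_tag` is the language tag used as decoder_start_token_id;
    the diff above uses "<2en>" for both the Hindi and Marathi examples.
    """
    inp = tokenizer(text, add_special_tokens=False, return_tensors="pt", padding=True).input_ids
    out = model.generate(
        inp,
        use_cache=True,
        num_beams=4,
        max_length=20,
        min_length=1,
        early_stopping=True,
        pad_token_id=pad_id,
        bos_token_id=bos_id,
        eos_token_id=eos_id,
        decoder_start_token_id=tokenizer._convert_token_to_id_with_added_voc(decoder_start_tag),
    )
    return tokenizer.decode(out[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)

print(fill_mask("मैं [MASK] हूँ </s> <2hi>", "<2en>"))   # Hindi mask filling
print(fill_mask("मला [MASK] पाहिजे </s> <2mr>", "<2en>"))  # Marathi mask filling
```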