Update README.md — changed hunk (starting at line 71): the NLI usage example was reformatted to add blank lines between logical sections.

Previous version of the example:
```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
# device = "cuda:0" or "cpu"
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
model_name = "mjwong/drama-base-xnli-anli"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
premise = "But I thought you'd sworn off coffee."
hypothesis = "I thought that you vowed to drink more coffee."
input = tokenizer(premise, hypothesis, truncation=True, return_tensors="pt")
output = model(input["input_ids"].to(device))
prediction = torch.softmax(output["logits"][0], -1).tolist()
```
Updated version of the example (blank lines added for readability):

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# device = "cuda:0" or "cpu"
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

model_name = "mjwong/drama-base-xnli-anli"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

premise = "But I thought you'd sworn off coffee."
hypothesis = "I thought that you vowed to drink more coffee."

input = tokenizer(premise, hypothesis, truncation=True, return_tensors="pt")
output = model(input["input_ids"].to(device))
prediction = torch.softmax(output["logits"][0], -1).tolist()
```