forked from UKPLab/sentence-transformers
model_card_templates.py
import logging

from .util import fullname


class ModelCardTemplate:
    __TAGS__ = ["sentence-transformers", "feature-extraction", "sentence-similarity"]
    __DEFAULT_VARS__ = {
        "{PIPELINE_TAG}": "sentence-similarity",
        "{MODEL_DESCRIPTION}": "<!--- Describe your model here -->",
        "{TRAINING_SECTION}": "",
        "{USAGE_TRANSFORMERS_SECTION}": "",
        "{EVALUATION}": "<!--- Describe how your model was evaluated -->",
        "{CITING}": "<!--- Describe where people can find more information -->"
    }
    __MODEL_CARD__ = """
---
pipeline_tag: {PIPELINE_TAG}
tags:
{TAGS}
---

# {MODEL_NAME}

This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a {NUM_DIMENSIONS} dimensional dense vector space and can be used for tasks like clustering or semantic search.

{MODEL_DESCRIPTION}

## Usage (Sentence-Transformers)

Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:

```
pip install -U sentence-transformers
```

Then you can use the model like this:

```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]

model = SentenceTransformer('{MODEL_NAME}')
embeddings = model.encode(sentences)
print(embeddings)
```

{USAGE_TRANSFORMERS_SECTION}

## Evaluation Results

{EVALUATION}

For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})

{TRAINING_SECTION}

## Full Model Architecture
```
{FULL_MODEL_STR}
```

## Citing & Authors

{CITING}
"""
    __TRAINING_SECTION__ = """
## Training
The model was trained with the parameters:

{LOSS_FUNCTIONS}

Parameters of the fit()-Method:
```
{FIT_PARAMETERS}
```
"""
    __USAGE_TRANSFORMERS__ = """\n
## Usage (HuggingFace Transformers)
Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling operation on top of the contextualized word embeddings.

```python
from transformers import AutoTokenizer, AutoModel
import torch

{POOLING_FUNCTION}

# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']

# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}')
model = AutoModel.from_pretrained('{MODEL_NAME}')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# Perform pooling. In this case, {POOLING_MODE} pooling.
sentence_embeddings = {POOLING_FUNCTION_NAME}(model_output, encoded_input['attention_mask'])

print("Sentence embeddings:")
print(sentence_embeddings)
```
"""
    @staticmethod
    def model_card_get_pooling_function(pooling_mode):
        if pooling_mode == 'max':
            return "max_pooling", """
# Max Pooling - Take the max value over time for every dimension.
def max_pooling(model_output, attention_mask):
    token_embeddings = model_output[0] #First element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    token_embeddings[input_mask_expanded == 0] = -1e9  # Set padding tokens to large negative value
    return torch.max(token_embeddings, 1)[0]
"""
        elif pooling_mode == 'mean':
            return "mean_pooling", """
#Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0] #First element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
"""
        elif pooling_mode == 'cls':
            return "cls_pooling", """
def cls_pooling(model_output, attention_mask):
    return model_output[0][:,0]
"""
    @staticmethod
    def get_train_objective_info(dataloader, loss):
        """Describes one (dataloader, loss) training objective as markdown snippets for the model card."""
        try:
            if hasattr(dataloader, 'get_config_dict'):
                loader_params = dataloader.get_config_dict()
            else:
                loader_params = {}
                loader_params['batch_size'] = dataloader.batch_size if hasattr(dataloader, 'batch_size') else 'unknown'
                if hasattr(dataloader, 'sampler'):
                    loader_params['sampler'] = fullname(dataloader.sampler)
                if hasattr(dataloader, 'batch_sampler'):
                    loader_params['batch_sampler'] = fullname(dataloader.batch_sampler)

            dataloader_str = """**DataLoader**:\n\n`{}` of length {} with parameters:
```
{}
```""".format(fullname(dataloader), len(dataloader), loader_params)

            loss_str = "**Loss**:\n\n`{}` {}".format(
                fullname(loss),
                """with parameters:
```
{}
```""".format(loss.get_config_dict()) if hasattr(loss, 'get_config_dict') else "")

            return [dataloader_str, loss_str]
        except Exception as e:
            logging.warning("Exception when creating get_train_objective_info: {}".format(str(e)))
            return ""