SaveEmbeddings.py
#!/usr/bin/env python
# coding: utf-8
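# Extract BERT embeddings for tweet text tokens from the RecSys Challenge 2020
# training set and append them, keyed by engaging_user_id, to a CSV on S3.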
# In[ ]:
import pandas as pd
import s3fs
from smart_open import open
import boto3
from io import StringIO # python3; python2: BytesIO
from boto3.s3.transfer import TransferConfig
import metrics
import torch
from transformers import BertTokenizer, BertModel
import numpy as np
# In[ ]:
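# Read only the tweet token column (column index 1) from the training set on S3.
# This cell is superseded by the next one, which also loads engaging_user_id.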
column_of_interest = ["text_ tokens"]
train_set = pd.read_csv('s3://recsys-challenge-2020/train_set.csv', encoding="utf-8",
usecols= [1])
# In[ ]:
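# Re-read the training set with both the tweet tokens (column 1) and the engaging
# user id (column 4); column_of_interest only documents the expected header names,
# since usecols selects columns by position.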
column_of_interest = ["text_ tokens", "engaging_user_id"]
train_set = pd.read_csv('s3://recsys-challenge-2020/train_set.csv', encoding="utf-8",
usecols= [1, 4])
# In[ ]:
train_set.head()
# In[ ]:
len(train_set)
# In[ ]:
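# Multilingual cased BERT tokenizer from the Hugging Face hub; the model weights are
# loaded from a local directory. The tweets are already stored as BERT token IDs, so
# only the model is actually used below.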
tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-cased', do_lower_case=False)
model = BertModel.from_pretrained('/dev/bert/')
# In[ ]:
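# Split the training set into 1,000,000 chunks; for each chunk, compute the [CLS]
# embedding of every tweet and append the results to the output CSV on S3.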
iterator = 0
for chunk in np.array_split(train_set, 1000000):
    print(iterator)
    iterator = iterator + 1
    df_embeddings = chunk[["engaging_user_id"]]
    # Disable gradient tracking so the stored embeddings do not keep autograd graphs alive.
    with torch.no_grad():
        # [CLS] embedding of each tweet; the token IDs are stored as tab-separated integers.
        df_embeddings['text_embeddings'] = chunk['text_ tokens'].apply(
            lambda x: model(torch.tensor(list(map(int, x.split('\t')))).unsqueeze(0))[0][0][0])
    # Append the processed chunk to the per-user embeddings CSV on S3.
    df_embeddings.to_csv('s3://recsys-challenge-2020/embeddings_user.csv', mode='a', header=False)