diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index f4cbe8ad..d7b40818 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -17,12 +17,12 @@ jobs:
timeout-minutes: 120
strategy:
matrix:
- python-version: [3.5,3.6,3.7]
- torch-version: [1.1.0,1.2.0]
+ python-version: [3.6,3.7]
+ torch-version: [1.1.0,1.2.0,1.3.0,1.4.0]
- # exclude:
- # - python-version: 3.7
- # tf-version: 1.4.0
+# exclude:
+# - python-version: 3.5
+# tf-version: 1.1.0
steps:
diff --git a/README.md b/README.md
index f4920e72..6aa60417 100644
--- a/README.md
+++ b/README.md
@@ -19,30 +19,6 @@ DeepCTR is a **Easy-to-use**,**Modular** and **Extendible** package of deep-lear
Let's [**Get Started!**](https://deepctr-torch.readthedocs.io/en/latest/Quick-Start.html)([Chinese Introduction](https://zhuanlan.zhihu.com/p/53231955))
-## Contributors([welcome to join us!](./CONTRIBUTING.md))
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
## Models List
| Model | Paper |
@@ -67,4 +43,27 @@ Let's [**Get Started!**](https://deepctr-torch.readthedocs.io/en/latest/Quick-St
Please follow our wechat to join group:
- 公众号:**浅梦的学习笔记**
- wechat ID: **deepctrbot**
-![wechat](./docs/pics/weichennote.png)
\ No newline at end of file
+![wechat](./docs/pics/weichennote.png)
+
+## Contributors([welcome to join us!](./CONTRIBUTING.md))
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/deepctr_torch/__init__.py b/deepctr_torch/__init__.py
index 0c223331..47005c76 100644
--- a/deepctr_torch/__init__.py
+++ b/deepctr_torch/__init__.py
@@ -2,5 +2,5 @@
from . import models
from .utils import check_version
-__version__ = '0.1.3'
+__version__ = '0.2.0'
check_version(__version__)
\ No newline at end of file
diff --git a/deepctr_torch/inputs.py b/deepctr_torch/inputs.py
index ddb584a8..35c837d0 100644
--- a/deepctr_torch/inputs.py
+++ b/deepctr_torch/inputs.py
@@ -4,59 +4,92 @@
Weichen Shen,wcshen1994@163.com
"""
-from collections import OrderedDict, namedtuple, defaultdict
-from itertools import chain
+from collections import OrderedDict, namedtuple
import torch
import torch.nn as nn
-from .layers.utils import concat_fun
from .layers.sequence import SequencePoolingLayer
+from .layers.utils import concat_fun
DEFAULT_GROUP_NAME = "default_group"
-class SparseFeat(namedtuple('SparseFeat', ['name', 'dimension', 'use_hash', 'dtype', 'embedding_name', 'embedding', 'group_name'])):
+class SparseFeat(namedtuple('SparseFeat',
+ ['name', 'vocabulary_size', 'embedding_dim', 'use_hash', 'dtype', 'embedding_name',
+ 'group_name'])):
__slots__ = ()
- def __new__(cls, name, dimension, use_hash=False, dtype="int32",
- embedding_name=None, embedding=True, group_name=DEFAULT_GROUP_NAME):
- if embedding and embedding_name is None:
+ def __new__(cls, name, vocabulary_size, embedding_dim=4, use_hash=False, dtype="int32", embedding_name=None,
+ group_name=DEFAULT_GROUP_NAME):
+ if embedding_name is None:
embedding_name = name
- return super(SparseFeat, cls).__new__(cls, name, dimension, use_hash, dtype, embedding_name, embedding, group_name)
+ if embedding_dim == "auto":
+ embedding_dim = 6 * int(pow(vocabulary_size, 0.25))
+ if use_hash:
+            print("Notice! Feature Hashing on the fly is not currently supported in the torch version; you can use the TensorFlow version instead!")
+ return super(SparseFeat, cls).__new__(cls, name, vocabulary_size, embedding_dim, use_hash, dtype,
+ embedding_name, group_name)
+ def __hash__(self):
+ return self.name.__hash__()
-class DenseFeat(namedtuple('DenseFeat', ['name', 'dimension', 'dtype'])):
+
+class VarLenSparseFeat(namedtuple('VarLenSparseFeat',
+ ['sparsefeat', 'maxlen', 'combiner', 'length_name'])):
__slots__ = ()
- def __new__(cls, name, dimension=1, dtype="float32"):
- return super(DenseFeat, cls).__new__(cls, name, dimension, dtype)
+ def __new__(cls, sparsefeat, maxlen, combiner="mean", length_name=None):
+ return super(VarLenSparseFeat, cls).__new__(cls, sparsefeat, maxlen, combiner, length_name)
+
+ @property
+ def name(self):
+ return self.sparsefeat.name
+
+ @property
+ def vocabulary_size(self):
+ return self.sparsefeat.vocabulary_size
+
+ @property
+ def embedding_dim(self):
+ return self.sparsefeat.embedding_dim
+
+ @property
+ def dtype(self):
+ return self.sparsefeat.dtype
+
+ @property
+ def embedding_name(self):
+ return self.sparsefeat.embedding_name
+
+ @property
+ def group_name(self):
+ return self.sparsefeat.group_name
+
+ def __hash__(self):
+ return self.name.__hash__()
-class VarLenSparseFeat(namedtuple('VarLenFeat',
- ['name', 'dimension', 'maxlen', 'combiner', 'use_hash', 'dtype', 'embedding_name',
- 'embedding', 'group_name'])):
+class DenseFeat(namedtuple('DenseFeat', ['name', 'dimension', 'dtype'])):
__slots__ = ()
- def __new__(cls, name, dimension, maxlen, combiner="mean", use_hash=False, dtype="float32", embedding_name=None,
- embedding=True, group_name=DEFAULT_GROUP_NAME):
- if embedding_name is None:
- embedding_name = name
- return super(VarLenSparseFeat, cls).__new__(cls, name, dimension, maxlen, combiner, use_hash, dtype,
+ def __new__(cls, name, dimension=1, dtype="float32"):
+ return super(DenseFeat, cls).__new__(cls, name, dimension, dtype)
- embedding_name, embedding, group_name)
+ def __hash__(self):
+ return self.name.__hash__()
def get_feature_names(feature_columns):
features = build_input_features(feature_columns)
return list(features.keys())
-def get_inputs_list(inputs):
- return list(chain(*list(map(lambda x: x.values(), filter(lambda x: x is not None, inputs)))))
+# def get_inputs_list(inputs):
+# return list(chain(*list(map(lambda x: x.values(), filter(lambda x: x is not None, inputs)))))
-def build_input_features(feature_columns):
+def build_input_features(feature_columns):
# Return OrderedDict: {feature_name:(start, start+dimension)}
features = OrderedDict()
@@ -72,21 +105,24 @@ def build_input_features(feature_columns):
elif isinstance(feat, DenseFeat):
features[feat_name] = (start, start + feat.dimension)
start += feat.dimension
- elif isinstance(feat,VarLenSparseFeat):
+ elif isinstance(feat, VarLenSparseFeat):
features[feat_name] = (start, start + feat.maxlen)
start += feat.maxlen
+ if feat.length_name is not None:
+ features[feat.length_name] = (start, start + 1)
+ start += 1
else:
- raise TypeError("Invalid feature column type,got",type(feat))
+ raise TypeError("Invalid feature column type,got", type(feat))
return features
-def get_dense_input(features, feature_columns):
- dense_feature_columns = list(filter(lambda x: isinstance(
- x, DenseFeat), feature_columns)) if feature_columns else []
- dense_input_list = []
- for fc in dense_feature_columns:
- dense_input_list.append(features[fc.name])
- return dense_input_list
+# def get_dense_input(features, feature_columns):
+# dense_feature_columns = list(filter(lambda x: isinstance(
+# x, DenseFeat), feature_columns)) if feature_columns else []
+# dense_input_list = []
+# for fc in dense_feature_columns:
+# dense_input_list.append(features[fc.name])
+# return dense_input_list
def combined_dnn_input(sparse_embedding_list, dense_value_list):
@@ -103,68 +139,113 @@ def combined_dnn_input(sparse_embedding_list, dense_value_list):
else:
raise NotImplementedError
-
-def embedding_lookup(sparse_embedding_dict, sparse_input_dict, sparse_feature_columns, return_feat_list=(),
- mask_feat_list=(), to_list=False):
- """
- Args:
- sparse_embedding_dict: nn.ModuleDict, {embedding_name: nn.Embedding}
- sparse_input_dict: OrderedDict, {feature_name:(start, start+dimension)}
- sparse_feature_columns: list, sparse features
- return_feat_list: list, names of feature to be returned, defualt () -> return all features
- mask_feat_list, list, names of feature to be masked in hash transform
- Return:
- group_embedding_dict: defaultdict(list)
- """
- group_embedding_dict = defaultdict(list)
- for fc in sparse_feature_columns:
- feature_name = fc.name
- embedding_name = fc.embedding_name
- if (len(return_feat_list) == 0 or feature_name in return_feat_list):
- if fc.use_hash:
- # lookup_idx = Hash(fc.vocabulary_size, mask_zero=(feature_name in mask_feat_list))(
- # sparse_input_dict[feature_name])
- # TODO: add hash function
- lookup_idx = sparse_input_dict[feature_name]
- else:
- lookup_idx = sparse_input_dict[feature_name]
-
- group_embedding_dict[fc.group_name].append(sparse_embedding_dict[embedding_name](lookup_idx))
- if to_list:
- return list(chain.from_iterable(group_embedding_dict.values()))
- return group_embedding_dict
-
-
-def varlen_embedding_lookup(embedding_dict, sequence_input_dict, varlen_sparse_feature_columns):
- varlen_embedding_vec_dict = {}
- for fc in varlen_sparse_feature_columns:
- feature_name = fc.name
- embedding_name = fc.embedding_name
- if fc.use_hash:
- # lookup_idx = Hash(fc.vocabulary_size, mask_zero=True)(sequence_input_dict[feature_name])
- # TODO: add hash function
- lookup_idx = sequence_input_dict[feature_name]
- else:
- lookup_idx = sequence_input_dict[feature_name]
- varlen_embedding_vec_dict[feature_name] = embedding_dict[embedding_name](lookup_idx)
- return varlen_embedding_vec_dict
-
-
-def get_varlen_pooling_list(embedding_dict, features, varlen_sparse_feature_columns, to_list=False):
- pooling_vec_list = defaultdict(list)
- for fc in varlen_sparse_feature_columns:
- feature_name = fc.name
- combiner = fc.combiner
- feature_length_name = fc.length_name
- if feature_length_name is not None:
- seq_input = embedding_dict[feature_name]
- vec = SequencePoolingLayer(combiner)([seq_input, features[feature_length_name]])
+ #
+ # def embedding_lookup(sparse_embedding_dict, sparse_input_dict, sparse_feature_columns, return_feat_list=(),
+ # mask_feat_list=(), to_list=False):
+ # """
+ # Args:
+ # sparse_embedding_dict: nn.ModuleDict, {embedding_name: nn.Embedding}
+ # sparse_input_dict: OrderedDict, {feature_name:(start, start+dimension)}
+ # sparse_feature_columns: list, sparse features
+ # return_feat_list: list, names of feature to be returned, defualt () -> return all features
+ # mask_feat_list, list, names of feature to be masked in hash transform
+ # Return:
+ # group_embedding_dict: defaultdict(list)
+ # """
+ # group_embedding_dict = defaultdict(list)
+ # for fc in sparse_feature_columns:
+ # feature_name = fc.name
+ # embedding_name = fc.embedding_name
+ # if (len(return_feat_list) == 0 or feature_name in return_feat_list):
+ # if fc.use_hash:
+ # # lookup_idx = Hash(fc.vocabulary_size, mask_zero=(feature_name in mask_feat_list))(
+ # # sparse_input_dict[feature_name])
+ # # TODO: add hash function
+ # lookup_idx = sparse_input_dict[feature_name]
+ # else:
+ # lookup_idx = sparse_input_dict[feature_name]
+ #
+ # group_embedding_dict[fc.group_name].append(sparse_embedding_dict[embedding_name](lookup_idx))
+ # if to_list:
+ # return list(chain.from_iterable(group_embedding_dict.values()))
+ # return group_embedding_dict
+ #
+ #
+ # def varlen_embedding_lookup(embedding_dict, sequence_input_dict, varlen_sparse_feature_columns):
+ # varlen_embedding_vec_dict = {}
+ # for fc in varlen_sparse_feature_columns:
+ # feature_name = fc.name
+ # embedding_name = fc.embedding_name
+ # if fc.use_hash:
+ # # lookup_idx = Hash(fc.vocabulary_size, mask_zero=True)(sequence_input_dict[feature_name])
+ # # TODO: add hash function
+ # lookup_idx = sequence_input_dict[feature_name]
+ # else:
+ # lookup_idx = sequence_input_dict[feature_name]
+ # varlen_embedding_vec_dict[feature_name] = embedding_dict[embedding_name](lookup_idx)
+ # return varlen_embedding_vec_dict
+ #
+ #
+ # def get_varlen_pooling_list(embedding_dict, features, varlen_sparse_feature_columns, to_list=False):
+ # pooling_vec_list = defaultdict(list)
+ # for fc in varlen_sparse_feature_columns:
+ # feature_name = fc.name
+ # combiner = fc.combiner
+ # feature_length_name = fc.length_name
+ # if feature_length_name is not None:
+ # seq_input = embedding_dict[feature_name]
+ # vec = SequencePoolingLayer(combiner)([seq_input, features[feature_length_name]])
+ # else:
+ # seq_input = embedding_dict[feature_name]
+ # vec = SequencePoolingLayer(combiner)(seq_input)
+ # pooling_vec_list[fc.group_name].append(vec)
+ #
+ # if to_list:
+ # return chain.from_iterable(pooling_vec_list.values())
+ #
+ # return pooling_vec_list
+
+
+def get_varlen_pooling_list(embedding_dict, features, feature_index, varlen_sparse_feature_columns, device):
+ varlen_sparse_embedding_list = []
+
+ for feat in varlen_sparse_feature_columns:
+ seq_emb = embedding_dict[feat.embedding_name](
+ features[:, feature_index[feat.name][0]:feature_index[feat.name][1]].long())
+ if feat.length_name is None:
+ seq_mask = features[:, feature_index[feat.name][0]:feature_index[feat.name][1]].long() != 0
+
+ emb = SequencePoolingLayer(mode=feat.combiner, supports_masking=True, device=device)(
+ [seq_emb, seq_mask])
else:
- seq_input = embedding_dict[feature_name]
- vec = SequencePoolingLayer(combiner)(seq_input)
- pooling_vec_list[fc.group_name].append(vec)
+ seq_length = features[:,
+ feature_index[feat.length_name][0]:feature_index[feat.length_name][1]].long()
+ emb = SequencePoolingLayer(mode=feat.combiner, supports_masking=False, device=device)(
+ [seq_emb, seq_length])
+ varlen_sparse_embedding_list.append(emb)
+ return varlen_sparse_embedding_list
+
+
+def create_embedding_matrix(feature_columns, init_std=0.0001, linear=False, sparse=False, device='cpu'):
+ # Return nn.ModuleDict: for sparse features, {embedding_name: nn.Embedding}
+ # for varlen sparse features, {embedding_name: nn.EmbeddingBag}
+ sparse_feature_columns = list(
+ filter(lambda x: isinstance(x, SparseFeat), feature_columns)) if len(feature_columns) else []
+
+ varlen_sparse_feature_columns = list(
+ filter(lambda x: isinstance(x, VarLenSparseFeat), feature_columns)) if len(feature_columns) else []
+
+ embedding_dict = nn.ModuleDict(
+ {feat.embedding_name: nn.Embedding(feat.vocabulary_size, feat.embedding_dim if not linear else 1, sparse=sparse)
+ for feat in
+ sparse_feature_columns + varlen_sparse_feature_columns}
+ )
+
+ # for feat in varlen_sparse_feature_columns:
+ # embedding_dict[feat.embedding_name] = nn.EmbeddingBag(
+ # feat.dimension, embedding_size, sparse=sparse, mode=feat.combiner)
- if to_list:
- return chain.from_iterable(pooling_vec_list.values())
+ for tensor in embedding_dict.values():
+ nn.init.normal_(tensor.weight, mean=0, std=init_std)
- return pooling_vec_list
+ return embedding_dict.to(device)
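
The reworked feature-column API above is easiest to see end to end. A minimal sketch against the 0.2.0 interfaces introduced in this diff; the feature names and sizes (`user_id`, `hist_item_id`, etc.) are invented for illustration.

```python
from deepctr_torch.inputs import (SparseFeat, VarLenSparseFeat, DenseFeat,
                                  build_input_features, get_feature_names)

feature_columns = [
    # vocabulary_size and embedding_dim replace the old single `dimension` field
    SparseFeat('user_id', vocabulary_size=1000, embedding_dim=8),
    # embedding_dim="auto" resolves to 6 * int(vocabulary_size ** 0.25)
    SparseFeat('item_id', vocabulary_size=5000, embedding_dim='auto'),
    DenseFeat('price', dimension=1),
    # VarLenSparseFeat now wraps a SparseFeat instead of duplicating its fields
    VarLenSparseFeat(SparseFeat('hist_item_id', vocabulary_size=5000, embedding_dim=8),
                     maxlen=20, combiner='mean', length_name='hist_len'),
]

features = build_input_features(feature_columns)
# OrderedDict of {feature_name: (start, end)} column slices; because length_name
# is set, 'hist_len' gets its own one-column slice after the 'hist_item_id' block
print(features)
print(get_feature_names(feature_columns))
```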
diff --git a/deepctr_torch/layers/__init__.py b/deepctr_torch/layers/__init__.py
index 7818c993..3767b035 100644
--- a/deepctr_torch/layers/__init__.py
+++ b/deepctr_torch/layers/__init__.py
@@ -1,4 +1,4 @@
from .interaction import *
from .core import *
from .utils import concat_fun
-from .sequence import KMaxPooling
+from .sequence import KMaxPooling, SequencePoolingLayer
diff --git a/deepctr_torch/layers/activation.py b/deepctr_torch/layers/activation.py
index 1473d4b0..73ef23e6 100644
--- a/deepctr_torch/layers/activation.py
+++ b/deepctr_torch/layers/activation.py
@@ -1,59 +1,56 @@
# -*- coding:utf-8 -*-
-"""
-Author:
- Yuef Zhang
-
-"""
-import sys
-
-import torch
import torch.nn as nn
-import torch.nn.functional as F
-
-
-device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
-
-class Dice(nn.Module):
- """The Data Adaptive Activation Function in DIN,which can be viewed as a generalization of PReLu and can adaptively adjust the rectified point according to distribution of input data.
-
- Input shape:
- - 2 dims: [batch_size, embedding_size(features)]
- - 3 dims: [batch_size, num_features, embedding_size(features)]
-
- Output shape:
- - Same shape as the input.
-
- References
- - [Zhou G, Zhu X, Song C, et al. Deep interest network for click-through rate prediction[C]//Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining. ACM, 2018: 1059-1068.](https://arxiv.org/pdf/1706.06978.pdf)
- - https://github.com/zhougr1993/DeepInterestNetwork, https://github.com/fanoping/DIN-pytorch
- """
- def __init__(self, num_features, dim=2, epsilon=1e-9):
- super(Dice, self).__init__()
- assert dim == 2 or dim == 3
- self.bn = nn.BatchNorm1d(num_features, eps=epsilon)
- self.sigmoid = nn.Sigmoid()
- self.dim = dim
-
- if self.dim == 2:
- self.alpha = torch.zeros((num_features,)).to(device)
- else:
- self.alpha = torch.zeros((num_features, 1)).to(device)
-
- def forward(self, x):
- # x shape: [batch_size, num_features, embedding_size(features)]
- assert x.dim() == 2 or x.dim() == 3
- if self.dim == 2:
- x_p = self.sigmoid(self.bn(x))
- out = self.alpha * (1 - x_p) * x + x_p * x
- else:
- x = torch.transpose(x, 1, 2)
- x_p = self.sigmoid(self.bn(x))
- out = self.alpha * (1 - x_p) * x + x_p * x
- out = torch.transpose(out, 1, 2)
-
- return out
+# class Dice(nn.Module):
+# """The Data Adaptive Activation Function in DIN,which can be viewed as a generalization of PReLu and can adaptively adjust the rectified point according to distribution of input data.
+#
+# Input shape:
+# - 2 dims: [batch_size, embedding_size(features)]
+# - 3 dims: [batch_size, num_features, embedding_size(features)]
+#
+# Output shape:
+# - Same shape as the input.
+#
+# References
+# - [Zhou G, Zhu X, Song C, et al. Deep interest network for click-through rate prediction[C]//Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining. ACM, 2018: 1059-1068.](https://arxiv.org/pdf/1706.06978.pdf)
+# - https://github.com/zhougr1993/DeepInterestNetwork, https://github.com/fanoping/DIN-pytorch
+# """
+# def __init__(self, num_features, dim=2, epsilon=1e-9):
+# super(Dice, self).__init__()
+# assert dim == 2 or dim == 3
+# self.bn = nn.BatchNorm1d(num_features, eps=epsilon)
+# self.sigmoid = nn.Sigmoid()
+# self.dim = dim
+#
+# if self.dim == 2:
+# self.alpha = torch.zeros((num_features,)).to(device)
+# else:
+# self.alpha = torch.zeros((num_features, 1)).to(device)
+#
+# def forward(self, x):
+# # x shape: [batch_size, num_features, embedding_size(features)]
+# assert x.dim() == 2 or x.dim() == 3
+#
+# if self.dim == 2:
+# x_p = self.sigmoid(self.bn(x))
+# out = self.alpha * (1 - x_p) * x + x_p * x
+# else:
+# x = torch.transpose(x, 1, 2)
+# x_p = self.sigmoid(self.bn(x))
+# out = self.alpha * (1 - x_p) * x + x_p * x
+# out = torch.transpose(out, 1, 2)
+#
+# return out
+
+class Identity(nn.Module):
+    """A pass-through module; activation_layer returns it for the 'linear' activation."""
+
+ def __init__(self, **kwargs):
+ super(Identity, self).__init__()
+
+ def forward(self, X):
+ return X
def activation_layer(act_name, hidden_size=None, dice_dim=2):
@@ -67,7 +64,9 @@ def activation_layer(act_name, hidden_size=None, dice_dim=2):
act_layer: activation layer
"""
if isinstance(act_name, str):
- if act_name.lower() == 'relu' or 'linear':
+ if act_name.lower() == 'linear':
+ act_layer = Identity()
+ if act_name.lower() == 'relu':
act_layer = nn.ReLU(inplace=True)
elif act_name.lower() == 'dice':
assert dice_dim
@@ -83,10 +82,13 @@ def activation_layer(act_name, hidden_size=None, dice_dim=2):
if __name__ == "__main__":
- torch.manual_seed(7)
- a = Dice(3)
- b = torch.rand((5, 3))
- c = a(b)
- print(c.size())
- print('b:', b)
- print('c:', c)
+ pass
+ #device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
+
+ # torch.manual_seed(7)
+ # a = Dice(3)
+ # b = torch.rand((5, 3))
+ # c = a(b)
+ # print(c.size())
+ # print('b:', b)
+ # print('c:', c)
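
With Dice temporarily commented out above, the observable change in `activation_layer` is that `'linear'` now maps to the new `Identity` module instead of falling into the old `== 'relu' or 'linear'` bug (which was always truthy). A small sketch of the two string inputs this diff touches; the sample tensor is made up.

```python
import torch
from deepctr_torch.layers.activation import activation_layer

x = torch.tensor([[-1.0, 0.5], [2.0, -3.0]])

linear_act = activation_layer('linear')   # Identity: returns its input unchanged
relu_act = activation_layer('relu')       # nn.ReLU(inplace=True)

assert torch.equal(linear_act(x), x)
assert torch.equal(relu_act(x.clone()), torch.tensor([[0.0, 0.5], [2.0, 0.0]]))
```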
diff --git a/deepctr_torch/layers/core.py b/deepctr_torch/layers/core.py
index bc10c14b..8f1b7d99 100644
--- a/deepctr_torch/layers/core.py
+++ b/deepctr_torch/layers/core.py
@@ -37,12 +37,12 @@ class LocalActivationUnit(nn.Module):
def __init__(self, hidden_units=[80, 40], embedding_dim=4, activation='Dice', dropout_rate=0, use_bn=False):
super(LocalActivationUnit, self).__init__()
- self.dnn1 = DNN(inputs_dim=4*embedding_dim,
- hidden_units=hidden_units,
- activation=activation,
- dropout_rate=0.5,
- use_bn=use_bn,
- dice_dim=3)
+ self.dnn1 = DNN(inputs_dim=4 * embedding_dim,
+ hidden_units=hidden_units,
+ activation=activation,
+ dropout_rate=0.5,
+ use_bn=use_bn,
+ dice_dim=3)
# self.dnn2 = DNN(inputs_dim=hidden_units[-1],
# hidden_units=[1],
@@ -59,7 +59,7 @@ def forward(self, query, user_behavior):
user_behavior_len = user_behavior.size(1)
queries = torch.cat([query for _ in range(user_behavior_len)], dim=1)
- attention_input = torch.cat([queries, user_behavior, queries-user_behavior, queries*user_behavior], dim=-1)
+ attention_input = torch.cat([queries, user_behavior, queries - user_behavior, queries * user_behavior], dim=-1)
attention_output = self.dnn1(attention_input)
attention_output = self.dense(attention_output)
@@ -109,7 +109,7 @@ def __init__(self, inputs_dim, hidden_units, activation='relu', l2_reg=0, dropou
if self.use_bn:
self.bn = nn.ModuleList(
[nn.BatchNorm1d(hidden_units[i + 1]) for i in range(len(hidden_units) - 1)])
-
+
self.activation_layers = nn.ModuleList(
[activation_layer(activation, hidden_units[i + 1], dice_dim) for i in range(len(hidden_units) - 1)])
diff --git a/deepctr_torch/layers/interaction.py b/deepctr_torch/layers/interaction.py
index c2594c32..84afa51e 100644
--- a/deepctr_torch/layers/interaction.py
+++ b/deepctr_torch/layers/interaction.py
@@ -4,9 +4,10 @@
import torch.nn as nn
import torch.nn.functional as F
+from ..layers.activation import activation_layer
from ..layers.core import Conv2dSame
from ..layers.sequence import KMaxPooling
-from ..layers.activation import activation_layer
+
class FM(nn.Module):
"""Factorization Machine models pairwise (order-2) feature interactions
@@ -609,11 +610,11 @@ def __init__(self, field_size, conv_kernel_width, conv_filters, device='cpu'):
module_list.append(torch.nn.Tanh().to(self.device))
# KMaxPooling, extract top_k, returns tensors values
- module_list.append(KMaxPooling(k = min(k, filed_shape), axis = 2, device = self.device).to(self.device))
+ module_list.append(KMaxPooling(k=min(k, filed_shape), axis=2, device=self.device).to(self.device))
filed_shape = min(k, filed_shape)
self.conv_layer = nn.Sequential(*module_list)
self.to(device)
self.filed_shape = filed_shape
-
+
def forward(self, inputs):
return self.conv_layer(inputs)
diff --git a/deepctr_torch/layers/sequence.py b/deepctr_torch/layers/sequence.py
index 1a538e5e..33b30492 100644
--- a/deepctr_torch/layers/sequence.py
+++ b/deepctr_torch/layers/sequence.py
@@ -1,10 +1,6 @@
import torch
import torch.nn as nn
-import numpy as np
-
-from .core import LocalActivationUnit
-
class SequencePoolingLayer(nn.Module):
"""The SequencePoolingLayer is used to apply pooling operation(sum,mean,max) on variable-length sequence feature/multi-value feature.
@@ -24,13 +20,16 @@ class SequencePoolingLayer(nn.Module):
"""
- def __init__(self, mode='mean'):
- super(SequencePoolingLayer).__init__()
+ def __init__(self, mode='mean', supports_masking=False, device='cpu'):
+ super(SequencePoolingLayer, self).__init__()
if mode not in ['sum', 'mean', 'max']:
raise ValueError('parameter mode should in [sum, mean, max]')
+ self.supports_masking = supports_masking
self.mode = mode
- self.eps = torch.FloatTensor([1e-8])
+ self.eps = torch.FloatTensor([1e-8]).to(device)
+ self.to(device)
+
def _sequence_mask(self, lengths, maxlen=None, dtype=torch.bool):
# Returns a mask tensor representing the first N positions of each cell.
@@ -44,81 +43,87 @@ def _sequence_mask(self, lengths, maxlen=None, dtype=torch.bool):
return mask
def forward(self, seq_value_len_list):
- uiseq_embed_list, user_behavior_length = seq_value_len_list # [B, T, E], [B, 1]
- mask = self._sequence_mask(user_behavior_length, dtype=torch.float32) # [B, 1, maxlen]
- mask = torch.transpose(mask, 1, 2) # [B, maxlen, 1]
+ if self.supports_masking:
+ uiseq_embed_list, mask = seq_value_len_list # [B, T, E], [B, 1]
+ mask = mask.float()
+ user_behavior_length = torch.sum(mask, dim=-1, keepdim=True)
+ mask = mask.unsqueeze(2)
+ else:
+ uiseq_embed_list, user_behavior_length = seq_value_len_list # [B, T, E], [B, 1]
+ mask = self._sequence_mask(user_behavior_length, maxlen=uiseq_embed_list.shape[1],
+ dtype=torch.float32) # [B, 1, maxlen]
+ mask = torch.transpose(mask, 1, 2) # [B, maxlen, 1]
embedding_size = uiseq_embed_list.shape[-1]
- mask = torch.repeat_interleave(mask, embedding_size, dim=2) # [B, maxlen, E]
-
- uiseq_embed_list *= mask # [B, maxlen, E]
- hist = uiseq_embed_list
+ mask = torch.repeat_interleave(mask, embedding_size, dim=2) # [B, maxlen, E]
if self.mode == 'max':
- res = torch.max(hist, dim=1, keepdim=True)
- elif self.mode == 'mean':
- res = torch.max(hist, dim=1, keepdim=False)
- res = torch.div(res, user_behavior_length.type(torch.float32) + self.eps)
- res = torch.unsqueeze(res, dim=1)
- elif self.mode == 'sum':
- res = torch.max(hist, dim=1, keepdim=False)
- res = torch.unsqueeze(res, dim=1)
-
- return res
-
-
-class AttentionSequencePoolingLayer(nn.Module):
- """The Attentional sequence pooling operation used in DIN.
-
- Input shape
- - A list of three tensor: [query,keys,keys_length]
-
- - query is a 3D tensor with shape: ``(batch_size, 1, embedding_size)``
+ hist = uiseq_embed_list - (1 - mask) * 1e9
+ hist = torch.max(hist, dim=1, keepdim=True)[0]
+ return hist
+ hist = torch.sum(uiseq_embed_list * mask, dim=1, keepdim=False)
+
+ if self.mode == 'mean':
+ hist = torch.div(hist, user_behavior_length.type(torch.float32) + self.eps)
+
+ hist = torch.unsqueeze(hist, dim=1)
+ return hist
+
+
+# class AttentionSequencePoolingLayer(nn.Module):
+# """The Attentional sequence pooling operation used in DIN.
+#
+# Input shape
+# - A list of three tensor: [query,keys,keys_length]
+#
+# - query is a 3D tensor with shape: ``(batch_size, 1, embedding_size)``
+#
+# - keys is a 3D tensor with shape: ``(batch_size, T, embedding_size)``
+#
+# - keys_length is a 2D tensor with shape: ``(batch_size, 1)``
+#
+# Output shape
+# - 3D tensor with shape: ``(batch_size, 1, embedding_size)``
+#
+# Arguments
+# - **att_hidden_units**: List of positive integer, the attention net layer number and units in each layer.
+#
+# - **embedding_dim**: Dimension of the input embeddings.
+#
+# - **activation**: Activation function to use in attention net.
+#
+# - **weight_normalization**: bool.Whether normalize the attention score of local activation unit.
+#
+# References
+# - [Zhou G, Zhu X, Song C, et al. Deep interest network for click-through rate prediction[C]//Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining. ACM, 2018: 1059-1068.](https://arxiv.org/pdf/1706.06978.pdf)
+# """
+#
+# def __init__(self, att_hidden_units=[80, 40], embedding_dim=4, activation='Dice', weight_normalization=False):
+# super(AttentionSequencePoolingLayer, self).__init__()
+#
+# self.local_att = LocalActivationUnit(hidden_units=att_hidden_units, embedding_dim=embedding_dim,
+# activation=activation)
+#
+# def forward(self, query, keys, keys_length):
+# # query: [B, 1, E], keys: [B, T, E], keys_length: [B, 1]
+# # TODO: Mini-batch aware regularization in originial paper [Zhou G, et al. 2018] is not implemented here. As the authors mentioned
+# # it is not a must for small dataset as the open-sourced ones.
+# attention_score = self.local_att(query, keys)
+# attention_score = torch.transpose(attention_score, 1, 2) # B * 1 * T
+#
+# # define mask by length
+# keys_length = keys_length.type(torch.LongTensor)
+# mask = torch.arange(keys.size(1))[None, :] < keys_length[:, None] # [1, T] < [B, 1, 1] -> [B, 1, T]
+#
+# # mask
+# output = torch.mul(attention_score, mask.type(torch.FloatTensor)) # [B, 1, T]
+#
+# # multiply weight
+# output = torch.matmul(output, keys) # [B, 1, E]
+#
+# return output
- - keys is a 3D tensor with shape: ``(batch_size, T, embedding_size)``
-
- - keys_length is a 2D tensor with shape: ``(batch_size, 1)``
-
- Output shape
- - 3D tensor with shape: ``(batch_size, 1, embedding_size)``
-
- Arguments
- - **att_hidden_units**: List of positive integer, the attention net layer number and units in each layer.
-
- - **embedding_dim**: Dimension of the input embeddings.
-
- - **activation**: Activation function to use in attention net.
-
- - **weight_normalization**: bool.Whether normalize the attention score of local activation unit.
-
- References
- - [Zhou G, Zhu X, Song C, et al. Deep interest network for click-through rate prediction[C]//Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining. ACM, 2018: 1059-1068.](https://arxiv.org/pdf/1706.06978.pdf)
- """
- def __init__(self, att_hidden_units=[80, 40], embedding_dim=4, activation='Dice', weight_normalization=False):
- super(AttentionSequencePoolingLayer, self).__init__()
-
- self.local_att = LocalActivationUnit(hidden_units=att_hidden_units, embedding_dim=embedding_dim, activation=activation)
-
- def forward(self, query, keys, keys_length):
- # query: [B, 1, E], keys: [B, T, E], keys_length: [B, 1]
- # TODO: Mini-batch aware regularization in originial paper [Zhou G, et al. 2018] is not implemented here. As the authors mentioned
- # it is not a must for small dataset as the open-sourced ones.
- attention_score = self.local_att(query, keys)
- attention_score = torch.transpose(attention_score, 1, 2) # B * 1 * T
-
- # define mask by length
- keys_length = keys_length.type(torch.LongTensor)
- mask = torch.arange(keys.size(1))[None, :] < keys_length[:, None] # [1, T] < [B, 1, 1] -> [B, 1, T]
-
- # mask
- output = torch.mul(attention_score, mask.type(torch.FloatTensor)) # [B, 1, T]
-
- # multiply weight
- output = torch.matmul(output, keys) # [B, 1, E]
-
- return output
-
class KMaxPooling(nn.Module):
"""K Max pooling that selects the k biggest value along the specific axis.
@@ -145,7 +150,7 @@ def __init__(self, k, axis, device='cpu'):
def forward(self, input):
if self.axis < 0 or self.axis >= len(input.shape):
raise ValueError("axis must be 0~%d,now is %d" %
- (len(input.shape)-1, self.axis))
+ (len(input.shape) - 1, self.axis))
if self.k < 1 or self.k > input.shape[self.axis]:
raise ValueError("k must be in 1 ~ %d,now k is %d" %
diff --git a/deepctr_torch/models/afm.py b/deepctr_torch/models/afm.py
index 86b46011..2cf57076 100644
--- a/deepctr_torch/models/afm.py
+++ b/deepctr_torch/models/afm.py
@@ -7,19 +7,16 @@
(https://arxiv.org/abs/1708.04617)
"""
import torch
-import torch.nn.functional as F
from .basemodel import BaseModel
from ..layers import FM, AFMLayer
class AFM(BaseModel):
-
"""Instantiates the Attentional Factorization Machine architecture.
:param linear_feature_columns: An iterable containing all the features used by linear part of the model.
:param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
- :param embedding_size: positive integer,sparse feature embedding_size
:param use_attention: bool,whether use attention or not,if set to ``False``.it is the same as **standard Factorization Machine**
:param attention_factor: positive integer,units in attention net
:param l2_reg_linear: float. L2 regularizer strength applied to linear part
@@ -34,10 +31,10 @@ class AFM(BaseModel):
"""
- def __init__(self,linear_feature_columns, dnn_feature_columns, embedding_size=8, use_attention=True, attention_factor=8,
+ def __init__(self, linear_feature_columns, dnn_feature_columns, use_attention=True, attention_factor=8,
l2_reg_linear=1e-5, l2_reg_embedding=1e-5, l2_reg_att=1e-5, afm_dropout=0, init_std=0.0001, seed=1024,
task='binary', device='cpu'):
- super(AFM, self).__init__(linear_feature_columns, dnn_feature_columns, embedding_size=embedding_size,
+ super(AFM, self).__init__(linear_feature_columns, dnn_feature_columns,
dnn_hidden_units=[],
l2_reg_linear=l2_reg_linear,
l2_reg_embedding=l2_reg_embedding, l2_reg_dnn=0, init_std=init_std,
@@ -48,7 +45,7 @@ def __init__(self,linear_feature_columns, dnn_feature_columns, embedding_size=8,
self.use_attention = use_attention
if use_attention:
- self.fm = AFMLayer(embedding_size, attention_factor, l2_reg_att, afm_dropout,
+ self.fm = AFMLayer(self.embedding_size, attention_factor, l2_reg_att, afm_dropout,
seed, device)
self.add_regularization_loss(self.fm.attention_W, l2_reg_att)
else:
@@ -59,7 +56,7 @@ def __init__(self,linear_feature_columns, dnn_feature_columns, embedding_size=8,
def forward(self, X):
sparse_embedding_list, _ = self.input_from_feature_columns(X, self.dnn_feature_columns,
- self.embedding_dict,support_dense=False)
+ self.embedding_dict, support_dense=False)
logit = self.linear_model(X)
if len(sparse_embedding_list) > 0:
if self.use_attention:
diff --git a/deepctr_torch/models/autoint.py b/deepctr_torch/models/autoint.py
index af8ed79c..1abfe317 100644
--- a/deepctr_torch/models/autoint.py
+++ b/deepctr_torch/models/autoint.py
@@ -7,7 +7,6 @@
"""
import torch
import torch.nn as nn
-import torch.nn.functional as F
from .basemodel import BaseModel
from ..inputs import combined_dnn_input
@@ -17,8 +16,8 @@
class AutoInt(BaseModel):
"""Instantiates the AutoInt Network architecture.
+ :param linear_feature_columns: An iterable containing all the features used by linear part of the model.
:param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
- :param embedding_size: positive integer,sparse feature embedding_size
:param att_layer_num: int.The InteractingLayer number to be used.
:param att_embedding_size: int.The embedding size in multi-head self-attention network.
:param att_head_num: int.The head number in multi-head self-attention network.
@@ -37,13 +36,13 @@ class AutoInt(BaseModel):
"""
- def __init__(self, dnn_feature_columns, embedding_size=8, att_layer_num=3, att_embedding_size=8, att_head_num=2,
+ def __init__(self, linear_feature_columns, dnn_feature_columns, att_layer_num=3, att_embedding_size=8, att_head_num=2,
att_res=True,
dnn_hidden_units=(256, 128), dnn_activation='relu',
l2_reg_dnn=0, l2_reg_embedding=1e-5, dnn_use_bn=False, dnn_dropout=0, init_std=0.0001, seed=1024,
task='binary', device='cpu'):
- super(AutoInt, self).__init__([], dnn_feature_columns, embedding_size=embedding_size,
+ super(AutoInt, self).__init__(linear_feature_columns, dnn_feature_columns,
dnn_hidden_units=dnn_hidden_units,
l2_reg_linear=0,
l2_reg_embedding=l2_reg_embedding, l2_reg_dnn=l2_reg_dnn, init_std=init_std,
@@ -58,7 +57,7 @@ def __init__(self, dnn_feature_columns, embedding_size=8, att_layer_num=3, att_e
if len(dnn_hidden_units) and att_layer_num > 0:
dnn_linear_in_feature = dnn_hidden_units[-1] + \
- field_num * att_embedding_size * att_head_num
+ field_num * att_embedding_size * att_head_num
elif len(dnn_hidden_units) > 0:
dnn_linear_in_feature = dnn_hidden_units[-1]
elif att_layer_num > 0:
@@ -70,16 +69,14 @@ def __init__(self, dnn_feature_columns, embedding_size=8, att_layer_num=3, att_e
self.dnn_hidden_units = dnn_hidden_units
self.att_layer_num = att_layer_num
if self.use_dnn:
- self.dnn = DNN(self.compute_input_dim(dnn_feature_columns, embedding_size), dnn_hidden_units,
+ self.dnn = DNN(self.compute_input_dim(dnn_feature_columns), dnn_hidden_units,
activation=dnn_activation, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout, use_bn=dnn_use_bn,
init_std=init_std, device=device)
self.add_regularization_loss(
filter(lambda x: 'weight' in x[0] and 'bn' not in x[0], self.dnn.named_parameters()), l2_reg_dnn)
-
- self.int_layers = nn.ModuleList([InteractingLayer(embedding_size if i == 0 else att_embedding_size*att_head_num,
- att_embedding_size, att_head_num, att_res, device=device) for i in range(att_layer_num)])
-
-
+ self.int_layers = nn.ModuleList(
+ [InteractingLayer(self.embedding_size if i == 0 else att_embedding_size * att_head_num,
+ att_embedding_size, att_head_num, att_res, device=device) for i in range(att_layer_num)])
self.to(device)
@@ -87,6 +84,7 @@ def forward(self, X):
sparse_embedding_list, dense_value_list = self.input_from_feature_columns(X, self.dnn_feature_columns,
self.embedding_dict)
+ logit = self.linear_model(X)
att_input = concat_fun(sparse_embedding_list, axis=1)
@@ -100,15 +98,15 @@ def forward(self, X):
if len(self.dnn_hidden_units) > 0 and self.att_layer_num > 0: # Deep & Interacting Layer
deep_out = self.dnn(dnn_input)
stack_out = concat_fun([att_output, deep_out])
- final_logit = self.dnn_linear(stack_out)
+ logit += self.dnn_linear(stack_out)
elif len(self.dnn_hidden_units) > 0: # Only Deep
deep_out = self.dnn(dnn_input)
- final_logit = self.dnn_linear(deep_out)
+ logit += self.dnn_linear(deep_out)
elif self.att_layer_num > 0: # Only Interacting Layer
- final_logit = self.dnn_linear(att_output)
+ logit += self.dnn_linear(att_output)
else: # Error
- raise NotImplementedError
+ pass
- y_pred = self.out(final_logit)
+ y_pred = self.out(logit)
return y_pred
diff --git a/deepctr_torch/models/basemodel.py b/deepctr_torch/models/basemodel.py
index 87814b6e..10f0f52e 100644
--- a/deepctr_torch/models/basemodel.py
+++ b/deepctr_torch/models/basemodel.py
@@ -18,7 +18,7 @@
from torch.utils.data import DataLoader
from tqdm import tqdm
-from ..inputs import build_input_features, SparseFeat, DenseFeat, VarLenSparseFeat
+from ..inputs import build_input_features, SparseFeat, DenseFeat, VarLenSparseFeat, get_varlen_pooling_list, create_embedding_matrix
from ..layers import PredictionLayer
from ..layers.utils import slice_arrays
@@ -27,14 +27,16 @@ class Linear(nn.Module):
def __init__(self, feature_columns, feature_index, init_std=0.0001, device='cpu'):
super(Linear, self).__init__()
self.feature_index = feature_index
-
+ self.device = device
self.sparse_feature_columns = list(
filter(lambda x: isinstance(x, SparseFeat), feature_columns)) if len(feature_columns) else []
self.dense_feature_columns = list(
filter(lambda x: isinstance(x, DenseFeat), feature_columns)) if len(feature_columns) else []
- self.embedding_dict = self.create_embedding_matrix(self.sparse_feature_columns, 1, init_std, sparse=False).to(
- device)
+ self.varlen_sparse_feature_columns = list(
+ filter(lambda x: isinstance(x, VarLenSparseFeat), feature_columns)) if len(feature_columns) else []
+
+        self.embedding_dict = create_embedding_matrix(feature_columns, init_std, linear=True, sparse=False, device=device)
# nn.ModuleDict(
# {feat.embedding_name: nn.Embedding(feat.dimension, 1, sparse=True) for feat in
@@ -58,6 +60,11 @@ def forward(self, X):
dense_value_list = [X[:, self.feature_index[feat.name][0]:self.feature_index[feat.name][1]] for feat in
self.dense_feature_columns]
+ varlen_embedding_list = get_varlen_pooling_list(self.embedding_dict, X, self.feature_index,
+ self.varlen_sparse_feature_columns, self.device)
+
+ sparse_embedding_list += varlen_embedding_list
+
if len(sparse_embedding_list) > 0 and len(dense_value_list) > 0:
linear_sparse_logit = torch.sum(
torch.cat(sparse_embedding_list, dim=-1), dim=-1, keepdim=False)
@@ -74,31 +81,19 @@ def forward(self, X):
linear_logit = torch.zeros([X.shape[0], 1])
return linear_logit
- def create_embedding_matrix(self, feature_columns, embedding_size, init_std=0.0001, sparse=False):
-
- sparse_feature_columns = list(
- filter(lambda x: isinstance(x, SparseFeat), feature_columns)) if len(feature_columns) else []
-
- embedding_dict = nn.ModuleDict(
- {feat.embedding_name: nn.Embedding(feat.dimension, embedding_size, sparse=sparse) for feat in
- sparse_feature_columns}
- )
- for tensor in embedding_dict.values():
- nn.init.normal_(tensor.weight, mean=0, std=init_std)
-
- return embedding_dict
-
class BaseModel(nn.Module):
def __init__(self,
- linear_feature_columns, dnn_feature_columns, embedding_size=8, dnn_hidden_units=(128, 128),
+ linear_feature_columns, dnn_feature_columns, dnn_hidden_units=(128, 128),
l2_reg_linear=1e-5,
l2_reg_embedding=1e-5, l2_reg_dnn=0, init_std=0.0001, seed=1024, dnn_dropout=0, dnn_activation='relu',
task='binary', device='cpu'):
super(BaseModel, self).__init__()
+ self.dnn_feature_columns = dnn_feature_columns
+
self.reg_loss = torch.zeros((1,), device=device)
self.device = device # device
@@ -106,8 +101,7 @@ def __init__(self,
linear_feature_columns + dnn_feature_columns)
self.dnn_feature_columns = dnn_feature_columns
- self.embedding_dict = self.create_embedding_matrix(dnn_feature_columns, embedding_size, init_std,
- sparse=False).to(device)
+        self.embedding_dict = create_embedding_matrix(dnn_feature_columns, init_std, sparse=False, device=device)
# nn.ModuleDict(
# {feat.embedding_name: nn.Embedding(feat.dimension, embedding_size, sparse=True) for feat in
# self.dnn_feature_columns}
@@ -121,7 +115,7 @@ def __init__(self,
self.add_regularization_loss(
self.linear_model.parameters(), l2_reg_linear)
- self.out = PredictionLayer(task, )
+        self.out = PredictionLayer(task)
self.to(device)
def fit(self, x=None,
@@ -132,8 +126,8 @@ def fit(self, x=None,
initial_epoch=0,
validation_split=0.,
validation_data=None,
- shuffle=True,
- use_double=False,):
+ shuffle=True,
+            use_double=False):
"""
:param x: Numpy array of training data (if the model has a single input), or list of Numpy arrays (if the model has multiple inputs).If input layers in the model are named, you can also pass a
@@ -146,9 +140,10 @@ def fit(self, x=None,
:param validation_split: Float between 0 and 1. Fraction of the training data to be used as validation data. The model will set apart this fraction of the training data, will not train on it, and will evaluate the loss and any model metrics on this data at the end of each epoch. The validation data is selected from the last samples in the `x` and `y` data provided, before shuffling.
:param validation_data: tuple `(x_val, y_val)` or tuple `(x_val, y_val, val_sample_weights)` on which to evaluate the loss and any model metrics at the end of each epoch. The model will not be trained on this data. `validation_data` will override `validation_split`.
:param shuffle: Boolean. Whether to shuffle the order of the batches at the beginning of each epoch.
+ :param use_double: Boolean. Whether to use double precision in metric calculation.
"""
- if isinstance(x,dict):
+ if isinstance(x, dict):
x = [x[feature] for feature in self.feature_index]
if validation_data:
if len(validation_data) == 2:
@@ -202,7 +197,7 @@ def fit(self, x=None,
steps_per_epoch = (sample_num - 1) // batch_size + 1
print("Train on {0} samples, validate on {1} samples, {2} steps per epoch".format(
- len(train_tensor_data), len(val_y),steps_per_epoch))
+ len(train_tensor_data), len(val_y), steps_per_epoch))
for epoch in range(initial_epoch, epochs):
start_time = time.time()
loss_epoch = 0
@@ -253,14 +248,14 @@ def fit(self, x=None,
for name, result in train_result.items():
eval_str += " - " + name + \
- ": {0: .4f}".format(np.sum(result) / steps_per_epoch)
+ ": {0: .4f}".format(np.sum(result) / steps_per_epoch)
if len(val_x) and len(val_y):
eval_result = self.evaluate(val_x, val_y, batch_size)
for name, result in eval_result.items():
eval_str += " - val_" + name + \
- ": {0: .4f}".format(result)
+ ": {0: .4f}".format(result)
print(eval_str)
def evaluate(self, x, y, batch_size=256):
@@ -327,43 +322,19 @@ def input_from_feature_columns(self, X, feature_columns, embedding_dict, support
sparse_embedding_list = [embedding_dict[feat.embedding_name](
X[:, self.feature_index[feat.name][0]:self.feature_index[feat.name][1]].long()) for
feat in sparse_feature_columns]
- varlen_sparse_embedding_list = [embedding_dict[feat.embedding_name](
- X[:, self.feature_index[feat.name][0]:self.feature_index[feat.name][1]].long()) for
- feat in varlen_sparse_feature_columns]
- varlen_sparse_embedding_list = list(
- map(lambda x: x.unsqueeze(dim=1), varlen_sparse_embedding_list))
+
+ varlen_sparse_embedding_list = get_varlen_pooling_list(self.embedding_dict, X, self.feature_index,
+ varlen_sparse_feature_columns, self.device)
dense_value_list = [X[:, self.feature_index[feat.name][0]:self.feature_index[feat.name][1]] for feat in
dense_feature_columns]
return sparse_embedding_list + varlen_sparse_embedding_list, dense_value_list
- def create_embedding_matrix(self, feature_columns, embedding_size, init_std=0.0001, sparse=False):
- # Return nn.ModuleDict: for sparse features, {embedding_name: nn.Embedding}
- # for varlen sparse features, {embedding_name: nn.EmbeddingBag}
+ def compute_input_dim(self, feature_columns, include_sparse=True, include_dense=True, feature_group=False):
sparse_feature_columns = list(
- filter(lambda x: isinstance(x, SparseFeat), feature_columns)) if len(feature_columns) else []
-
- varlen_sparse_feature_columns = list(
- filter(lambda x: isinstance(x, VarLenSparseFeat), feature_columns)) if len(feature_columns) else []
-
- embedding_dict = nn.ModuleDict(
- {feat.embedding_name: nn.Embedding(feat.dimension, embedding_size, sparse=sparse) for feat in
- sparse_feature_columns}
- )
-
- for feat in varlen_sparse_feature_columns:
- embedding_dict[feat.embedding_name] = nn.EmbeddingBag(
- feat.dimension, embedding_size, sparse=sparse, mode=feat.combiner)
-
- for tensor in embedding_dict.values():
- nn.init.normal_(tensor.weight, mean=0, std=init_std)
-
- return embedding_dict
-
- def compute_input_dim(self, feature_columns, embedding_size=1, include_sparse=True, include_dense=True, feature_group=False):
- sparse_feature_columns = list(
- filter(lambda x: isinstance(x, (SparseFeat, VarLenSparseFeat)), feature_columns)) if len(feature_columns) else []
+ filter(lambda x: isinstance(x, (SparseFeat, VarLenSparseFeat)), feature_columns)) if len(
+ feature_columns) else []
dense_feature_columns = list(
filter(lambda x: isinstance(x, DenseFeat), feature_columns)) if len(feature_columns) else []
@@ -372,7 +343,7 @@ def compute_input_dim(self, feature_columns, embedding_size=1, include_sparse=Tr
if feature_group:
sparse_input_dim = len(sparse_feature_columns)
else:
- sparse_input_dim = len(sparse_feature_columns) * embedding_size
+ sparse_input_dim = sum(feat.embedding_dim for feat in sparse_feature_columns)
input_dim = 0
if include_sparse:
input_dim += sparse_input_dim
@@ -460,4 +431,15 @@ def _get_metrics(self, metrics, set_eps=False):
if metric == "accuracy" or metric == "acc":
metrics_[metric] = lambda y_true, y_pred: accuracy_score(
y_true, np.where(y_pred > 0.5, 1, 0))
- return metrics_
\ No newline at end of file
+ return metrics_
+
+ @property
+ def embedding_size(self, ):
+ feature_columns = self.dnn_feature_columns
+ sparse_feature_columns = list(
+ filter(lambda x: isinstance(x, (SparseFeat, VarLenSparseFeat)), feature_columns)) if len(
+ feature_columns) else []
+ embedding_size_set = set([feat.embedding_dim for feat in sparse_feature_columns])
+ if len(embedding_size_set) > 1:
+ raise ValueError("embedding_dim of SparseFeat and VarlenSparseFeat must be same in this model!")
+ return list(embedding_size_set)[0]
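
Both `Linear` and `BaseModel` above now delegate to the shared `create_embedding_matrix` helper in `inputs.py`, and models read the (single) embedding dimension back through the new `embedding_size` property, which raises a ValueError when SparseFeat/VarLenSparseFeat dims disagree. A rough sketch of the helper, with invented feature names:

```python
from deepctr_torch.inputs import SparseFeat, create_embedding_matrix

columns = [SparseFeat('user_id', vocabulary_size=100, embedding_dim=4),
           SparseFeat('item_id', vocabulary_size=200, embedding_dim=4)]

# embedding use: one nn.Embedding per embedding_name, sized by embedding_dim
emb = create_embedding_matrix(columns, init_std=0.0001, linear=False, device='cpu')
print(emb['user_id'].weight.shape)         # torch.Size([100, 4])

# linear use: every table is forced to dimension 1 (a per-category weight)
linear_emb = create_embedding_matrix(columns, linear=True)
print(linear_emb['item_id'].weight.shape)  # torch.Size([200, 1])
```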
diff --git a/deepctr_torch/models/ccpm.py b/deepctr_torch/models/ccpm.py
index 514ab7e3..a583acd8 100644
--- a/deepctr_torch/models/ccpm.py
+++ b/deepctr_torch/models/ccpm.py
@@ -11,13 +11,11 @@
"""
import torch
import torch.nn as nn
-import torch.nn.functional as F
from .basemodel import BaseModel
-from ..layers.core import DNN, Conv2dSame
-from ..layers.utils import concat_fun
-from ..layers.sequence import KMaxPooling
+from ..layers.core import DNN
from ..layers.interaction import ConvLayer
+from ..layers.utils import concat_fun
class CCPM(BaseModel):
@@ -25,7 +23,6 @@ class CCPM(BaseModel):
:param linear_feature_columns: An iterable containing all the features used by linear part of the model.
:param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
- :param embedding_size: positive integer,sparse feature embedding_size
:param conv_kernel_width: list,list of positive integer or empty list,the width of filter in each conv layer.
:param conv_filters: list,list of positive integer or empty list,the number of filters in each conv layer.
:param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of DNN.
@@ -41,12 +38,12 @@ class CCPM(BaseModel):
"""
- def __init__(self, linear_feature_columns, dnn_feature_columns, embedding_size=8, conv_kernel_width=(6, 5),
+ def __init__(self, linear_feature_columns, dnn_feature_columns, conv_kernel_width=(6, 5),
conv_filters=(4, 4),
dnn_hidden_units=(256,), l2_reg_linear=1e-5, l2_reg_embedding=1e-5, l2_reg_dnn=0, dnn_dropout=0,
init_std=0.0001, seed=1024, task='binary', device='cpu', dnn_use_bn=False, dnn_activation='relu'):
- super(CCPM, self).__init__(linear_feature_columns, dnn_feature_columns, embedding_size=embedding_size,
+ super(CCPM, self).__init__(linear_feature_columns, dnn_feature_columns,
dnn_hidden_units=dnn_hidden_units,
l2_reg_linear=l2_reg_linear,
l2_reg_embedding=l2_reg_embedding, l2_reg_dnn=l2_reg_dnn, init_std=init_std,
@@ -57,21 +54,20 @@ def __init__(self, linear_feature_columns, dnn_feature_columns, embedding_size=8
if len(conv_kernel_width) != len(conv_filters):
raise ValueError(
"conv_kernel_width must have same element with conv_filters")
-
- filed_size = self.compute_input_dim(dnn_feature_columns, embedding_size, include_dense=False, feature_group=True)
- self.conv_layer = ConvLayer(field_size=filed_size, conv_kernel_width=conv_kernel_width, conv_filters=conv_filters, device=device)
-
- self.dnn_input_dim = self.conv_layer.filed_shape * embedding_size * conv_filters[-1]
+
+ filed_size = self.compute_input_dim(dnn_feature_columns, include_dense=False, feature_group=True)
+ self.conv_layer = ConvLayer(field_size=filed_size, conv_kernel_width=conv_kernel_width,
+ conv_filters=conv_filters, device=device)
+ self.dnn_input_dim = self.conv_layer.filed_shape * self.embedding_size * conv_filters[-1]
self.dnn = DNN(self.dnn_input_dim, dnn_hidden_units,
- activation=dnn_activation, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout, use_bn=dnn_use_bn,
- init_std=init_std, device=device)
+ activation=dnn_activation, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout, use_bn=dnn_use_bn,
+ init_std=init_std, device=device)
self.dnn_linear = nn.Linear(dnn_hidden_units[-1], 1, bias=False).to(device)
self.add_regularization_loss(
- filter(lambda x: 'weight' in x[0] and 'bn' not in x[0], self.dnn.named_parameters()), l2_reg_dnn)
+ filter(lambda x: 'weight' in x[0] and 'bn' not in x[0], self.dnn.named_parameters()), l2_reg_dnn)
self.add_regularization_loss(self.dnn_linear.weight, l2_reg_dnn)
-
- self.to(device)
+ self.to(device)
def forward(self, X):
linear_logit = self.linear_model(X)
diff --git a/deepctr_torch/models/dcn.py b/deepctr_torch/models/dcn.py
index 46fd7f02..8573f206 100644
--- a/deepctr_torch/models/dcn.py
+++ b/deepctr_torch/models/dcn.py
@@ -7,18 +7,17 @@
"""
import torch
import torch.nn as nn
-import torch.nn.functional as F
from .basemodel import BaseModel
from ..inputs import combined_dnn_input
from ..layers import CrossNet, DNN
-class DCN(BaseModel):
+class DCN(BaseModel):
"""Instantiates the Deep&Cross Network architecture.
+ :param linear_feature_columns: An iterable containing all the features used by linear part of the model.
:param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
- :param embedding_size: positive int or str,sparse feature embedding_size.If set to "auto",it will be 6*pow(cardinality,025)
:param cross_num: positive integet,cross layer number
:param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of DNN
:param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
@@ -35,16 +34,15 @@ class DCN(BaseModel):
"""
- def __init__(self,
- dnn_feature_columns, embedding_size=8, cross_num=2,
+    def __init__(self, linear_feature_columns,
+ dnn_feature_columns, cross_num=2,
dnn_hidden_units=(128, 128), l2_reg_linear=0.00001,
l2_reg_embedding=0.00001, l2_reg_cross=0.00001, l2_reg_dnn=0, init_std=0.0001, seed=1024,
dnn_dropout=0,
dnn_activation='relu', dnn_use_bn=False, task='binary', device='cpu'):
- super(DCN, self).__init__(linear_feature_columns=[],
+ super(DCN, self).__init__(linear_feature_columns=linear_feature_columns,
dnn_feature_columns=dnn_feature_columns,
- embedding_size=embedding_size,
dnn_hidden_units=dnn_hidden_units,
l2_reg_embedding=l2_reg_embedding, l2_reg_dnn=l2_reg_dnn, init_std=init_std,
seed=seed,
@@ -52,19 +50,19 @@ def __init__(self,
task=task, device=device)
self.dnn_hidden_units = dnn_hidden_units
self.cross_num = cross_num
- self.dnn = DNN(self.compute_input_dim(dnn_feature_columns, embedding_size), dnn_hidden_units,
+ self.dnn = DNN(self.compute_input_dim(dnn_feature_columns), dnn_hidden_units,
activation=dnn_activation, use_bn=dnn_use_bn, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout,
init_std=init_std, device=device)
if len(self.dnn_hidden_units) > 0 and self.cross_num > 0:
- dnn_linear_in_feature = self.compute_input_dim(dnn_feature_columns, embedding_size) + dnn_hidden_units[-1]
+ dnn_linear_in_feature = self.compute_input_dim(dnn_feature_columns) + dnn_hidden_units[-1]
elif len(self.dnn_hidden_units) > 0:
dnn_linear_in_feature = dnn_hidden_units[-1]
elif self.cross_num > 0:
- dnn_linear_in_feature = self.compute_input_dim(dnn_feature_columns, embedding_size)
+ dnn_linear_in_feature = self.compute_input_dim(dnn_feature_columns)
self.dnn_linear = nn.Linear(dnn_linear_in_feature, 1, bias=False).to(
device)
- self.crossnet = CrossNet(in_features=self.compute_input_dim(dnn_feature_columns, embedding_size),
+ self.crossnet = CrossNet(in_features=self.compute_input_dim(dnn_feature_columns),
layer_num=cross_num, seed=1024, device=device)
self.add_regularization_loss(
filter(lambda x: 'weight' in x[0] and 'bn' not in x[0], self.dnn.named_parameters()), l2_reg_dnn)
@@ -74,6 +72,7 @@ def __init__(self,
def forward(self, X):
+ logit = self.linear_model(X)
sparse_embedding_list, dense_value_list = self.input_from_feature_columns(X, self.dnn_feature_columns,
self.embedding_dict)
@@ -83,15 +82,14 @@ def forward(self, X):
deep_out = self.dnn(dnn_input)
cross_out = self.crossnet(dnn_input)
stack_out = torch.cat((cross_out, deep_out), dim=-1)
- final_logit = self.dnn_linear(stack_out)
+ logit += self.dnn_linear(stack_out)
elif len(self.dnn_hidden_units) > 0: # Only Deep
deep_out = self.dnn(dnn_input)
- final_logit = self.dnn_linear(deep_out)
+ logit += self.dnn_linear(deep_out)
elif self.cross_num > 0: # Only Cross
cross_out = self.crossnet(dnn_input)
- final_logit = self.dnn_linear(cross_out)
+ logit += self.dnn_linear(cross_out)
else: # Error
- raise NotImplementedError
-
- y_pred = self.out(final_logit)
+ pass
+ y_pred = self.out(logit)
return y_pred
diff --git a/deepctr_torch/models/deepfm.py b/deepctr_torch/models/deepfm.py
index d4ccfee0..edf4b561 100644
--- a/deepctr_torch/models/deepfm.py
+++ b/deepctr_torch/models/deepfm.py
@@ -7,7 +7,6 @@
"""
import torch
import torch.nn as nn
-import torch.nn.functional as F
from .basemodel import BaseModel
from ..inputs import combined_dnn_input
@@ -19,7 +18,6 @@ class DeepFM(BaseModel):
:param linear_feature_columns: An iterable containing all the features used by linear part of the model.
:param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
- :param embedding_size: positive integer,sparse feature embedding_size
:param use_fm: bool,use FM part or not
:param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of DNN
:param l2_reg_linear: float. L2 regularizer strength applied to linear part
@@ -35,14 +33,15 @@ class DeepFM(BaseModel):
:return: A PyTorch model instance.
"""
+
def __init__(self,
- linear_feature_columns, dnn_feature_columns, embedding_size=8, use_fm=True,
+ linear_feature_columns, dnn_feature_columns, use_fm=True,
dnn_hidden_units=(256, 128),
l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_dnn=0, init_std=0.0001, seed=1024,
dnn_dropout=0,
dnn_activation='relu', dnn_use_bn=False, task='binary', device='cpu'):
- super(DeepFM, self).__init__(linear_feature_columns, dnn_feature_columns, embedding_size=embedding_size,
+ super(DeepFM, self).__init__(linear_feature_columns, dnn_feature_columns,
dnn_hidden_units=dnn_hidden_units,
l2_reg_linear=l2_reg_linear,
l2_reg_embedding=l2_reg_embedding, l2_reg_dnn=l2_reg_dnn, init_std=init_std,
@@ -57,8 +56,9 @@ def __init__(self,
self.fm = FM()
if self.use_dnn:
- self.dnn = DNN(self.compute_input_dim(dnn_feature_columns, embedding_size), dnn_hidden_units,
- activation=dnn_activation, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout, use_bn=dnn_use_bn, init_std=init_std, device=device)
+ self.dnn = DNN(self.compute_input_dim(dnn_feature_columns), dnn_hidden_units,
+ activation=dnn_activation, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout, use_bn=dnn_use_bn,
+ init_std=init_std, device=device)
self.dnn_linear = nn.Linear(
dnn_hidden_units[-1], 1, bias=False).to(device)
@@ -78,7 +78,6 @@ def forward(self, X):
logit += self.fm(fm_input)
if self.use_dnn:
-
dnn_input = combined_dnn_input(
sparse_embedding_list, dense_value_list)
dnn_output = self.dnn(dnn_input)
@@ -87,4 +86,4 @@ def forward(self, X):
y_pred = self.out(logit)
- return y_pred
\ No newline at end of file
+ return y_pred
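
Since `embedding_size` is dropped from every model constructor touched in this diff (AFM, AutoInt, CCPM, DCN, DeepFM, FiBiNET), the per-feature `embedding_dim` on `SparseFeat` becomes the single place to set it. An illustrative call with made-up features; mixing different `embedding_dim` values would trip the ValueError in the new `embedding_size` property.

```python
from deepctr_torch.inputs import SparseFeat, DenseFeat
from deepctr_torch.models import DeepFM

feature_columns = [SparseFeat('user_id', vocabulary_size=100, embedding_dim=8),
                   SparseFeat('item_id', vocabulary_size=200, embedding_dim=8),
                   DenseFeat('price', dimension=1)]

# 0.1.3: DeepFM(linear_cols, dnn_cols, embedding_size=8, ...)
# 0.2.0: the keyword is gone; embedding_dim travels with each SparseFeat
model = DeepFM(linear_feature_columns=feature_columns,
               dnn_feature_columns=feature_columns,
               task='binary', device='cpu')
```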
diff --git a/deepctr_torch/models/fibinet.py b/deepctr_torch/models/fibinet.py
index af6f7d42..017ec912 100644
--- a/deepctr_torch/models/fibinet.py
+++ b/deepctr_torch/models/fibinet.py
@@ -8,12 +8,10 @@
import torch
import torch.nn as nn
-import torch.nn.functional as F
from .basemodel import BaseModel
from ..inputs import combined_dnn_input, SparseFeat, DenseFeat, VarLenSparseFeat
-from ..layers import SENETLayer,BilinearInteraction,DNN
-
+from ..layers import SENETLayer, BilinearInteraction, DNN
class FiBiNET(BaseModel):
@@ -21,7 +19,6 @@ class FiBiNET(BaseModel):
:param linear_feature_columns: An iterable containing all the features used by linear part of the model.
:param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
- :param embedding_size: positive integer,sparse feature embedding_size
:param bilinear_type: str,bilinear function type used in Bilinear Interaction Layer,can be ``'all'`` , ``'each'`` or ``'interaction'``
:param reduction_ratio: integer in [1,inf), reduction ratio used in SENET Layer
:param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of DNN
@@ -38,11 +35,11 @@ class FiBiNET(BaseModel):
"""
- def __init__(self, linear_feature_columns, dnn_feature_columns, embedding_size=8, bilinear_type='interaction',
+ def __init__(self, linear_feature_columns, dnn_feature_columns, bilinear_type='interaction',
reduction_ratio=3, dnn_hidden_units=(128, 128), l2_reg_linear=1e-5,
l2_reg_embedding=1e-5, l2_reg_dnn=0, init_std=0.0001, seed=1024, dnn_dropout=0, dnn_activation='relu',
task='binary', device='cpu'):
- super(FiBiNET, self).__init__(linear_feature_columns, dnn_feature_columns, embedding_size=embedding_size,
+ super(FiBiNET, self).__init__(linear_feature_columns, dnn_feature_columns,
dnn_hidden_units=dnn_hidden_units,
l2_reg_linear=l2_reg_linear,
l2_reg_embedding=l2_reg_embedding, l2_reg_dnn=l2_reg_dnn, init_std=init_std,
@@ -53,20 +50,22 @@ def __init__(self, linear_feature_columns, dnn_feature_columns, embedding_size=8
self.dnn_feature_columns = dnn_feature_columns
self.filed_size = len(self.embedding_dict)
self.SE = SENETLayer(self.filed_size, reduction_ratio, seed, device)
- self.Bilinear = BilinearInteraction(self.filed_size,embedding_size, bilinear_type, seed, device)
- self.dnn = DNN(self.compute_input_dim(dnn_feature_columns, embedding_size), dnn_hidden_units,
+ self.Bilinear = BilinearInteraction(self.filed_size, self.embedding_size, bilinear_type, seed, device)
+ self.dnn = DNN(self.compute_input_dim(dnn_feature_columns), dnn_hidden_units,
activation=dnn_activation, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout, use_bn=False,
init_std=init_std, device=device)
self.dnn_linear = nn.Linear(dnn_hidden_units[-1], 1, bias=False).to(device)
- def compute_input_dim(self, feature_columns, embedding_size, include_sparse=True, include_dense=True):
+ def compute_input_dim(self, feature_columns, include_sparse=True, include_dense=True):
sparse_feature_columns = list(
- filter(lambda x: isinstance(x, (SparseFeat,VarLenSparseFeat)), feature_columns)) if len(feature_columns) else []
+ filter(lambda x: isinstance(x, (SparseFeat, VarLenSparseFeat)), feature_columns)) if len(
+ feature_columns) else []
dense_feature_columns = list(
filter(lambda x: isinstance(x, DenseFeat), feature_columns)) if len(feature_columns) else []
field_size = len(sparse_feature_columns)
dense_input_dim = sum(map(lambda x: x.dimension, dense_feature_columns))
+ embedding_size = sparse_feature_columns[0].embedding_dim
sparse_input_dim = field_size * (field_size - 1) * embedding_size
input_dim = 0
@@ -87,7 +86,7 @@ def forward(self, X):
bilinear_out = self.Bilinear(sparse_embedding_input)
linear_logit = self.linear_model(X)
- temp = torch.split(torch.cat((senet_bilinear_out,bilinear_out), dim = 1), 1, dim = 1)
+ temp = torch.split(torch.cat((senet_bilinear_out, bilinear_out), dim=1), 1, dim=1)
dnn_input = combined_dnn_input(temp, dense_value_list)
dnn_output = self.dnn(dnn_input)
dnn_logit = self.dnn_linear(dnn_output)
@@ -104,4 +103,3 @@ def forward(self, X):
y_pred = self.out(final_logit)
return y_pred
-
diff --git a/deepctr_torch/models/mlr.py b/deepctr_torch/models/mlr.py
index 615c24db..8a49bce4 100644
--- a/deepctr_torch/models/mlr.py
+++ b/deepctr_torch/models/mlr.py
@@ -9,14 +9,12 @@
import torch
import torch.nn as nn
-
from .basemodel import Linear, BaseModel
from ..inputs import build_input_features
from ..layers import PredictionLayer
class MLR(BaseModel):
-
"""Instantiates the Mixed Logistic Regression/Piece-wise Linear Model.
:param region_feature_columns: An iterable containing all the features used by region part of the model.
@@ -61,7 +59,8 @@ def __init__(self, region_feature_columns, base_feature_columns=None, bias_featu
self.region_feature_columns + self.base_feature_columns + self.bias_feature_columns)
self.region_linear_model = nn.ModuleList([Linear(
- self.region_feature_columns, self.feature_index, self.init_std, self.device) for i in range(self.region_num)])
+ self.region_feature_columns, self.feature_index, self.init_std, self.device) for i in
+ range(self.region_num)])
self.base_linear_model = nn.ModuleList([Linear(
self.base_feature_columns, self.feature_index, self.init_std, self.device) for i in range(self.region_num)])
@@ -93,9 +92,9 @@ def forward(self, X):
learner_score = self.get_learner_score(X, self.region_num)
final_logit = torch.sum(
- region_score*learner_score, dim=-1, keepdim=True)
+ region_score * learner_score, dim=-1, keepdim=True)
if self.bias_feature_columns is not None and len(self.bias_feature_columns) > 0:
bias_score = self.bias_model(X)
- final_logit = final_logit*bias_score
- return final_logit
\ No newline at end of file
+ final_logit = final_logit * bias_score
+ return final_logit
diff --git a/deepctr_torch/models/nfm.py b/deepctr_torch/models/nfm.py
index 11a364e1..c206c789 100644
--- a/deepctr_torch/models/nfm.py
+++ b/deepctr_torch/models/nfm.py
@@ -7,7 +7,6 @@
"""
import torch
import torch.nn as nn
-import torch.nn.functional as F
from .basemodel import BaseModel
from ..inputs import combined_dnn_input
@@ -19,7 +18,6 @@ class NFM(BaseModel):
:param linear_feature_columns: An iterable containing all the features used by linear part of the model.
:param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
- :param embedding_size: positive integer,sparse feature embedding_size
:param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of deep net
:param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
:param l2_reg_linear: float. L2 regularizer strength applied to linear part.
@@ -36,10 +34,10 @@ class NFM(BaseModel):
"""
def __init__(self,
- linear_feature_columns, dnn_feature_columns, embedding_size=8, dnn_hidden_units=(128, 128),
+ linear_feature_columns, dnn_feature_columns, dnn_hidden_units=(128, 128),
l2_reg_embedding=1e-5, l2_reg_linear=1e-5, l2_reg_dnn=0, init_std=0.0001, seed=1024, bi_dropout=0,
dnn_dropout=0, dnn_activation='relu', task='binary', device='cpu'):
- super(NFM, self).__init__(linear_feature_columns, dnn_feature_columns, embedding_size=embedding_size,
+ super(NFM, self).__init__(linear_feature_columns, dnn_feature_columns,
dnn_hidden_units=dnn_hidden_units,
l2_reg_linear=l2_reg_linear,
l2_reg_embedding=l2_reg_embedding, l2_reg_dnn=l2_reg_dnn, init_std=init_std,
@@ -47,7 +45,7 @@ def __init__(self,
dnn_dropout=dnn_dropout, dnn_activation=dnn_activation,
task=task, device=device)
- self.dnn = DNN(self.compute_input_dim(dnn_feature_columns, embedding_size, include_sparse=False) + embedding_size,
+ self.dnn = DNN(self.compute_input_dim(dnn_feature_columns, include_sparse=False) + self.embedding_size,
dnn_hidden_units,
activation=dnn_activation, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout, use_bn=False,
init_std=init_std, device=device)
@@ -80,4 +78,4 @@ def forward(self, X):
y_pred = self.out(logit)
- return y_pred
\ No newline at end of file
+ return y_pred
diff --git a/deepctr_torch/models/onn.py b/deepctr_torch/models/onn.py
index d8235edd..df7fb8d5 100644
--- a/deepctr_torch/models/onn.py
+++ b/deepctr_torch/models/onn.py
@@ -33,12 +33,12 @@ def forward(self, first, second):
y = first_emb * second_emb # core code
return y
+
class ONN(BaseModel):
"""Instantiates the Operation-aware Neural Networks architecture.
:param linear_feature_columns: An iterable containing all the features used by linear part of the model.
:param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
- :param embedding_size: positive integer,sparse feature embedding_size
:param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of deep net
:param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
:param l2_reg_linear: float. L2 regularizer strength applied to linear part.
@@ -53,12 +53,13 @@ class ONN(BaseModel):
:return: A PyTorch model instance.
"""
- def __init__(self, linear_feature_columns, dnn_feature_columns, embedding_size=4,
+
+ def __init__(self, linear_feature_columns, dnn_feature_columns,
dnn_hidden_units=(128, 128),
l2_reg_embedding=1e-5, l2_reg_linear=1e-5, l2_reg_dnn=0,
dnn_dropout=0, init_std=0.0001, seed=1024, dnn_use_bn=False, dnn_activation='relu',
task='binary', device='cpu'):
- super(ONN, self).__init__(linear_feature_columns, dnn_feature_columns, embedding_size=embedding_size,
+ super(ONN, self).__init__(linear_feature_columns, dnn_feature_columns,
dnn_hidden_units=dnn_hidden_units,
l2_reg_linear=l2_reg_linear,
l2_reg_embedding=l2_reg_embedding, l2_reg_dnn=l2_reg_dnn, init_std=init_std,
@@ -67,6 +68,7 @@ def __init__(self, linear_feature_columns, dnn_feature_columns, embedding_size=4
task=task, device=device)
# second order part
+ embedding_size = self.embedding_size
self.second_order_embedding_dict = self.__create_second_order_embedding_matrix(
dnn_feature_columns, embedding_size=embedding_size, sparse=False).to(device)
@@ -114,9 +116,9 @@ def __input_from_second_order_column(self, X, feature_columns, second_order_embe
second_order_embedding_list.append(
second_order_embedding_dict[first_name + "+" + second_name](
X[:, self.feature_index[first_name][0]
- :self.feature_index[first_name][1]].long(),
+ :self.feature_index[first_name][1]].long(),
X[:, self.feature_index[second_name][0]
- :self.feature_index[second_name][1]].long()
+ :self.feature_index[second_name][1]].long()
)
)
return second_order_embedding_list
@@ -130,8 +132,9 @@ def __create_second_order_embedding_matrix(self, feature_columns, embedding_size
for second_index in range(first_index + 1, len(sparse_feature_columns)):
first_name = sparse_feature_columns[first_index].embedding_name
second_name = sparse_feature_columns[second_index].embedding_name
- temp_dict[first_name + "+" + second_name] = Interac(sparse_feature_columns[first_index].dimension,
- sparse_feature_columns[second_index].dimension,
+ temp_dict[first_name + "+" + second_name] = Interac(sparse_feature_columns[first_index].vocabulary_size,
+ sparse_feature_columns[
+ second_index].vocabulary_size,
emb_size=embedding_size,
init_std=init_std,
sparse=sparse)
@@ -157,5 +160,3 @@ def forward(self, X):
y_pred = self.out(final_logit)
return y_pred
-
-
diff --git a/deepctr_torch/models/pnn.py b/deepctr_torch/models/pnn.py
index d1c7fa7d..2efc04c1 100644
--- a/deepctr_torch/models/pnn.py
+++ b/deepctr_torch/models/pnn.py
@@ -8,7 +8,6 @@
import torch
import torch.nn as nn
-import torch.nn.functional as F
from .basemodel import BaseModel
from ..inputs import combined_dnn_input
@@ -19,7 +18,6 @@ class PNN(BaseModel):
"""Instantiates the Product-based Neural Network architecture.
:param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
- :param embedding_size: positive integer,sparse feature embedding_size
:param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of deep net
:param l2_reg_embedding: float . L2 regularizer strength applied to embedding vector
:param l2_reg_dnn: float. L2 regularizer strength applied to DNN
@@ -36,11 +34,11 @@ class PNN(BaseModel):
"""
- def __init__(self, dnn_feature_columns, embedding_size=8, dnn_hidden_units=(128, 128), l2_reg_embedding=1e-5, l2_reg_dnn=0,
+ def __init__(self, dnn_feature_columns, dnn_hidden_units=(128, 128), l2_reg_embedding=1e-5, l2_reg_dnn=0,
init_std=0.0001, seed=1024, dnn_dropout=0, dnn_activation='relu', use_inner=True, use_outter=False,
- kernel_type='mat', task='binary', device='cpu',):
+ kernel_type='mat', task='binary', device='cpu', ):
- super(PNN, self).__init__([], dnn_feature_columns, embedding_size=embedding_size,
+ super(PNN, self).__init__([], dnn_feature_columns,
dnn_hidden_units=dnn_hidden_units,
l2_reg_embedding=l2_reg_embedding, l2_reg_dnn=l2_reg_dnn,
l2_reg_linear=0, init_std=init_std, seed=seed,
@@ -56,8 +54,7 @@ def __init__(self, dnn_feature_columns, embedding_size=8, dnn_hidden_units=(128,
self.task = task
product_out_dim = 0
- num_inputs = self.compute_input_dim(dnn_feature_columns, embedding_size, include_dense=False,
- feature_group=True)
+ num_inputs = self.compute_input_dim(dnn_feature_columns, include_dense=False, feature_group=True)
num_pairs = int(num_inputs * (num_inputs - 1) / 2)
if self.use_inner:
@@ -67,9 +64,9 @@ def __init__(self, dnn_feature_columns, embedding_size=8, dnn_hidden_units=(128,
if self.use_outter:
product_out_dim += num_pairs
self.outterproduct = OutterProductLayer(
- num_inputs, embedding_size, kernel_type=kernel_type, device=device)
+ num_inputs, self.embedding_size, kernel_type=kernel_type, device=device)
- self.dnn = DNN(product_out_dim + self.compute_input_dim(dnn_feature_columns, embedding_size), dnn_hidden_units,
+ self.dnn = DNN(product_out_dim + self.compute_input_dim(dnn_feature_columns), dnn_hidden_units,
activation=dnn_activation, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout, use_bn=False,
init_std=init_std, device=device)
@@ -112,4 +109,4 @@ def forward(self, X):
y_pred = self.out(logit)
- return y_pred
\ No newline at end of file
+ return y_pred
diff --git a/deepctr_torch/models/wdl.py b/deepctr_torch/models/wdl.py
index d4300e46..4d2f840a 100644
--- a/deepctr_torch/models/wdl.py
+++ b/deepctr_torch/models/wdl.py
@@ -7,18 +7,17 @@
"""
import torch.nn as nn
-import torch.nn.functional as F
from .basemodel import BaseModel
from ..inputs import combined_dnn_input
from ..layers import DNN
+
class WDL(BaseModel):
"""Instantiates the Wide&Deep Learning architecture.
:param linear_feature_columns: An iterable containing all the features used by linear part of the model.
:param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
- :param embedding_size: positive integer,sparse feature embedding_size
:param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of DNN
:param l2_reg_linear: float. L2 regularizer strength applied to wide part
:param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
@@ -32,13 +31,15 @@ class WDL(BaseModel):
:return: A PyTorch model instance.
"""
+
def __init__(self,
- linear_feature_columns, dnn_feature_columns, embedding_size=8, dnn_hidden_units=(256, 128),
+ linear_feature_columns, dnn_feature_columns, dnn_hidden_units=(256, 128),
l2_reg_linear=1e-5,
- l2_reg_embedding=1e-5, l2_reg_dnn=0, init_std=0.0001, seed=1024, dnn_dropout=0, dnn_activation='relu', dnn_use_bn=False,
+ l2_reg_embedding=1e-5, l2_reg_dnn=0, init_std=0.0001, seed=1024, dnn_dropout=0, dnn_activation='relu',
+ dnn_use_bn=False,
task='binary', device='cpu'):
- super(WDL, self).__init__(linear_feature_columns, dnn_feature_columns, embedding_size=embedding_size,
+ super(WDL, self).__init__(linear_feature_columns, dnn_feature_columns,
dnn_hidden_units=dnn_hidden_units,
l2_reg_linear=l2_reg_linear,
l2_reg_embedding=l2_reg_embedding, l2_reg_dnn=l2_reg_dnn, init_std=init_std,
@@ -49,8 +50,9 @@ def __init__(self,
self.use_dnn = len(dnn_feature_columns) > 0 and len(
dnn_hidden_units) > 0
if self.use_dnn:
- self.dnn = DNN(self.compute_input_dim(dnn_feature_columns, embedding_size), dnn_hidden_units,
- activation=dnn_activation, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout, use_bn=dnn_use_bn, init_std=init_std, device=device)
+ self.dnn = DNN(self.compute_input_dim(dnn_feature_columns), dnn_hidden_units,
+ activation=dnn_activation, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout, use_bn=dnn_use_bn,
+ init_std=init_std, device=device)
self.dnn_linear = nn.Linear(dnn_hidden_units[-1], 1, bias=False).to(device)
self.add_regularization_loss(
filter(lambda x: 'weight' in x[0] and 'bn' not in x[0], self.dnn.named_parameters()), l2_reg_dnn)
diff --git a/deepctr_torch/models/xdeepfm.py b/deepctr_torch/models/xdeepfm.py
index 3417ddb8..de7f7230 100644
--- a/deepctr_torch/models/xdeepfm.py
+++ b/deepctr_torch/models/xdeepfm.py
@@ -8,18 +8,17 @@
import torch
import torch.nn as nn
-import torch.nn.functional as F
from .basemodel import BaseModel
from ..inputs import combined_dnn_input
from ..layers import DNN, CIN
+
class xDeepFM(BaseModel):
"""Instantiates the xDeepFM architecture.
:param linear_feature_columns: An iterable containing all the features used by linear part of the model.
:param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
- :param embedding_size: positive integer,sparse feature embedding_size
:param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of deep net
:param cin_layer_size: list,list of positive integer or empty list, the feature maps in each hidden layer of Compressed Interaction Network
:param cin_split_half: bool.if set to True, half of the feature maps in each hidden will connect to output unit
@@ -39,12 +38,12 @@ class xDeepFM(BaseModel):
"""
- def __init__(self, linear_feature_columns, dnn_feature_columns, embedding_size=8, dnn_hidden_units=(256, 256),
+ def __init__(self, linear_feature_columns, dnn_feature_columns, dnn_hidden_units=(256, 256),
cin_layer_size=(256, 128,), cin_split_half=True, cin_activation='relu', l2_reg_linear=0.00001,
l2_reg_embedding=0.00001, l2_reg_dnn=0, l2_reg_cin=0, init_std=0.0001, seed=1024, dnn_dropout=0,
dnn_activation='relu', dnn_use_bn=False, task='binary', device='cpu'):
- super(xDeepFM, self).__init__(linear_feature_columns, dnn_feature_columns, embedding_size=embedding_size,
+ super(xDeepFM, self).__init__(linear_feature_columns, dnn_feature_columns,
dnn_hidden_units=dnn_hidden_units,
l2_reg_linear=l2_reg_linear,
l2_reg_embedding=l2_reg_embedding, l2_reg_dnn=l2_reg_dnn, init_std=init_std,
@@ -54,7 +53,7 @@ def __init__(self, linear_feature_columns, dnn_feature_columns, embedding_size=8
self.dnn_hidden_units = dnn_hidden_units
self.use_dnn = len(dnn_feature_columns) > 0 and len(dnn_hidden_units) > 0
if self.use_dnn:
- self.dnn = DNN(self.compute_input_dim(dnn_feature_columns, embedding_size), dnn_hidden_units,
+ self.dnn = DNN(self.compute_input_dim(dnn_feature_columns), dnn_hidden_units,
activation=dnn_activation, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout, use_bn=dnn_use_bn,
init_std=init_std, device=device)
self.dnn_linear = nn.Linear(dnn_hidden_units[-1], 1, bias=False).to(device)
@@ -73,10 +72,10 @@ def __init__(self, linear_feature_columns, dnn_feature_columns, embedding_size=8
else:
self.featuremap_num = sum(cin_layer_size)
self.cin = CIN(field_num, cin_layer_size,
- cin_activation, cin_split_half, l2_reg_cin, seed,device=device)
+ cin_activation, cin_split_half, l2_reg_cin, seed, device=device)
self.cin_linear = nn.Linear(self.featuremap_num, 1, bias=False).to(device)
self.add_regularization_loss(
- filter(lambda x: 'weight' in x[0], self.cin.named_parameters()), l2_reg_cin)
+ filter(lambda x: 'weight' in x[0], self.cin.named_parameters()), l2_reg_cin)
self.to(device)
diff --git a/deepctr_torch/utils.py b/deepctr_torch/utils.py
index d837afd3..4caf6547 100644
--- a/deepctr_torch/utils.py
+++ b/deepctr_torch/utils.py
@@ -37,8 +37,8 @@ def check(version):
logging.warning(
'\nDeepCTR-PyTorch version {0} detected. Your version is {1}.\nUse `pip install -U deepctr-torch` to upgrade.Changelog: https://github.com/shenweichen/DeepCTR-Torch/releases/tag/v{0}'.format(
latest_version, version))
- except Exception as e:
- #print(e)
+ except :
+ print("Please check the latest version manually on https://pypi.org/project/deepctr-torch/#history")
return
Thread(target=check, args=(version,)).start()
diff --git a/docs/source/Examples.md b/docs/source/Examples.md
index 2c3fbe51..d3a73acd 100644
--- a/docs/source/Examples.md
+++ b/docs/source/Examples.md
@@ -10,28 +10,21 @@ click-through rate. It has 13 integer features and
![image](../pics/criteo_sample.png)
In this example, we simply normalize the dense features between 0 and 1. You
-can try other transformation technique like log normalization or discretization.Then we use `SparseFeat` and `DenseFeat` to generate feature columns for sparse features and dense features.
+can try other transformation techniques like log normalization or discretization. Then we use [SparseFeat](./Features.html#sparsefeat) and [DenseFeat](./Features.html#densefeat) to generate feature columns for sparse features and dense features.
-``SparseFeat`` is a namedtuple with signature ``SparseFeat(name, dimension, use_hash, dtype, embedding_name,embedding)``
-
-- name : feature name
-- dimension : number of unique feature values for sprase feature,hashing space when hash_flag=True, any value for dense feature.
-- use_hash : defualt `False`.If `True` the input will be hashed to space of size `dimension`.
-- dtype : default `float32`.dtype of input tensor.
-- embedding_name : default `None`. If None, the `embedding_name` will be same as `name`.
-- embedding : default `True`.If `False`, the feature will not be embeded to a dense vector.
This example shows how to use ``DeepFM`` to solve a simple binary classification task. You can get the demo data [criteo_sample.txt](https://github.com/shenweichen/DeepCTR-Torch/tree/master/examples/criteo_sample.txt)
and run the following codes.
```python
import pandas as pd
+import torch
from sklearn.metrics import log_loss, roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
-from deepctr_torch.models import *
+
from deepctr_torch.inputs import SparseFeat, DenseFeat, get_feature_names
-import torch
+from deepctr_torch.models import *
if __name__ == "__main__":
data = pd.read_csv('./criteo_sample.txt')
@@ -53,7 +46,7 @@ if __name__ == "__main__":
# 2.count #unique features for each sparse field,and record dense feature field name
fixlen_feature_columns = [SparseFeat(feat, data[feat].nunique())
- for feat in sparse_features] + [DenseFeat(feat, 1,)
+ for feat in sparse_features] + [DenseFeat(feat, 1, )
for feat in dense_features]
dnn_feature_columns = fixlen_feature_columns
@@ -65,8 +58,9 @@ if __name__ == "__main__":
# 3.generate input data for model
train, test = train_test_split(data, test_size=0.2)
- train_model_input = {name:train[name] for name in feature_names}
- test_model_input = {name:test[name] for name in feature_names}
+
+ train_model_input = {name: train[name] for name in feature_names}
+ test_model_input = {name: test[name] for name in feature_names}
# 4.Define Model,train,predict and evaluate
@@ -76,13 +70,14 @@ if __name__ == "__main__":
print('cuda ready...')
device = 'cuda:0'
- model = DeepFM(linear_feature_columns=linear_feature_columns, dnn_feature_columns=dnn_feature_columns, task='binary',
+ model = DeepFM(linear_feature_columns=linear_feature_columns, dnn_feature_columns=dnn_feature_columns,
+ task='binary',
l2_reg_embedding=1e-5, device=device)
model.compile("adagrad", "binary_crossentropy",
- metrics=["binary_crossentropy", "auc"],)
+ metrics=["binary_crossentropy", "auc"], )
model.fit(train_model_input, train[target].values,
- batch_size=256, epochs=10, validation_split=0.2, verbose=2)
+ batch_size=32, epochs=10, validation_split=0.0, verbose=2)
pred_ans = model.predict(test_model_input, 256)
print("")
@@ -110,8 +105,8 @@ from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
+from deepctr_torch.inputs import SparseFeat, get_feature_names
from deepctr_torch.models import DeepFM
-from deepctr_torch.inputs import SparseFeat,get_feature_names
if __name__ == "__main__":
@@ -133,8 +128,8 @@ if __name__ == "__main__":
# 3.generate input data for model
train, test = train_test_split(data, test_size=0.2)
- train_model_input = {name:train[name] for name in feature_names}
- test_model_input = {name:test[name] for name in feature_names}
+ train_model_input = {name: train[name] for name in feature_names}
+ test_model_input = {name: test[name] for name in feature_names}
# 4.Define Model,train,predict and evaluate
device = 'cpu'
@@ -143,8 +138,8 @@ if __name__ == "__main__":
print('cuda ready...')
device = 'cuda:0'
- model = DeepFM(linear_feature_columns, dnn_feature_columns, task='regression',device=device)
- model.compile("adam", "mse", metrics=['mse'],)
+ model = DeepFM(linear_feature_columns, dnn_feature_columns, task='regression', device=device)
+ model.compile("adam", "mse", metrics=['mse'], )
history = model.fit(train_model_input, train[target].values,
batch_size=256, epochs=10, verbose=2, validation_split=0.2, )
@@ -152,6 +147,7 @@ if __name__ == "__main__":
print("test MSE", round(mean_squared_error(
test[target].values, pred_ans), 4))
+
```
## Multi-value Input : Movielens
Here is a small fraction of the data, including sparse fields and a multivalent field.
There are 2 additional steps to use DeepCTR with sequence feature input.
1. Generate the padded and encoded sequence feature of the sequence input feature (**value 0 is for padding**).
-2. Generate config of sequence feature with `deepctr_torch.inputs.VarLenSparseFeat`
-
-``VarLenSparseFeat`` is a namedtuple with signature ``VarLenSparseFeat(name, dimension, maxlen, combiner, use_hash, dtype, embedding_name,embedding)``
-
-- name : feature name,if it is already used in sparse_feature_dim,then a shared embedding mechanism will be used.
-- dimension : number of unique feature values
-- maxlen : maximum length of this feature for all samples
-- combiner : pooling method,can be ``sum``,``mean`` or ``max``
-- use_hash : defualt `False`.if `True` the input will be hashed to space of size `dimension`.
-- dtype : default `float32`.dtype of input tensor.
-- embedding_name : default `None`. If None, the embedding_name` will be same as `name`.
-- embedding : default `True`.If `False`, the feature will not be embeded to a dense vector.
+2. Generate the config of the sequence feature with [VarLenSparseFeat](./Features.html#varlensparsefeat)
This example shows how to use ``DeepFM`` with a sequence (multi-value) feature. You can get the demo data
@@ -186,11 +171,12 @@ This example shows how to use ``DeepFM`` with sequence(multi-value) feature. You
```python
import numpy as np
import pandas as pd
+import torch
from sklearn.preprocessing import LabelEncoder
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
+from deepctr_torch.inputs import SparseFeat, VarLenSparseFeat, get_feature_names
from deepctr_torch.models import DeepFM
-from deepctr_torch.inputs import SparseFeat, VarLenSparseFeat,get_feature_names,get_varlen_feature_names
def split(x):
@@ -202,46 +188,56 @@ def split(x):
return list(map(lambda x: key2index[x], key_ans))
-data = pd.read_csv("./movielens_sample.txt")
-sparse_features = ["movie_id", "user_id",
- "gender", "age", "occupation", "zip", ]
-target = ['rating']
+if __name__ == "__main__":
+ data = pd.read_csv("./movielens_sample.txt")
+ sparse_features = ["movie_id", "user_id",
+ "gender", "age", "occupation", "zip", ]
+ target = ['rating']
+
+ # 1.Label Encoding for sparse features,and process sequence features
+ for feat in sparse_features:
+ lbe = LabelEncoder()
+ data[feat] = lbe.fit_transform(data[feat])
+ # preprocess the sequence feature
+
+ key2index = {}
+ genres_list = list(map(split, data['genres'].values))
+ genres_length = np.array(list(map(len, genres_list)))
+ max_len = max(genres_length)
+ # Notice : padding=`post`
+ genres_list = pad_sequences(genres_list, maxlen=max_len, padding='post', )
-# 1.Label Encoding for sparse features,and process sequence features
-for feat in sparse_features:
- lbe = LabelEncoder()
- data[feat] = lbe.fit_transform(data[feat])
-# preprocess the sequence feature
+ # 2.count #unique features for each sparse field and generate feature config for sequence feature
-key2index = {}
-genres_list = list(map(split, data['genres'].values))
-genres_length = np.array(list(map(len, genres_list)))
-max_len = max(genres_length)
-# Notice : padding=`post`
-genres_list = pad_sequences(genres_list, maxlen=max_len, padding='post', )
+ fixlen_feature_columns = [SparseFeat(feat, data[feat].nunique(), embedding_dim=4)
+ for feat in sparse_features]
-# 2.count #unique features for each sparse field and generate feature config for sequence feature
+ varlen_feature_columns = [VarLenSparseFeat(SparseFeat('genres', vocabulary_size=len(
+ key2index) + 1, embedding_dim=4), maxlen=max_len, combiner='mean',
+ weight_name=None)] # Notice : value 0 is for padding for sequence input feature
-fixlen_feature_columns = [SparseFeat(feat, data[feat].nunique())
- for feat in sparse_features]
-varlen_feature_columns = [VarLenSparseFeat('genres', len(
- key2index) + 1, max_len, 'mean')] # Notice : value 0 is for padding for sequence input feature
+ linear_feature_columns = fixlen_feature_columns + varlen_feature_columns
+ dnn_feature_columns = fixlen_feature_columns + varlen_feature_columns
-linear_feature_columns = fixlen_feature_columns + varlen_feature_columns
-dnn_feature_columns = fixlen_feature_columns + varlen_feature_columns
-feature_names = get_feature_names(linear_feature_columns + dnn_feature_columns)
+ feature_names = get_feature_names(linear_feature_columns + dnn_feature_columns)
+ # 3.generate input data for model
+ model_input = {name: data[name] for name in sparse_features} #
+ model_input["genres"] = genres_list
-# 3.generate input data for model
-model_input = {name:data[name] for name in feature_names}
-model_input['genres'] = genres_list
+ # 4.Define Model,compile and train
+ device = 'cpu'
+ use_cuda = True
+ if use_cuda and torch.cuda.is_available():
+ print('cuda ready...')
+ device = 'cuda:0'
+
+ model = DeepFM(linear_feature_columns, dnn_feature_columns, task='regression', device=device)
-# 4.Define Model,compile and train
-model = DeepFM(linear_feature_columns,dnn_feature_columns,task='regression')
+ model.compile("adam", "mse", metrics=['mse'], )
+ history = model.fit(model_input, data[target].values,
+ batch_size=256, epochs=10, verbose=2, validation_split=0.2, )
-model.compile("adam", "mse", metrics=['mse'], )
-history = model.fit(model_input, data[target].values,
- batch_size=256, epochs=10, verbose=2, validation_split=0.2, )
```
\ No newline at end of file
diff --git a/docs/source/Features.md b/docs/source/Features.md
index c406f3f2..ba9782cd 100644
--- a/docs/source/Features.md
+++ b/docs/source/Features.md
@@ -21,6 +21,34 @@ DNN based CTR estimation models consists of the following 4 modules:
> High-order Extractor learns feature combination through complex neural network functions like MLP,Cross Net,etc.
+## Feature Columns
+### SparseFeat
+``SparseFeat`` is a namedtuple with signature ``SparseFeat(name, vocabulary_size, embedding_dim, use_hash, dtype, embedding_name, group_name)``
+
+- name : feature name
+- vocabulary_size : number of unique feature values for the sparse feature, or the hashing space when `use_hash=True`
+- embedding_dim : embedding dimension
+- use_hash : default `False`. If `True`, the input will be hashed to a space of size `vocabulary_size`.
+- dtype : default `int32`. dtype of the input tensor.
+- embedding_name : default `None`. If `None`, the embedding_name will be the same as `name`.
+- group_name : feature group of this feature.
+
+### DenseFeat
+``DenseFeat`` is a namedtuple with signature ``DenseFeat(name, dimension, dtype)``
+
+- name : feature name
+- dimension : dimension of the dense feature vector.
+- dtype : default `float32`. dtype of the input tensor.
+
+### VarLenSparseFeat
+
+``VarLenSparseFeat`` is a namedtuple with signature ``VarLenSparseFeat(sparsefeat, maxlen, combiner, length_name)``
+
+- sparsefeat : an instance of `SparseFeat`
+- maxlen : maximum length of this feature across all samples
+- combiner : pooling method, can be ``sum``, ``mean`` or ``max``
+- length_name : feature length name. If `None`, value 0 in the feature is used for padding.
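+
+A minimal usage sketch of the three feature column types above (the feature names, vocabulary sizes and ``maxlen`` are illustrative only, not taken from a real dataset):
+
+```python
+from deepctr_torch.inputs import SparseFeat, DenseFeat, VarLenSparseFeat
+
+# a categorical feature with 1000 distinct ids, embedded into 4 dimensions
+user_id = SparseFeat('user_id', vocabulary_size=1000, embedding_dim=4)
+
+# a 1-dimensional numerical feature
+price = DenseFeat('price', 1)
+
+# a padded multi-value feature, mean-pooled over a sequence of at most 5 values
+genres = VarLenSparseFeat(SparseFeat('genres', vocabulary_size=20, embedding_dim=4),
+                          maxlen=5, combiner='mean')
+
+feature_columns = [user_id, price, genres]
+```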
+
## Models
diff --git a/docs/source/History.md b/docs/source/History.md
index 985975ee..a2227568 100644
--- a/docs/source/History.md
+++ b/docs/source/History.md
@@ -1,4 +1,5 @@
# History
+- 01/31/2020 : [v0.2.0](https://github.com/shenweichen/DeepCTR-Torch/releases/tag/v0.2.0) released. Refactor [feature columns](./Features.html#feature-columns). Support double precision in metric calculation.
- 10/03/2019 : [v0.1.3](https://github.com/shenweichen/DeepCTR-Torch/releases/tag/v0.1.3) released.Simplify the input logic.
- 09/28/2019 : [v0.1.2](https://github.com/shenweichen/DeepCTR-Torch/releases/tag/v0.1.2) released.Add [sequence(multi-value) input support](./Examples.html#multi-value-input-movielens).
- 09/24/2019 : [v0.1.1](https://github.com/shenweichen/DeepCTR-Torch/releases/tag/v0.1.1) released. Add [CCPM](./Features.html#ccpm-convolutional-click-prediction-model).
diff --git a/docs/source/Quick-Start.md b/docs/source/Quick-Start.md
index d59ae493..a29ca510 100644
--- a/docs/source/Quick-Start.md
+++ b/docs/source/Quick-Start.md
@@ -14,18 +14,20 @@ $ pip install -U deepctr-torch
```python
import pandas as pd
-from sklearn.preprocessing import LabelEncoder, MinMaxScaler
+import torch
+from sklearn.metrics import log_loss, roc_auc_score
from sklearn.model_selection import train_test_split
-from deepctr_torch.models import DeepFM
-from deepctr_torch.inputs import SparseFeat, DenseFeat,get_feature_names
+from sklearn.preprocessing import LabelEncoder, MinMaxScaler
+
+from deepctr_torch.inputs import SparseFeat, DenseFeat, get_feature_names
data = pd.read_csv('./criteo_sample.txt')
sparse_features = ['C' + str(i) for i in range(1, 27)]
-dense_features = ['I'+str(i) for i in range(1, 14)]
+dense_features = ['I' + str(i) for i in range(1, 14)]
data[sparse_features] = data[sparse_features].fillna('-1', )
-data[dense_features] = data[dense_features].fillna(0,)
+data[dense_features] = data[dense_features].fillna(0, )
target = ['label']
```
@@ -59,16 +61,15 @@ For dense numerical features, we concatenate them to the input tensors of fully
- Label Encoding
```python
-sparse_feature_columns = [SparseFeat(feat, data[feat].nunique())
- for feat in sparse_features]
-dense_feature_columns = [DenseFeat(feat, 1)
+fixlen_feature_columns = [SparseFeat(feat, vocabulary_size=data[feat].nunique(),embedding_dim=4)
+ for i,feat in enumerate(sparse_features)] + [DenseFeat(feat, 1,)
for feat in dense_features]
```
- Feature Hashing on the fly【currently not supported】
```python
-sparse_feature_columns = [SparseFeat(feat, dimension=1e6,use_hash=True) for feat in sparse_features]#The dimension can be set according to data
-dense_feature_columns = [DenseFeat(feat, 1)
- for feat in dense_features]
+fixlen_feature_columns = [SparseFeat(feat, vocabulary_size=1e6,embedding_dim=4, use_hash=True, dtype='string') # since the input is string
+ for feat in sparse_features] + [DenseFeat(feat, 1, )
+ for feat in dense_features]
```
- generate feature columns
```python
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 27595820..276439b6 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -26,7 +26,7 @@
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
-release = '0.1.3'
+release = '0.2.0'
# -- General configuration ---------------------------------------------------
diff --git a/docs/source/index.rst b/docs/source/index.rst
index c3e70528..c1b75460 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -34,12 +34,12 @@ You can read the latest code at https://github.com/shenweichen/DeepCTR-Torch and
News
-----
+01/31/2020 : Refactor `feature columns <./Features.html#feature-columns>`_ . Support double precision in metric calculation . `Changelog `_
+
10/03/2019 : Simplify the input logic(`examples <./Examples.html#classification-criteo>`_). `Changelog `_
09/28/2019 : Add `sequence(multi-value) input support <./Examples.html#multi-value-input-movielens>`_ . `Changelog `_
-09/24/2019 : Add `CCPM <./Features.html#ccpm-convolutional-click-prediction-model>`_ . `Changelog `_
-
DiscussionGroup
-----------------------
diff --git a/examples/run_classification_criteo.py b/examples/run_classification_criteo.py
index 4e5915a8..dc7185de 100644
--- a/examples/run_classification_criteo.py
+++ b/examples/run_classification_criteo.py
@@ -1,12 +1,12 @@
# -*- coding: utf-8 -*-
import pandas as pd
+import torch
from sklearn.metrics import log_loss, roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
-from deepctr_torch.models import *
-from deepctr_torch.inputs import SparseFeat, DenseFeat, get_feature_names
-import torch
+from deepctr_torch.inputs import SparseFeat, DenseFeat, get_feature_names
+from deepctr_torch.models import *
if __name__ == "__main__":
data = pd.read_csv('./criteo_sample.txt')
@@ -28,7 +28,7 @@
# 2.count #unique features for each sparse field,and record dense feature field name
fixlen_feature_columns = [SparseFeat(feat, data[feat].nunique())
- for feat in sparse_features] + [DenseFeat(feat, 1,)
+ for feat in sparse_features] + [DenseFeat(feat, 1, )
for feat in dense_features]
dnn_feature_columns = fixlen_feature_columns
@@ -41,8 +41,8 @@
train, test = train_test_split(data, test_size=0.2)
- train_model_input = {name:train[name] for name in feature_names}
- test_model_input = {name:test[name] for name in feature_names}
+ train_model_input = {name: train[name] for name in feature_names}
+ test_model_input = {name: test[name] for name in feature_names}
# 4.Define Model,train,predict and evaluate
@@ -52,15 +52,16 @@
print('cuda ready...')
device = 'cuda:0'
- model = DeepFM(linear_feature_columns=linear_feature_columns, dnn_feature_columns=dnn_feature_columns, task='binary',
+ model = DeepFM(linear_feature_columns=linear_feature_columns, dnn_feature_columns=dnn_feature_columns,
+ task='binary',
l2_reg_embedding=1e-5, device=device)
model.compile("adagrad", "binary_crossentropy",
- metrics=["binary_crossentropy", "auc"],)
+ metrics=["binary_crossentropy", "auc"], )
model.fit(train_model_input, train[target].values,
batch_size=32, epochs=10, validation_split=0.0, verbose=2)
pred_ans = model.predict(test_model_input, 256)
print("")
print("test LogLoss", round(log_loss(test[target].values, pred_ans), 4))
- print("test AUC", round(roc_auc_score(test[target].values, pred_ans), 4))
\ No newline at end of file
+ print("test AUC", round(roc_auc_score(test[target].values, pred_ans), 4))
diff --git a/examples/run_multivalue_movielens.py b/examples/run_multivalue_movielens.py
index 311e6012..d86252a7 100644
--- a/examples/run_multivalue_movielens.py
+++ b/examples/run_multivalue_movielens.py
@@ -1,10 +1,11 @@
import numpy as np
import pandas as pd
+import torch
from sklearn.preprocessing import LabelEncoder
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
+from deepctr_torch.inputs import SparseFeat, VarLenSparseFeat, get_feature_names
from deepctr_torch.models import DeepFM
-from deepctr_torch.inputs import SparseFeat, VarLenSparseFeat,get_feature_names
def split(x):
@@ -15,8 +16,8 @@ def split(x):
key2index[key] = len(key2index) + 1
return list(map(lambda x: key2index[x], key_ans))
-if __name__ == "__main__":
+if __name__ == "__main__":
data = pd.read_csv("./movielens_sample.txt")
sparse_features = ["movie_id", "user_id",
"gender", "age", "occupation", "zip", ]
@@ -37,24 +38,32 @@ def split(x):
# 2.count #unique features for each sparse field and generate feature config for sequence feature
- fixlen_feature_columns = [SparseFeat(feat, data[feat].nunique())
- for feat in sparse_features]
- varlen_feature_columns = [VarLenSparseFeat('genres', len(
- key2index) + 1, max_len, 'mean')] # Notice : value 0 is for padding for sequence input feature
+ fixlen_feature_columns = [SparseFeat(feat, data[feat].nunique(), embedding_dim=4)
+ for feat in sparse_features]
+
+ varlen_feature_columns = [VarLenSparseFeat(SparseFeat('genres', vocabulary_size=len(
+ key2index) + 1, embedding_dim=4), maxlen=max_len, combiner='mean',
+ weight_name=None)] # Notice : value 0 is for padding for sequence input feature
linear_feature_columns = fixlen_feature_columns + varlen_feature_columns
dnn_feature_columns = fixlen_feature_columns + varlen_feature_columns
- feature_names = get_feature_names(linear_feature_columns + dnn_feature_columns)
+ feature_names = get_feature_names(linear_feature_columns + dnn_feature_columns)
# 3.generate input data for model
- model_input = {name:data[name] for name in feature_names}
- model_input['genres'] = genres_list
+ model_input = {name: data[name] for name in sparse_features} #
+ model_input["genres"] = genres_list
# 4.Define Model,compile and train
- model = DeepFM(linear_feature_columns,dnn_feature_columns,task='regression')
+ device = 'cpu'
+ use_cuda = True
+ if use_cuda and torch.cuda.is_available():
+ print('cuda ready...')
+ device = 'cuda:0'
+
+ model = DeepFM(linear_feature_columns, dnn_feature_columns, task='regression', device=device)
model.compile("adam", "mse", metrics=['mse'], )
history = model.fit(model_input, data[target].values,
- batch_size=256, epochs=10, verbose=2, validation_split=0.2, )
\ No newline at end of file
+ batch_size=256, epochs=10, verbose=2, validation_split=0.2, )
diff --git a/examples/run_regression_movielens.py b/examples/run_regression_movielens.py
index f88a144d..5c6862fd 100644
--- a/examples/run_regression_movielens.py
+++ b/examples/run_regression_movielens.py
@@ -4,8 +4,8 @@
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
+from deepctr_torch.inputs import SparseFeat, get_feature_names
from deepctr_torch.models import DeepFM
-from deepctr_torch.inputs import SparseFeat,get_feature_names
if __name__ == "__main__":
@@ -27,8 +27,8 @@
# 3.generate input data for model
train, test = train_test_split(data, test_size=0.2)
- train_model_input = {name:train[name] for name in feature_names}
- test_model_input = {name:test[name] for name in feature_names}
+ train_model_input = {name: train[name] for name in feature_names}
+ test_model_input = {name: test[name] for name in feature_names}
# 4.Define Model,train,predict and evaluate
device = 'cpu'
@@ -37,8 +37,8 @@
print('cuda ready...')
device = 'cuda:0'
- model = DeepFM(linear_feature_columns, dnn_feature_columns, task='regression',device=device)
- model.compile("adam", "mse", metrics=['mse'],)
+ model = DeepFM(linear_feature_columns, dnn_feature_columns, task='regression', device=device)
+ model.compile("adam", "mse", metrics=['mse'], )
history = model.fit(train_model_input, train[target].values,
batch_size=256, epochs=10, verbose=2, validation_split=0.2, )
diff --git a/setup.py b/setup.py
index b33e1fb0..377a6f78 100644
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,7 @@
setuptools.setup(
name="deepctr-torch",
- version="0.1.3",
+ version="0.2.0",
author="Weichen Shen",
author_email="wcshen1994@163.com",
description="Easy-to-use,Modular and Extendible package of deep learning based CTR(Click Through Rate) prediction models with PyTorch",
diff --git a/tests/layers/activation_test.py b/tests/layers/activation_test.py
deleted file mode 100644
index 9e14faee..00000000
--- a/tests/layers/activation_test.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# -*- coding: utf-8 -*-
-from deepctr_torch.layers import activation
-
diff --git a/tests/models/AFM_test.py b/tests/models/AFM_test.py
index 52e36484..436cad6c 100644
--- a/tests/models/AFM_test.py
+++ b/tests/models/AFM_test.py
@@ -1,7 +1,9 @@
# -*- coding: utf-8 -*-
import pytest
+
from deepctr_torch.models import AFM
-from ..utils import get_test_data, SAMPLE_SIZE, check_model
+from ..utils import get_test_data, SAMPLE_SIZE, check_model, get_device
+
@pytest.mark.parametrize(
'use_attention, sparse_feature_num, dense_feature_num',
@@ -11,10 +13,10 @@ def test_AFM(use_attention, sparse_feature_num, dense_feature_num):
model_name = 'AFM'
sample_size = SAMPLE_SIZE
x, y, feature_columns = get_test_data(
- sample_size, sparse_feature_num, dense_feature_num)
+ sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=dense_feature_num)
- model = AFM(feature_columns, feature_columns,
- use_attention=use_attention, afm_dropout=0.5)
+ model = AFM(linear_feature_columns=feature_columns, dnn_feature_columns=feature_columns,
+ use_attention=use_attention, afm_dropout=0.5, device=get_device())
check_model(model, model_name, x, y)
diff --git a/tests/models/AutoInt_test.py b/tests/models/AutoInt_test.py
index a181fbe1..92a3aff8 100644
--- a/tests/models/AutoInt_test.py
+++ b/tests/models/AutoInt_test.py
@@ -1,25 +1,27 @@
# -*- coding: utf-8 -*-
import pytest
+
from deepctr_torch.models import AutoInt
-from ..utils import get_test_data, SAMPLE_SIZE, check_model
+from ..utils import get_test_data, SAMPLE_SIZE, check_model, get_device
@pytest.mark.parametrize(
'att_layer_num,dnn_hidden_units,sparse_feature_num',
- [(1, (4, ), 2), (0, (4,), 2), (2, (4, 4,), 2), (1, (), 1), (1, (4,), 1)]
+ [(1, (4,), 2), (0, (4,), 2), (2, (4, 4,), 2), (1, (), 1), (1, (4,), 1)]
)
def test_AutoInt(att_layer_num, dnn_hidden_units, sparse_feature_num):
# if version.parse(torch.__version__) >= version.parse("1.1.0") and len(dnn_hidden_units)==0:#todo check version
# return
model_name = "AutoInt"
sample_size = SAMPLE_SIZE
- x, y, feature_dim_dict = get_test_data(
- sample_size, sparse_feature_num, sparse_feature_num)
+ x, y, feature_columns = get_test_data(
+ sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=sparse_feature_num)
- model = AutoInt(feature_dim_dict, att_layer_num=att_layer_num,
- dnn_hidden_units=dnn_hidden_units, dnn_dropout=0.5, )
+ model = AutoInt(linear_feature_columns=feature_columns, dnn_feature_columns=feature_columns,
+ att_layer_num=att_layer_num,
+ dnn_hidden_units=dnn_hidden_units, dnn_dropout=0.5, device=get_device())
check_model(model, model_name, x, y)
if __name__ == "__main__":
- pass
\ No newline at end of file
+ pass
diff --git a/tests/models/CCPM_test.py b/tests/models/CCPM_test.py
index 99f0491a..fbcdcfa2 100644
--- a/tests/models/CCPM_test.py
+++ b/tests/models/CCPM_test.py
@@ -1,12 +1,12 @@
import pytest
from deepctr_torch.models import CCPM
-from tests.utils import check_model, get_test_data, SAMPLE_SIZE
+from tests.utils import check_model, get_test_data, SAMPLE_SIZE, get_device
@pytest.mark.parametrize(
'sparse_feature_num,dense_feature_num',
- [ (3, 0)
+ [(3, 0)
]
)
def test_CCPM(sparse_feature_num, dense_feature_num):
@@ -14,9 +14,9 @@ def test_CCPM(sparse_feature_num, dense_feature_num):
sample_size = SAMPLE_SIZE
x, y, feature_columns = get_test_data(
- sample_size, sparse_feature_num, dense_feature_num)
+ sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=dense_feature_num)
- model = CCPM(feature_columns,feature_columns, conv_kernel_width=(3, 2), conv_filters=(
+ model = CCPM(feature_columns, feature_columns, conv_kernel_width=(3, 2), conv_filters=(
2, 1), dnn_hidden_units=[32, ], dnn_dropout=0.5)
check_model(model, model_name, x, y)
@@ -27,15 +27,14 @@ def test_CCPM(sparse_feature_num, dense_feature_num):
]
)
def test_CCPM_without_seq(sparse_feature_num, dense_feature_num):
-
model_name = "CCPM"
sample_size = SAMPLE_SIZE
x, y, feature_columns = get_test_data(
- sample_size, sparse_feature_num, dense_feature_num, sequence_feature=())
+ sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=dense_feature_num, sequence_feature=())
- model = CCPM(feature_columns, feature_columns,conv_kernel_width=(3, 2), conv_filters=(
- 2, 1), dnn_hidden_units=[32, ], dnn_dropout=0.5)
+ model = CCPM(feature_columns, feature_columns, conv_kernel_width=(3, 2), conv_filters=(
+ 2, 1), dnn_hidden_units=[32, ], dnn_dropout=0.5, device=get_device())
check_model(model, model_name, x, y)
diff --git a/tests/models/DCN_test.py b/tests/models/DCN_test.py
index 725521b8..3600acee 100644
--- a/tests/models/DCN_test.py
+++ b/tests/models/DCN_test.py
@@ -1,7 +1,8 @@
# -*- coding: utf-8 -*-
import pytest
+
from deepctr_torch.models import DCN
-from ..utils import check_model, get_test_data, SAMPLE_SIZE
+from ..utils import check_model, get_test_data, SAMPLE_SIZE, get_device
@pytest.mark.parametrize(
@@ -14,12 +15,12 @@ def test_DCN(embedding_size, cross_num, hidden_size, sparse_feature_num):
sample_size = SAMPLE_SIZE
x, y, feature_columns = get_test_data(
- sample_size, sparse_feature_num, sparse_feature_num)
+ sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=sparse_feature_num)
- model = DCN(feature_columns, embedding_size=embedding_size,
- cross_num=cross_num, dnn_hidden_units=hidden_size, dnn_dropout=0.5)
+ model = DCN(linear_feature_columns=feature_columns, dnn_feature_columns=feature_columns,
+ cross_num=cross_num, dnn_hidden_units=hidden_size, dnn_dropout=0.5, device=get_device())
check_model(model, model_name, x, y)
if __name__ == "__main__":
- pass
\ No newline at end of file
+ pass
diff --git a/tests/models/DeepFM_test.py b/tests/models/DeepFM_test.py
index acfecfe0..2b070500 100644
--- a/tests/models/DeepFM_test.py
+++ b/tests/models/DeepFM_test.py
@@ -1,26 +1,27 @@
# -*- coding: utf-8 -*-
import pytest
+
from deepctr_torch.models import DeepFM
-from ..utils import get_test_data, SAMPLE_SIZE, check_model
+from ..utils import get_test_data, SAMPLE_SIZE, check_model, get_device
@pytest.mark.parametrize(
'use_fm,hidden_size,sparse_feature_num',
- [(True, (32, ), 3),
- (False, (32, ), 3),
- (False, (32, ), 2), (False, (32,), 1), (True, (), 1), (False, (), 2)
+ [(True, (32,), 3),
+ (False, (32,), 3),
+ (False, (32,), 2), (False, (32,), 1), (True, (), 1), (False, (), 2)
]
)
def test_DeepFM(use_fm, hidden_size, sparse_feature_num):
model_name = "DeepFM"
sample_size = SAMPLE_SIZE
x, y, feature_columns = get_test_data(
- sample_size, sparse_feature_num, sparse_feature_num)
+ sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=sparse_feature_num)
model = DeepFM(feature_columns, feature_columns, use_fm=use_fm,
- dnn_hidden_units=hidden_size, dnn_dropout=0.5)
+ dnn_hidden_units=hidden_size, dnn_dropout=0.5, device=get_device())
check_model(model, model_name, x, y)
if __name__ == "__main__":
- pass
\ No newline at end of file
+ pass
diff --git a/tests/models/FiBiNET_test.py b/tests/models/FiBiNET_test.py
index 35d94013..5f25bdf6 100644
--- a/tests/models/FiBiNET_test.py
+++ b/tests/models/FiBiNET_test.py
@@ -1,7 +1,8 @@
# -*- coding: utf-8 -*-
import pytest
+
from deepctr_torch.models import FiBiNET
-from ..utils import check_model, SAMPLE_SIZE, get_test_data
+from ..utils import check_model, SAMPLE_SIZE, get_test_data, get_device
@pytest.mark.parametrize(
@@ -12,10 +13,10 @@
def test_FiBiNET(bilinear_type):
model_name = "FiBiNET"
sample_size = SAMPLE_SIZE
- x, y, feature_columns = get_test_data(sample_size, 3, 3)
+ x, y, feature_columns = get_test_data(sample_size, sparse_feature_num=3, dense_feature_num=3)
model = FiBiNET(feature_columns, feature_columns,
- bilinear_type=bilinear_type, dnn_hidden_units=[8, 8], dnn_dropout=0.5,)
+ bilinear_type=bilinear_type, dnn_hidden_units=[8, 8], dnn_dropout=0.5, device=get_device())
check_model(model, model_name, x, y)
diff --git a/tests/models/MLR_test.py b/tests/models/MLR_test.py
index f1ec5a3b..c72aafe0 100644
--- a/tests/models/MLR_test.py
+++ b/tests/models/MLR_test.py
@@ -1,7 +1,8 @@
# -*- coding: utf-8 -*-
import pytest
+
from deepctr_torch.models import MLR
-from ..utils import check_model, SAMPLE_SIZE, get_test_data
+from ..utils import check_model, SAMPLE_SIZE, get_test_data, get_device
@pytest.mark.parametrize(
@@ -24,7 +25,7 @@ def test_MLRs(region_sparse, region_dense, base_sparse, base_dense, bias_sparse,
SAMPLE_SIZE, region_sparse, region_dense, prefix='bias')
model = MLR(region_feature_columns, base_feature_columns,
- bias_feature_columns=bias_feature_columns)
+ bias_feature_columns=bias_feature_columns, device=get_device())
model.compile('adam', 'binary_crossentropy',
metrics=['binary_crossentropy'])
print(model_name + " test pass!")
@@ -39,7 +40,7 @@ def test_MLR():
bias_x, y, bias_feature_columns = get_test_data(
SAMPLE_SIZE, 3, 3, prefix='bias')
- model = MLR(region_feature_columns)
+ model = MLR(region_feature_columns, device=get_device())
model.compile('adam', 'binary_crossentropy',
metrics=['binary_crossentropy'])
@@ -48,4 +49,4 @@ def test_MLR():
if __name__ == "__main__":
- pass
\ No newline at end of file
+ pass
diff --git a/tests/models/NFM_test.py b/tests/models/NFM_test.py
index 26bb1727..3073ed86 100644
--- a/tests/models/NFM_test.py
+++ b/tests/models/NFM_test.py
@@ -1,7 +1,8 @@
# -*- coding: utf-8 -*-
import pytest
+
from deepctr_torch.models import NFM
-from ..utils import check_model, get_test_data, SAMPLE_SIZE
+from ..utils import check_model, get_test_data, SAMPLE_SIZE, get_device
@pytest.mark.parametrize(
@@ -9,17 +10,16 @@
[((8,), 2), ((8, 8,), 2), ((8,), 1)]
)
def test_NFM(hidden_size, sparse_feature_num):
-
model_name = "NFM"
sample_size = SAMPLE_SIZE
x, y, feature_columns = get_test_data(
- sample_size, sparse_feature_num, sparse_feature_num)
+ sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=sparse_feature_num)
- model = NFM(feature_columns, feature_columns, embedding_size=8,
- dnn_hidden_units=[32, 32], dnn_dropout=0.5)
+ model = NFM(feature_columns, feature_columns,
+ dnn_hidden_units=[32, 32], dnn_dropout=0.5, device=get_device())
check_model(model, model_name, x, y)
if __name__ == "__main__":
- pass
\ No newline at end of file
+ pass
diff --git a/tests/models/ONN_test.py b/tests/models/ONN_test.py
index e80849da..11e44328 100644
--- a/tests/models/ONN_test.py
+++ b/tests/models/ONN_test.py
@@ -1,22 +1,22 @@
import pytest
+
from deepctr_torch.models import ONN
-from ..utils import check_model, get_test_data, SAMPLE_SIZE
+from ..utils import check_model, get_test_data, SAMPLE_SIZE, get_device
@pytest.mark.parametrize(
'hidden_size,sparse_feature_num',
[((8,), 2)]
)
-def test_NFFM(hidden_size, sparse_feature_num):
-
- model_name = "NFFM"
+def test_ONN(hidden_size, sparse_feature_num):
+ model_name = "ONN"
sample_size = SAMPLE_SIZE
x, y, feature_columns = get_test_data(
- sample_size, sparse_feature_num, sparse_feature_num, hash_flag=True)
+ sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=sparse_feature_num, hash_flag=True)
- model = ONN(feature_columns, feature_columns, embedding_size=4,
- dnn_hidden_units=[32, 32], dnn_dropout=0.5)
+ model = ONN(feature_columns, feature_columns,
+ dnn_hidden_units=[32, 32], dnn_dropout=0.5, device=get_device())
check_model(model, model_name, x, y)
diff --git a/tests/models/PNN_test.py b/tests/models/PNN_test.py
index 2aacc8ba..0e419a6c 100644
--- a/tests/models/PNN_test.py
+++ b/tests/models/PNN_test.py
@@ -1,7 +1,7 @@
import pytest
from deepctr_torch.models import PNN
-from ..utils import check_model, get_test_data,SAMPLE_SIZE
+from ..utils import check_model, get_test_data, SAMPLE_SIZE, get_device
@pytest.mark.parametrize(
@@ -12,11 +12,12 @@
def test_PNN(use_inner, use_outter, sparse_feature_num):
model_name = "PNN"
sample_size = SAMPLE_SIZE
- x, y, feature_columns = get_test_data(sample_size, sparse_feature_num, sparse_feature_num)
- model = PNN(feature_columns, embedding_size=8, dnn_hidden_units=[32, 32], dnn_dropout=0.5, use_inner=use_inner,
- use_outter=use_outter)
+ x, y, feature_columns = get_test_data(sample_size, sparse_feature_num=sparse_feature_num,
+ dense_feature_num=sparse_feature_num)
+ model = PNN(feature_columns, dnn_hidden_units=[32, 32], dnn_dropout=0.5, use_inner=use_inner,
+ use_outter=use_outter, device=get_device())
check_model(model, model_name, x, y)
if __name__ == "__main__":
- pass
\ No newline at end of file
+ pass
diff --git a/tests/models/WDL_test.py b/tests/models/WDL_test.py
index 12913b76..bea91a8d 100644
--- a/tests/models/WDL_test.py
+++ b/tests/models/WDL_test.py
@@ -1,7 +1,8 @@
# -*- coding: utf-8 -*-
import pytest
+
from deepctr_torch.models import WDL
-from ..utils import get_test_data, SAMPLE_SIZE, check_model
+from ..utils import get_test_data, SAMPLE_SIZE, check_model, get_device
@pytest.mark.parametrize(
@@ -13,12 +14,12 @@ def test_WDL(sparse_feature_num, dense_feature_num):
model_name = "WDL"
sample_size = SAMPLE_SIZE
x, y, feature_columns = get_test_data(
- sample_size, sparse_feature_num, dense_feature_num)
+ sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=dense_feature_num)
model = WDL(feature_columns, feature_columns,
- dnn_hidden_units=[32, 32], dnn_dropout=0.5)
+ dnn_hidden_units=[32, 32], dnn_dropout=0.5, device=get_device())
check_model(model, model_name, x, y)
if __name__ == "__main__":
- pass
\ No newline at end of file
+ pass
diff --git a/tests/models/xDeepFM_test.py b/tests/models/xDeepFM_test.py
index 561a92dc..fda42319 100644
--- a/tests/models/xDeepFM_test.py
+++ b/tests/models/xDeepFM_test.py
@@ -1,28 +1,28 @@
# -*- coding: utf-8 -*-
import pytest
-import torch.nn.functional as F
+
from deepctr_torch.models import xDeepFM
-from ..utils import get_test_data, SAMPLE_SIZE, check_model
+from ..utils import get_test_data, SAMPLE_SIZE, check_model, get_device
@pytest.mark.parametrize(
'dnn_hidden_units,cin_layer_size,cin_split_half,cin_activation,sparse_feature_num,dense_feature_dim',
[((), (), True, 'linear', 1, 2),
- ((8,), (), True, 'linear', 1, 1),
- ((), (8,), True, 'linear', 2, 2),
- ((8,), (8,), False, 'relu', 2, 0)]
+ ((8,), (), True, 'linear', 1, 1),
+ ((), (8,), True, 'linear', 2, 2),
+ ((8,), (8,), False, 'relu', 2, 0)]
)
-def test_xDeepFM(dnn_hidden_units, cin_layer_size, cin_split_half, cin_activation, sparse_feature_num, dense_feature_dim):
-
+def test_xDeepFM(dnn_hidden_units, cin_layer_size, cin_split_half, cin_activation, sparse_feature_num,
+ dense_feature_dim):
model_name = 'xDeepFM'
sample_size = SAMPLE_SIZE
x, y, feature_columns = get_test_data(
- sample_size, sparse_feature_num, sparse_feature_num)
+ sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=sparse_feature_num)
model = xDeepFM(feature_columns, feature_columns, dnn_hidden_units=dnn_hidden_units, cin_layer_size=cin_layer_size,
- cin_split_half=cin_split_half, cin_activation=cin_activation, dnn_dropout=0.5)
+ cin_split_half=cin_split_half, cin_activation=cin_activation, dnn_dropout=0.5, device=get_device())
check_model(model, model_name, x, y)
if __name__ == '__main__':
- pass
\ No newline at end of file
+ pass
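
All the model tests above follow the same 0.2.0 migration pattern: get_test_data is now called with explicit keyword arguments, the embedding_size argument disappears from the model constructors (each SparseFeat produced by get_test_data carries its own embedding_dim), and every model is placed on the device returned by the new get_device() helper. A minimal sketch of that shared pattern, using WDL purely as an example; the absolute tests.utils import path is an assumption for a standalone run, the real tests use the relative ..utils import:

import pytest

from deepctr_torch.models import WDL
from tests.utils import SAMPLE_SIZE, check_model, get_device, get_test_data


@pytest.mark.parametrize('sparse_feature_num,dense_feature_num', [(2, 2)])
def test_model_sketch(sparse_feature_num, dense_feature_num):
    # keyword arguments make the sparse/dense feature counts explicit
    x, y, feature_columns = get_test_data(
        SAMPLE_SIZE, sparse_feature_num=sparse_feature_num, dense_feature_num=dense_feature_num)
    # no embedding_size argument here: each SparseFeat already carries its embedding_dim
    model = WDL(feature_columns, feature_columns,
                dnn_hidden_units=[32, 32], dnn_dropout=0.5, device=get_device())
    check_model(model, 'WDL', x, y)
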
diff --git a/tests/utils.py b/tests/utils.py
index 75b341c3..d60c0ef3 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -1,67 +1,69 @@
# -*- coding: utf-8 -*-
+import os
+
import numpy as np
import torch as torch
-import os
+
from deepctr_torch.inputs import SparseFeat, DenseFeat, VarLenSparseFeat
SAMPLE_SIZE = 64
def gen_sequence(dim, max_len, sample_size):
- return np.array([np.random.randint(0, dim, max_len) for _ in range(sample_size)]), np.random.randint(1, max_len + 1, sample_size)
+ return np.array([np.random.randint(0, dim, max_len) for _ in range(sample_size)]), np.random.randint(1, max_len + 1,
+ sample_size)
+def get_test_data(sample_size=1000, embedding_size=4, sparse_feature_num=1, dense_feature_num=1,
+ sequence_feature=['sum', 'mean', 'max'], classification=True, include_length=False,
+ hash_flag=False, prefix=''):
-def get_test_data(sample_size=1000, sparse_feature_num=1, dense_feature_num=1, sequence_feature=('sum', 'mean', 'max'),
- classification=True, include_length=False, hash_flag=False, prefix=''):
feature_columns = []
+ model_input = {}
+
+
+ if 'weight' in sequence_feature:
+ feature_columns.append(VarLenSparseFeat(
+ SparseFeat(prefix + "weighted_seq", vocabulary_size=2, embedding_dim=embedding_size),
+ maxlen=3, length_name=prefix + "weighted_seq" + "_seq_length", weight_name=prefix + "weight"))
+ s_input, s_len_input = gen_sequence(
+ 2, 3, sample_size)
+
+ model_input[prefix+"weighted_seq"] = s_input
+ model_input[prefix+'weight'] = np.random.randn(sample_size, 3, 1)
+ model_input[prefix+"weighted_seq"+"_seq_length"] = s_len_input
+ sequence_feature.pop(sequence_feature.index('weight'))
+
for i in range(sparse_feature_num):
dim = np.random.randint(1, 10)
- feature_columns.append(SparseFeat(
- prefix+'sparse_feature_'+str(i), dim, hash_flag, torch.int32))
+ feature_columns.append(SparseFeat(prefix + 'sparse_feature_' + str(i), dim, embedding_size, dtype=torch.int32))
for i in range(dense_feature_num):
- feature_columns.append(
- DenseFeat(prefix+'dense_feature_'+str(i), 1, torch.float32))
+ feature_columns.append(DenseFeat(prefix + 'dense_feature_' + str(i), 1, dtype=torch.float32))
for i, mode in enumerate(sequence_feature):
dim = np.random.randint(1, 10)
maxlen = np.random.randint(1, 10)
feature_columns.append(
- VarLenSparseFeat(prefix+'sequence_' + str(i), dim, maxlen, mode))
+ VarLenSparseFeat(SparseFeat(prefix + 'sequence_' + mode, vocabulary_size=dim, embedding_dim=embedding_size),
+ maxlen=maxlen, combiner=mode))
- model_input = []
- sequence_input = []
- sequence_len_input = []
for fc in feature_columns:
- if isinstance(fc, SparseFeat):
- model_input.append(np.random.randint(0, fc.dimension, sample_size))
- elif isinstance(fc, DenseFeat):
- model_input.append(np.random.random(sample_size))
+ if isinstance(fc, SparseFeat):
+ model_input[fc.name] = np.random.randint(0, fc.vocabulary_size, sample_size)
+ elif isinstance(fc, DenseFeat):
+ model_input[fc.name] = np.random.random(sample_size)
else:
s_input, s_len_input = gen_sequence(
- fc.dimension, fc.maxlen, sample_size)
- sequence_input.append(s_input)
- sequence_len_input.append(s_len_input)
+ fc.vocabulary_size, fc.maxlen, sample_size)
+ model_input[fc.name] = s_input
+ if include_length:
+ fc.length_name = prefix+"sequence_"+str(i)+'_seq_length'
+ model_input[prefix+"sequence_"+str(i)+'_seq_length'] = s_len_input
if classification:
y = np.random.randint(0, 2, sample_size)
- while sum(y) < 0.3*sample_size:
- y = np.random.randint(0, 2, sample_size)
else:
y = np.random.random(sample_size)
- x = model_input + sequence_input
- if include_length:
- for i, mode in enumerate(sequence_feature):
- dim = np.random.randint(1, 10)
- maxlen = np.random.randint(1, 10)
- feature_columns.append(
- SparseFeat(prefix+'sequence_' + str(i)+'_seq_length', 1, embedding=False))
-
- x += sequence_len_input
-
- return x, y, feature_columns
+ return model_input, y, feature_columns
def check_model(model, model_name, x, y, check_model_io=True):
@@ -77,7 +79,7 @@ def check_model(model, model_name, x, y, check_model_io=True):
model.compile('adam', 'binary_crossentropy',
metrics=['binary_crossentropy'])
- model.fit(x, y, batch_size=100, epochs=1, validation_split=0.5)
+ model.fit(x, y, batch_size=100, epochs=1, validation_split=0.5, use_double=True)
print(model_name + 'test, train valid pass!')
torch.save(model.state_dict(), model_name + '_weights.h5')
@@ -89,4 +91,11 @@ def check_model(model, model_name, x, y, check_model_io=True):
model = torch.load(model_name + '.h5')
os.remove(model_name + '.h5')
print(model_name + 'test save load model pass!')
- print(model_name + 'test pass!')
\ No newline at end of file
+ print(model_name + 'test pass!')
+
+def get_device(use_cuda=True):
+ device = 'cpu'
+ if use_cuda and torch.cuda.is_available():
+ print('cuda ready...')
+ device = 'cuda:0'
+ return device
\ No newline at end of file
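
For reference, a minimal standalone sketch of how the reworked pieces fit together: feature columns are built the way the new get_test_data does (a VarLenSparseFeat now wraps a SparseFeat, which carries vocabulary_size and embedding_dim), the model input is a dict keyed by feature name instead of a positional list, and the device string follows the same CUDA check as get_device(). Feature names and sizes below are illustrative, not taken from the test suite:

import numpy as np
import torch

from deepctr_torch.inputs import DenseFeat, SparseFeat, VarLenSparseFeat
from deepctr_torch.models import WDL

# same check as get_device(): use the first GPU if available, otherwise stay on CPU
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

sample_size = 64
feature_columns = [
    # embedding_dim is now set per feature instead of per model
    SparseFeat('cat', vocabulary_size=10, embedding_dim=4),
    DenseFeat('num', 1),
    # sequence features wrap a SparseFeat and add maxlen/combiner
    VarLenSparseFeat(SparseFeat('seq_mean', vocabulary_size=10, embedding_dim=4),
                     maxlen=5, combiner='mean'),
]

# model input is a dict keyed by feature name, matching what get_test_data now returns
x = {
    'cat': np.random.randint(0, 10, sample_size),
    'num': np.random.random(sample_size),
    'seq_mean': np.random.randint(0, 10, (sample_size, 5)),
}
y = np.random.randint(0, 2, sample_size)

model = WDL(feature_columns, feature_columns,
            dnn_hidden_units=[32, 32], dnn_dropout=0.5, device=device)
model.compile('adam', 'binary_crossentropy', metrics=['binary_crossentropy'])
model.fit(x, y, batch_size=32, epochs=1, validation_split=0.5)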