Commit

remove name overrides, fix docstrings
brainsqueeze committed Jul 15, 2022
1 parent 0e025ff commit 542e683
Showing 3 changed files with 10 additions and 10 deletions.
14 changes: 7 additions & 7 deletions text2vec/models/components/attention.py
@@ -37,7 +37,7 @@ class ScaledDotAttention(layers.Layer):
     """

     def __init__(self):
-        super().__init__(name="ScaledDotAttention")
+        super().__init__()
         self.neg_inf = tf.constant(-1e9, dtype=tf.float32)

     # pylint: disable=missing-function-docstring
@@ -78,7 +78,7 @@ class BahdanauAttention(layers.Layer):
     dims = 12
     encoded_sequences = tf.random.uniform(shape=[4, 7, dims])
     decoded_sequences = tf.random.uniform(shape=[4, 11, dims])
-    attention = BahdanauAttention(dims)
+    attention = BahdanauAttention(dims, drop_rate=0.25)

     # self attention
     attention(encoded_sequences)
@@ -89,7 +89,7 @@ class BahdanauAttention(layers.Layer):
     """

     def __init__(self, size: int, drop_rate: float = 0.):
-        super().__init__(name="BahdanauAttention")
+        super().__init__()

         self.hidden = layers.Dense(units=size, activation="tanh")
         self.U = tf.Variable(initializers.GlorotUniform()(shape=[size]), name="U", dtype=tf.float32, trainable=True)
@@ -141,15 +141,15 @@ class SingleHeadAttention(layers.Layer):
     V = tf.random.uniform(shape=[4, 5, 12])

     # 25% dropout rate
-    attention = SingleHeadAttention(emb_dims=12, keep_prob=0.75)
+    attention = SingleHeadAttention(emb_dims=12, drop_rate=0.25)

     # masking and dropout turned on
     attention(inputs=(Q, K, V), mask_future=True, training=True)
     ```
     """

     def __init__(self, emb_dims, num_layers: int = 8, drop_rate: float = 0.):
-        super().__init__(name="SingleHeadAttention")
+        super().__init__()
         assert isinstance(num_layers, int) and num_layers > 0

         dims = emb_dims
@@ -205,15 +205,15 @@ class MultiHeadAttention(layers.Layer):
     V = tf.random.uniform(shape=[4, 5, 12])

     # 25% dropout rate
-    attention = MultiHeadAttention(emb_dims=12, keep_prob=0.75)
+    attention = MultiHeadAttention(emb_dims=12, drop_rate=0.25)

     # masking and dropout turned on
     attention(inputs=(Q, K, V), mask_future=True, training=True)
     ```
     """

     def __init__(self, emb_dims: int, num_layers: int = 8, drop_rate: float = 0.):
-        super().__init__(name="MultiHeadAttention")
+        super().__init__()
         self.layer_heads = [
             SingleHeadAttention(emb_dims=emb_dims, num_layers=num_layers, drop_rate=drop_rate)
             for _ in range(num_layers)
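For reference, a brief usage sketch of the attention layers touched above, following the updated docstring examples (drop_rate in place of keep_prob). The import path is inferred from the file location; tensor shapes and the final name check are illustrative assumptions, not verbatim repository code. With the hard-coded name= overrides gone, each instance gets a unique Keras-generated name rather than every instance of a class sharing one literal name.

```python
# Usage sketch based on the docstring examples in this diff; import path and
# tensor shapes are assumptions rather than verbatim repository code.
import tensorflow as tf

from text2vec.models.components.attention import BahdanauAttention, MultiHeadAttention

dims = 12
encoded_sequences = tf.random.uniform(shape=[4, 7, dims])

# Bahdanau-style self attention with a 25% dropout rate, per the updated docstring
bahdanau = BahdanauAttention(dims, drop_rate=0.25)
context = bahdanau(encoded_sequences)

# Multi-head scaled dot-product attention; dropout is only active when training=True
Q = tf.random.uniform(shape=[4, 7, dims])
K = tf.random.uniform(shape=[4, 5, dims])
V = tf.random.uniform(shape=[4, 5, dims])
mha = MultiHeadAttention(emb_dims=dims, drop_rate=0.25)
attended = mha(inputs=(Q, K, V), mask_future=True, training=True)

# With the explicit name= overrides removed, Keras assigns each instance its own
# auto-generated name instead of every instance sharing the same literal string.
print(mha.name, MultiHeadAttention(emb_dims=dims).name)
```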
2 changes: 1 addition & 1 deletion text2vec/models/components/text_inputs.py
@@ -26,7 +26,7 @@ class Tokenizer(layers.Layer):
     """

     def __init__(self, sep: str = ' '):
-        super().__init__(name="Tokenizer")
+        super().__init__()
         self.sep = sep

     def call(self, corpus):
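Likewise, a minimal sketch of the Tokenizer layer after this change. The constructor and sep attribute come from the diff; the behavior of call() (splitting each string on the separator) is an assumption, since its body is not shown here.

```python
# Minimal Tokenizer usage; the output format (e.g. a tf.RaggedTensor of tokens)
# is assumed, because call() is not fully visible in this diff.
import tensorflow as tf

from text2vec.models.components.text_inputs import Tokenizer

tokenizer = Tokenizer(sep=' ')
corpus = tf.constant(["the quick brown fox", "jumps over the lazy dog"])
tokens = tokenizer(corpus)  # assumed: each string split on the separator
```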
4 changes: 2 additions & 2 deletions text2vec/models/components/utils.py
@@ -28,7 +28,7 @@ class LayerNorm(layers.Layer):
     """

     def __init__(self, epsilon: float = 1e-8, scale: float = 1.0, bias: float = 0):
-        super().__init__(name="LayerNorm")
+        super().__init__()
         self.epsilon = tf.constant(epsilon, dtype=tf.float32)
         self.scale = tf.constant(scale, dtype=tf.float32)
         self.bias = tf.constant(bias, dtype=tf.float32)
@@ -60,7 +60,7 @@ class TensorProjection(layers.Layer):
     """

     def __init__(self):
-        super().__init__(name="TensorProjection")
+        super().__init__()

     def call(self, x, projection_vector):
         projection_vector = tf.math.l2_normalize(projection_vector, axis=-1)
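Finally, a hedged sketch of the two utility layers in utils.py. The constructor arguments are taken from the diff; the tensor shapes, and in particular the per-example projection vector passed to TensorProjection, are assumptions.

```python
# Sketch of LayerNorm and TensorProjection usage; shapes are illustrative guesses.
import tensorflow as tf

from text2vec.models.components.utils import LayerNorm, TensorProjection

x = tf.random.uniform(shape=[4, 7, 12])

# Layer normalization with the default epsilon/scale/bias shown in the diff
norm = LayerNorm(epsilon=1e-8, scale=1.0, bias=0)
normed = norm(x)

# TensorProjection L2-normalizes the projection vector inside call(); here it is
# assumed to take one vector per batch example, matching the feature width of x.
projector = TensorProjection()
projected = projector(x, projection_vector=tf.random.uniform(shape=[4, 12]))
```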
