Commit 85f03c5

residual LFQ can accept a soft clamp input value and appropriately scale down across residual layers
lucidrains committed May 9, 2024
1 parent 4d26b61 commit 85f03c5
Showing 3 changed files with 7 additions and 2 deletions.
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "vector-quantize-pytorch"
-version = "1.14.20"
+version = "1.14.22"
 description = "Vector Quantization - Pytorch"
 authors = [
     { name = "Phil Wang", email = "[email protected]" }
2 changes: 1 addition & 1 deletion vector_quantize_pytorch/lookup_free_quantization.py
@@ -152,7 +152,7 @@ def __init__(
         # whether to soft clamp the input value from -value to value

         self.soft_clamp_input_value = soft_clamp_input_value
-        assert not exists(soft_clamp_input_value) or soft_clamp_input_value >= 1.
+        assert not exists(soft_clamp_input_value) or soft_clamp_input_value >= codebook_scale

         # for no auxiliary loss, during inference

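The relaxed assert presumably lets the clamp bound track each layer's codebook_scale, which falls below 1 for deeper residual layers (see the residual_lfq.py change below). For reference, a soft clamp squashes the input smoothly into (-value, value) rather than hard clipping it; a minimal sketch of one common tanh-based formulation, not necessarily the exact expression used inside LFQ:

import torch

def soft_clamp(x: torch.Tensor, value: float) -> torch.Tensor:
    # smoothly bounds x to the open interval (-value, value); approximately the identity near zero
    return (x / value).tanh() * value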
5 changes: 5 additions & 0 deletions vector_quantize_pytorch/residual_lfq.py
@@ -39,6 +39,7 @@ def __init__(
         quantize_dropout = False,
         quantize_dropout_cutoff_index = 0,
         quantize_dropout_multiple_of = 1,
+        soft_clamp_input_value = None,
         **kwargs
     ):
         super().__init__()
@@ -59,11 +60,15 @@ def __init__(
             lfq = LFQ(
                 dim = codebook_dim,
                 codebook_scale = codebook_scale,
+                soft_clamp_input_value = soft_clamp_input_value,
                 **kwargs
             )

             self.layers.append(lfq)

+            if exists(soft_clamp_input_value):
+                soft_clamp_input_value *= 0.5
+
         assert all([not lfq.has_projections for lfq in self.layers])

         self.quantize_dropout = quantize_dropout and num_quantizers > 1
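Taken together, the new soft_clamp_input_value kwarg is forwarded to every LFQ layer and halved after each one, so deeper residual layers, which quantize progressively smaller residuals, see a progressively tighter clamp. A minimal usage sketch, assuming the constructor otherwise follows the library's documented ResidualLFQ example (dim, codebook_size, num_quantizers) and that the forward returns (quantized, indices, aux_loss) as in the README; the starting clamp value of 10. is illustrative:

import torch
from vector_quantize_pytorch import ResidualLFQ

residual_lfq = ResidualLFQ(
    dim = 256,
    codebook_size = 256,
    num_quantizers = 8,
    soft_clamp_input_value = 10.   # new in this commit; internally halved for each successive residual layer
)

x = torch.randn(1, 1024, 256)

quantized, indices, aux_loss = residual_lfq(x)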
