Commit
potential root cause fix for #142
lucidrains committed Jun 29, 2024
1 parent d23b27c commit c14fa4d
Showing 2 changed files with 28 additions and 8 deletions.
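The diff below suggests the suspected root cause: with quantize dropout enabled, each distributed rank previously drew its own random dropout index, so ranks could disagree on how many quantizers to run in a given step. The fix seeds the dropout RNG identically on every rank by all-reducing a locally drawn value. Here is a minimal standalone sketch of that technique; the helper name synced_dropout_index is hypothetical and a CPU-friendly backend such as gloo is assumed, so treat it as an illustration rather than code from the commit.

import random
import torch
import torch.distributed as dist

def synced_dropout_index(lo: int, hi: int) -> int:
    # each rank draws a local value; an all_reduce SUM makes the total identical
    # on every rank, and that shared total then seeds a common RNG
    t = torch.tensor(random.randrange(10_000))
    if dist.is_initialized() and dist.get_world_size() > 1:
        dist.all_reduce(t)  # in-place, default op is SUM, returns None
    rng = random.Random(t.item())
    return rng.randrange(lo, hi)  # same index on every rank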
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "vector-quantize-pytorch"
version = "1.14.42"
version = "1.14.43"
description = "Vector Quantization - Pytorch"
authors = [
{ name = "Phil Wang", email = "[email protected]" }
34 changes: 27 additions & 7 deletions vector_quantize_pytorch/residual_vq.py
@@ -1,14 +1,16 @@
from __future__ import annotations
from typing import List

import random
from math import ceil
from functools import partial
from functools import partial, cache
from itertools import zip_longest
from typing import List

import torch
from torch import nn, Tensor
from torch.nn import Module, ModuleList
import torch.nn.functional as F
import torch.distributed as dist
from vector_quantize_pytorch.vector_quantize_pytorch import VectorQuantize

from einops import rearrange, repeat, reduce, pack, unpack
@@ -26,9 +28,15 @@ def default(val, d):
def round_up_multiple(num, mult):
    return ceil(num / mult) * mult

# distributed helpers

@cache
def is_distributed():
    return dist.is_initialized() and dist.get_world_size() > 1

# main class

class ResidualVQ(nn.Module):
class ResidualVQ(Module):
    """ Follows Algorithm 1. in https://arxiv.org/pdf/2107.03312.pdf """
    def __init__(
        self,
@@ -57,7 +65,7 @@ def __init__(
        self.num_quantizers = num_quantizers

        self.accept_image_fmap = accept_image_fmap
        self.layers = nn.ModuleList([VectorQuantize(dim = codebook_dim, codebook_dim = codebook_dim, accept_image_fmap = accept_image_fmap, **kwargs) for _ in range(num_quantizers)])
        self.layers = ModuleList([VectorQuantize(dim = codebook_dim, codebook_dim = codebook_dim, accept_image_fmap = accept_image_fmap, **kwargs) for _ in range(num_quantizers)])

        assert all([not vq.has_projections for vq in self.layers])

@@ -156,7 +164,19 @@ def forward(
        # also prepare null indices and loss

        if should_quantize_dropout:
            rand = random.Random(rand_quantize_dropout_fixed_seed) if exists(rand_quantize_dropout_fixed_seed) else random

            if exists(rand_quantize_dropout_fixed_seed):
                # seed is manually passed in
                rand = random.Random(rand_quantize_dropout_fixed_seed)

            elif is_distributed():
                # in distributed environment, synchronize a random seed value if not given
                t = torch.tensor(random.randrange(10_000))
                dist.all_reduce(t)  # in-place sum across ranks; all_reduce returns None
                dropout_seed = t.item()
                rand = random.Random(dropout_seed)

            else:
                rand = random

            rand_quantize_dropout_index = rand.randrange(self.quantize_dropout_cutoff_index, num_quant)

@@ -227,7 +247,7 @@ def forward(

# grouped residual vq

class GroupedResidualVQ(nn.Module):
class GroupedResidualVQ(Module):
    def __init__(
        self,
        *,
@@ -244,7 +264,7 @@ def __init__(

        self.accept_image_fmap = accept_image_fmap

        self.rvqs = nn.ModuleList([])
        self.rvqs = ModuleList([])

        for _ in range(groups):
            self.rvqs.append(ResidualVQ(
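A hedged usage sketch of how the changed path might be exercised; the constructor arguments follow the repository README, and passing rand_quantize_dropout_fixed_seed as a forward keyword is an assumption based on the variable checked in the diff above.

import torch
from vector_quantize_pytorch import ResidualVQ

residual_vq = ResidualVQ(
    dim = 256,
    num_quantizers = 8,
    codebook_size = 1024,
    quantize_dropout = True,              # enable the dropout branch touched above
    quantize_dropout_cutoff_index = 1
)

x = torch.randn(1, 1024, 256)

# with an explicit seed, the first branch runs and every rank drops the same quantizers;
# omitting it in a distributed run falls through to the all_reduce path instead
quantized, indices, commit_loss = residual_vq(x, rand_quantize_dropout_fixed_seed = 1234)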
