3 files changed, +7 −2

pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "vector-quantize-pytorch"
-version = "1.14.20"
+version = "1.14.22"
 description = "Vector Quantization - Pytorch"
 authors = [
     { name = "Phil Wang" , email = "lucidrains@gmail.com" }
vector_quantize_pytorch/lookup_free_quantization.py
@@ -152,7 +152,7 @@ def __init__(
         # whether to soft clamp the input value from -value to value

         self.soft_clamp_input_value = soft_clamp_input_value
-        assert not exists(soft_clamp_input_value) or soft_clamp_input_value >= 1.
+        assert not exists(soft_clamp_input_value) or soft_clamp_input_value >= codebook_scale

         # for no auxiliary loss, during inference
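The tightened assertion replaces the fixed lower bound of 1. with codebook_scale: LFQ's code values sit at ±codebook_scale, so a clamp bound narrower than the codebook scale would squash inputs into a range the quantizer's own code values never reach. Below is a minimal sketch of a tanh-based soft clamp matching the comment's "from -value to value" semantics; treat it as an illustration of the idea, not necessarily LFQ's exact implementation.

import torch

def soft_clamp(x, value):
    # tanh maps onto (-1, 1); rescaling by `value` gives a smooth clamp
    # onto (-value, value) that is near-identity for |x| << value
    return (x / value).tanh() * value

x = torch.randn(4) * 3.
print(soft_clamp(x, value = 2.))  # every element lies strictly inside (-2., 2.)

Unlike a hard clamp, this keeps gradients nonzero everywhere, which is presumably why a soft clamp is applied to the quantizer input.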
vector_quantize_pytorch/residual_lfq.py
@@ -39,6 +39,7 @@ def __init__(
         quantize_dropout = False,
         quantize_dropout_cutoff_index = 0,
         quantize_dropout_multiple_of = 1,
+        soft_clamp_input_value = None,
         **kwargs
     ):
         super().__init__()
@@ -59,11 +60,15 @@ def __init__(
             lfq = LFQ(
                 dim = codebook_dim,
                 codebook_scale = codebook_scale,
+                soft_clamp_input_value = soft_clamp_input_value,
                 **kwargs
             )

             self.layers.append(lfq)

+            if exists(soft_clamp_input_value):
+                soft_clamp_input_value *= 0.5
+
         assert all([not lfq.has_projections for lfq in self.layers])

         self.quantize_dropout = quantize_dropout and num_quantizers > 1