@@ -93,8 +93,7 @@ class MelGANGenerator(nn.Layer):
         initialize(self, init_type)
 
         # for compatibility
-        if nonlinear_activation == "LeakyReLU":
-            nonlinear_activation = "leakyrelu"
+        nonlinear_activation = nonlinear_activation.lower()
 
         # check hyper parameters is valid
         assert channels >= np.prod(upsample_scales)
@@ -328,8 +327,7 @@ class MelGANDiscriminator(nn.Layer):
         super().__init__()
 
         # for compatibility
-        if nonlinear_activation == "LeakyReLU":
-            nonlinear_activation = "leakyrelu"
+        nonlinear_activation = nonlinear_activation.lower()
 
         # initialize parameters
         initialize(self, init_type)
@@ -479,8 +477,7 @@ class MelGANMultiScaleDiscriminator(nn.Layer):
         initialize(self, init_type)
 
         # for compatibility
-        if nonlinear_activation == "LeakyReLU":
-            nonlinear_activation = "leakyrelu"
+        nonlinear_activation = nonlinear_activation.lower()
 
         self.discriminators = nn.LayerList()
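
All three hunks make the same change: instead of special-casing the exact string "LeakyReLU", each constructor now lowercases whatever activation name the caller passes, so any casing resolves to the same key before the activation is looked up. A minimal sketch of the effect, assuming a hypothetical lowercase-keyed activation table (`get_activation` and `_ACTIVATIONS` below are illustrative, not the actual PaddleSpeech helpers):

    from paddle import nn

    # Hypothetical lowercase-keyed activation table; illustrative only,
    # not the actual PaddleSpeech registry.
    _ACTIVATIONS = {
        "leakyrelu": nn.LeakyReLU,
        "relu": nn.ReLU,
    }

    def get_activation(name: str, **kwargs) -> nn.Layer:
        # Old behavior: only the exact string "LeakyReLU" was remapped to
        # "leakyrelu"; any other casing fell through and missed the table.
        # New behavior: lowercase once, so every casing hits the same key.
        return _ACTIVATIONS[name.lower()](**kwargs)

    # Both spellings now resolve to the same layer class.
    assert isinstance(get_activation("LeakyReLU", negative_slope=0.2), nn.LeakyReLU)
    assert isinstance(get_activation("leakyrelu"), nn.LeakyReLU)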