@@ -2,8 +2,7 @@ import numpy as np
 import torch
 import torch.nn.functional as F
 from torch import nn
 
-from modelscope.utils.logger import get_logger
-logger = get_logger()
+
 
 
@@ -17,14 +16,12 @@ class EncoderDecoderAttractor(nn.Module):
         self.n_units = n_units
 
     def forward_core(self, xs, zeros):
-        logger.info("xs: ".format(xs))
-        ilens = torch.from_numpy(np.array([x.shape[0] for x in xs])).to(torch.float32).to(xs[0].device)
-        logger.info("ilens: ".format(ilens))
+        ilens = torch.from_numpy(np.array([x.shape[0] for x in xs])).to(torch.int64)
         xs = [self.enc0_dropout(x) for x in xs]
         xs = nn.utils.rnn.pad_sequence(xs, batch_first=True, padding_value=-1)
         xs = nn.utils.rnn.pack_padded_sequence(xs, ilens, batch_first=True, enforce_sorted=False)
         _, (hx, cx) = self.encoder(xs)
-        zlens = torch.from_numpy(np.array([z.shape[0] for z in zeros])).to(torch.float32).to(zeros[0].device)
+        zlens = torch.from_numpy(np.array([z.shape[0] for z in zeros])).to(torch.int64)
         max_zlen = torch.max(zlens).to(torch.int).item()
         zeros = [self.enc0_dropout(z) for z in zeros]
         zeros = nn.utils.rnn.pad_sequence(zeros, batch_first=True, padding_value=-1)
@@ -50,4 +47,4 @@ class EncoderDecoderAttractor(nn.Module):
         zeros = [torch.zeros(max_n_speakers, self.n_units).to(torch.float32).to(xs[0].device) for _ in xs]
         attractors = self.forward_core(xs, zeros)
         probs = [torch.sigmoid(torch.flatten(self.counter(att))) for att in attractors]
-        return attractors, probs
+        return attractors, probs