Fork of https://github.com/alokprasad/fastspeech_squeezewave that also fixes denoising in SqueezeWave.


import torch
import numpy as np
import transformer.Constants as Constants


class Beam():
    ''' Beam search '''

    def __init__(self, size, device='cpu'):

        self.size = size
        self._done = False

        # The score for each translation on the beam.
        self.scores = torch.zeros((size,), dtype=torch.float, device=device)
        self.all_scores = []

        # The backpointers at each time-step.
        self.prev_ks = []

        # The outputs at each time-step.
        self.next_ys = [torch.full(
            (size,), Constants.PAD, dtype=torch.long, device=device)]
        self.next_ys[0][0] = Constants.BOS
    def get_current_state(self):
        "Get the outputs for the current timestep."
        return self.get_tentative_hypothesis()

    def get_current_origin(self):
        "Get the backpointers for the current timestep."
        return self.prev_ks[-1]

    @property
    def done(self):
        return self._done
    def advance(self, word_prob):
        "Update beam status and check if finished or not."
        num_words = word_prob.size(1)

        # Sum the previous scores.
        if len(self.prev_ks) > 0:
            beam_lk = word_prob + self.scores.unsqueeze(1).expand_as(word_prob)
        else:
            beam_lk = word_prob[0]

        flat_beam_lk = beam_lk.view(-1)
        best_scores, best_scores_id = flat_beam_lk.topk(
            self.size, 0, True, True)
        self.all_scores.append(self.scores)
        self.scores = best_scores

        # best_scores_id indexes the flattened (beam x word) array, so recover
        # which beam (row) and which word (column) each score came from.
        prev_k = best_scores_id // num_words
        self.prev_ks.append(prev_k)
        self.next_ys.append(best_scores_id - prev_k * num_words)

        # End condition is when top-of-beam is EOS.
        if self.next_ys[-1][0].item() == Constants.EOS:
            self._done = True
            self.all_scores.append(self.scores)

        return self._done
    def sort_scores(self):
        "Sort the scores in descending order."
        return torch.sort(self.scores, 0, True)
    def get_the_best_score_and_idx(self):
        "Get the score of the best hypothesis in the beam."
        scores, ids = self.sort_scores()
        return scores[0], ids[0]
    def get_tentative_hypothesis(self):
        "Get the decoded sequence for the current timestep."
        if len(self.next_ys) == 1:
            dec_seq = self.next_ys[0].unsqueeze(1)
        else:
            _, keys = self.sort_scores()
            hyps = [self.get_hypothesis(k) for k in keys]
            hyps = [[Constants.BOS] + h for h in hyps]
            dec_seq = torch.LongTensor(hyps)

        return dec_seq

    def get_hypothesis(self, k):
        """ Walk back to construct the full hypothesis. """
        hyp = []
        for j in range(len(self.prev_ks) - 1, -1, -1):
            hyp.append(self.next_ys[j + 1][k])
            k = self.prev_ks[j][k]

        return list(map(lambda x: x.item(), hyp[::-1]))
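
The file above only defines the beam container; the driver loop that feeds it lives elsewhere in the repository. The following is a minimal usage sketch, not code from this repo: it assumes the class is importable as transformer.Beam, and it replaces the real decoder output with a toy random log-probability matrix just to show how advance() and the accessors fit together.

import torch
import torch.nn.functional as F
from transformer.Beam import Beam   # assumed module path for the class above

beam_size, vocab_size, max_steps = 3, 12, 20
beam = Beam(beam_size, device='cpu')

for _ in range(max_steps):
    if beam.done:
        break
    # Fake decoder output: one row of log-probs per live hypothesis.
    # In real decoding this would be the model's log-softmax over the vocabulary.
    word_prob = F.log_softmax(torch.randn(beam_size, vocab_size), dim=1)
    beam.advance(word_prob)

# Each row is a partial hypothesis (prefixed with BOS), best-scoring first.
print(beam.get_tentative_hypothesis())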