bokesyo committed on
Commit
ce5aba2
·
verified ·
1 Parent(s): 4d54644

Fix an issue in TTS generate to avoid a gap between training and inference

Browse files
Files changed (1) hide show
  1. modeling_minicpmo.py +1 -1
modeling_minicpmo.py CHANGED
@@ -2986,7 +2986,7 @@ class ConditionalChatTTS(PreTrainedModel):
2986
  inputs_embeds = torch.stack(code_emb, 3).sum(3)
2987
 
2988
  position_ids = torch.tensor(
2989
- [past_key_values[0][0].shape[2] + 1], dtype=torch.long, device=self.device
2990
  ).unsqueeze(0)
2991
 
2992
  cache_position = position_ids.clone()
 
2986
  inputs_embeds = torch.stack(code_emb, 3).sum(3)
2987
 
2988
  position_ids = torch.tensor(
2989
+ [past_key_values[0][0].shape[2]], dtype=torch.long, device=self.device
2990
  ).unsqueeze(0)
2991
 
2992
  cache_position = position_ids.clone()