Fix: capture compile error on Windows

This commit is contained in:
刘鑫
2025-09-18 19:23:13 +08:00
parent cef6aefb3d
commit dc6b6d1d1c
3 changed files with 5 additions and 3 deletions

View File

@@ -36,7 +36,7 @@ dependencies = [
"addict", "addict",
"wetext", "wetext",
"modelscope>=1.22.0", "modelscope>=1.22.0",
"datasets>=2,<4", "datasets>=3,<4",
"huggingface-hub", "huggingface-hub",
"pydantic", "pydantic",
"tqdm", "tqdm",

View File

@@ -1,6 +1,7 @@
import torch import torch
import torchaudio import torchaudio
import os import os
import re
import tempfile import tempfile
from huggingface_hub import snapshot_download from huggingface_hub import snapshot_download
from .model.voxcpm import VoxCPMModel from .model.voxcpm import VoxCPMModel
@@ -131,6 +132,7 @@ class VoxCPM:
raise ValueError("prompt_wav_path and prompt_text must both be provided or both be None") raise ValueError("prompt_wav_path and prompt_text must both be provided or both be None")
text = text.replace("\n", " ") text = text.replace("\n", " ")
text = re.sub(r'\s+', ' ', text)
temp_prompt_wav_path = None temp_prompt_wav_path = None
try: try:

View File

@@ -160,8 +160,8 @@ class VoxCPMModel(nn.Module):
self.feat_encoder_step = torch.compile(self.feat_encoder, mode="reduce-overhead", fullgraph=True) self.feat_encoder_step = torch.compile(self.feat_encoder, mode="reduce-overhead", fullgraph=True)
self.feat_decoder.estimator = torch.compile(self.feat_decoder.estimator, mode="reduce-overhead", fullgraph=True) self.feat_decoder.estimator = torch.compile(self.feat_decoder.estimator, mode="reduce-overhead", fullgraph=True)
except Exception as e: except Exception as e:
print(e) print(f"Error: {e}")
print("VoxCPMModel can not be optimized by torch.compile, using original forward_step functions") print("Warning: VoxCPMModel can not be optimized by torch.compile, using original forward_step functions")
self.base_lm.forward_step = self.base_lm.forward_step self.base_lm.forward_step = self.base_lm.forward_step
self.residual_lm.forward_step = self.residual_lm.forward_step self.residual_lm.forward_step = self.residual_lm.forward_step
self.feat_encoder_step = self.feat_encoder self.feat_encoder_step = self.feat_encoder