mirror of https://github.com/SWivid/F5-TTS.git
synced 2025-12-12 15:50:07 -08:00

change some infer functions to support two vocoders

README.md (14 changes)
@@ -44,20 +44,18 @@ pip install git+https://github.com/SWivid/F5-TTS.git
 git clone https://github.com/SWivid/F5-TTS.git
 cd F5-TTS
 pip install -e .
 ```
 
-### 3. Init submodule( optional, if you want to change the vocoder from vocos to bigvgan)
-
 ```bash
+# Init submodule (optional, if you want to change the vocoder from vocos to bigvgan)
 git submodule update --init --recursive
 ```
 
-After that, you need to change the `src/third_party/BigVGAN/bigvgan.py` by adding the following code at the beginning of the file.
+After initializing the submodule, you need to modify `src/third_party/BigVGAN/bigvgan.py` by adding the following code at the beginning of the file.
 
 ```python
 import os
 import sys
 sys.path.append(os.path.dirname(os.path.abspath(__file__)))
 ```
 
-### 4. Docker usage
+### 3. Docker usage
 ```bash
 # Build from Dockerfile
 docker build -t f5tts:v1 .
@@ -106,6 +104,10 @@ f5-tts_infer-cli -c custom.toml
 
 # Multi voice. See src/f5_tts/infer/README.md
 f5-tts_infer-cli -c src/f5_tts/infer/examples/multi/story.toml
 
+# Choose Vocoder
+f5-tts_infer-cli --vocoder_name bigvgan --load_vocoder_from_local --ckpt_file <YOUR_CKPT_PATH, e.g. ckpts/model_1250000.pt>
+f5-tts_infer-cli --vocoder_name vocos --load_vocoder_from_local --ckpt_file <YOUR_CKPT_PATH, e.g. ckpts/F5TTS_Base/model_1200000.safetensors>
 ```
 
 ### 3. More instructions
@@ -7,10 +7,16 @@ import torch
 import tqdm
 from cached_path import cached_path
 
-from f5_tts.infer.utils_infer import (hop_length, infer_process, load_model,
-                                      load_vocoder, preprocess_ref_audio_text,
-                                      remove_silence_for_generated_wav,
-                                      save_spectrogram, target_sample_rate)
+from f5_tts.infer.utils_infer import (
+    hop_length,
+    infer_process,
+    load_model,
+    load_vocoder,
+    preprocess_ref_audio_text,
+    remove_silence_for_generated_wav,
+    save_spectrogram,
+    target_sample_rate,
+)
 from f5_tts.model import DiT, UNetT
 from f5_tts.model.utils import seed_everything
@@ -32,6 +38,7 @@ class F5TTS:
         self.target_sample_rate = target_sample_rate
         self.hop_length = hop_length
         self.seed = -1
+        self.extract_backend = vocoder_name
 
         # Set device
         self.device = device or (
@@ -40,12 +47,12 @@ class F5TTS:
 
         # Load models
         self.load_vocoder_model(vocoder_name, local_path)
-        self.load_ema_model(model_type, ckpt_file, vocab_file, ode_method, use_ema)
+        self.load_ema_model(model_type, ckpt_file, vocoder_name, vocab_file, ode_method, use_ema)
 
     def load_vocoder_model(self, vocoder_name, local_path):
         self.vocoder = load_vocoder(vocoder_name, local_path is not None, local_path, self.device)
 
-    def load_ema_model(self, model_type, ckpt_file, vocab_file, ode_method, use_ema):
+    def load_ema_model(self, model_type, ckpt_file, extract_backend, vocab_file, ode_method, use_ema):
         if model_type == "F5-TTS":
             if not ckpt_file:
                 ckpt_file = str(cached_path("hf://SWivid/F5-TTS/F5TTS_Base/model_1200000.safetensors"))
@@ -59,7 +66,9 @@ class F5TTS:
         else:
             raise ValueError(f"Unknown model type: {model_type}")
 
-        self.ema_model = load_model(model_cls, model_cfg, ckpt_file, vocab_file, ode_method, use_ema, self.device)
+        self.ema_model = load_model(
+            model_cls, model_cfg, ckpt_file, extract_backend, vocab_file, ode_method, use_ema, self.device
+        )
 
     def export_wav(self, wav, file_wave, remove_silence=False):
         sf.write(file_wave, wav, self.target_sample_rate)
@@ -102,6 +111,7 @@ class F5TTS:
             gen_text,
             self.ema_model,
             self.vocoder,
+            self.extract_backend,
             show_info=show_info,
             progress=progress,
             target_rms=target_rms,
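Taken together, the api.py changes thread the vocoder choice from the constructor into `infer_process` via `self.extract_backend`. A minimal usage sketch under that reading; the checkpoint path is illustrative, and the `infer(...)` keyword names are assumed rather than shown in this diff:

```python
from f5_tts.api import F5TTS

# vocoder_name selects the decode backend; the checkpoint must match it
# (the bigvgan model ships as .pt here, the vocos model as .safetensors).
tts = F5TTS(model_type="F5-TTS", vocoder_name="bigvgan", ckpt_file="ckpts/model_1250000.pt")
wav, sr, spect = tts.infer(
    ref_file="ref.wav",  # reference audio prompt (illustrative path)
    ref_text="Transcript of the reference audio.",
    gen_text="Text to synthesize with the cloned voice.",
)
```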
@@ -12,9 +12,11 @@ import torchaudio
 from accelerate import Accelerator
 from tqdm import tqdm
 
-from f5_tts.eval.utils_eval import (get_inference_prompt,
-                                    get_librispeech_test_clean_metainfo,
-                                    get_seedtts_testset_metainfo)
+from f5_tts.eval.utils_eval import (
+    get_inference_prompt,
+    get_librispeech_test_clean_metainfo,
+    get_seedtts_testset_metainfo,
+)
 from f5_tts.infer.utils_infer import load_checkpoint, load_vocoder
 from f5_tts.model import CFM, DiT, UNetT
 from f5_tts.model.utils import get_tokenizer
@@ -185,7 +187,7 @@ def main():
            gen = gen[ref_mel_lens[i] : total_mel_lens[i], :].unsqueeze(0)
            gen_mel_spec = gen.permute(0, 2, 1)
            if extract_backend == "vocos":
-               generated_wave = vocoder.decode(gen_mel_spec.cpu())
+               generated_wave = vocoder.decode(gen_mel_spec)
            elif extract_backend == "bigvgan":
                generated_wave = vocoder(gen_mel_spec)
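The same two-branch decode recurs in several hunks below (speech_edit.py, `infer_batch_process`, the trainer). A small helper capturing the pattern — a sketch only, no such function exists in this commit:

```python
import torch

def decode_mel(vocoder, mel: torch.Tensor, extract_backend: str) -> torch.Tensor:
    """Decode a (batch, n_mels, frames) mel spectrogram with either backend."""
    if extract_backend == "vocos":
        return vocoder.decode(mel)  # Vocos exposes an explicit decode() method
    elif extract_backend == "bigvgan":
        return vocoder(mel)  # BigVGAN is an nn.Module; calling it runs forward()
    raise ValueError(f"Unknown vocoder backend: {extract_backend}")
```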
@@ -10,9 +10,13 @@ import soundfile as sf
 import tomli
 from cached_path import cached_path
 
-from f5_tts.infer.utils_infer import (infer_process, load_model, load_vocoder,
-                                      preprocess_ref_audio_text,
-                                      remove_silence_for_generated_wav)
+from f5_tts.infer.utils_infer import (
+    infer_process,
+    load_model,
+    load_vocoder,
+    preprocess_ref_audio_text,
+    remove_silence_for_generated_wav,
+)
 from f5_tts.model import DiT, UNetT
 
 parser = argparse.ArgumentParser(
@@ -108,12 +112,13 @@ speed = args.speed
 wave_path = Path(output_dir) / "infer_cli_out.wav"
 # spectrogram_path = Path(output_dir) / "infer_cli_out.png"
 if args.vocoder_name == "vocos":
-    vocoder_local_path = "../checkpoints/charactr/vocos-mel-24khz"
+    vocoder_local_path = "../checkpoints/vocos-mel-24khz"
 elif args.vocoder_name == "bigvgan":
     vocoder_local_path = "../checkpoints/bigvgan_v2_24khz_100band_256x"
+extract_backend = args.vocoder_name
 
 vocoder = load_vocoder(
-    vocoder_name=args.vocoder_name, is_local=args.load_vocoder_from_local, local_path=vocoder_local_path
+    vocoder_name=extract_backend, is_local=args.load_vocoder_from_local, local_path=vocoder_local_path
 )
@@ -122,11 +127,17 @@ if model == "F5-TTS":
     model_cls = DiT
     model_cfg = dict(dim=1024, depth=22, heads=16, ff_mult=2, text_dim=512, conv_layers=4)
     if ckpt_file == "":
-        repo_name = "F5-TTS"
-        exp_name = "F5TTS_Base"
-        ckpt_step = 1200000
-        ckpt_file = str(cached_path(f"hf://SWivid/{repo_name}/{exp_name}/model_{ckpt_step}.safetensors"))
-        # ckpt_file = f"ckpts/{exp_name}/model_{ckpt_step}.pt"  # .pt | .safetensors; local path
+        if args.vocoder_name == "vocos":
+            repo_name = "F5-TTS"
+            exp_name = "F5TTS_Base"
+            ckpt_step = 1200000
+            ckpt_file = str(cached_path(f"hf://SWivid/{repo_name}/{exp_name}/model_{ckpt_step}.safetensors"))
+            # ckpt_file = f"ckpts/{exp_name}/model_{ckpt_step}.pt"  # .pt | .safetensors; local path
+        elif args.vocoder_name == "bigvgan":
+            repo_name = "F5-TTS"
+            exp_name = "F5TTS_Base_bigvgan"
+            ckpt_step = 1250000
+            ckpt_file = str(cached_path(f"hf://SWivid/{repo_name}/{exp_name}/model_{ckpt_step}.pt"))
 
 elif model == "E2-TTS":
     model_cls = UNetT
@@ -145,10 +156,10 @@ elif model == "E2-TTS":
 
 
 print(f"Using {model}...")
-ema_model = load_model(model_cls, model_cfg, ckpt_file, vocab_file)
+ema_model = load_model(model_cls, model_cfg, ckpt_file, args.vocoder_name, vocab_file)
 
 
-def main_process(ref_audio, ref_text, text_gen, model_obj, remove_silence, speed):
+def main_process(ref_audio, ref_text, text_gen, model_obj, extract_backend, remove_silence, speed):
     main_voice = {"ref_audio": ref_audio, "ref_text": ref_text}
     if "voices" not in config:
         voices = {"main": main_voice}
@@ -183,7 +194,7 @@ def main_process(ref_audio, ref_text, text_gen, model_obj, remove_silence, speed
         ref_text = voices[voice]["ref_text"]
         print(f"Voice: {voice}")
         audio, final_sample_rate, spectragram = infer_process(
-            ref_audio, ref_text, gen_text, model_obj, vocoder, speed=speed
+            ref_audio, ref_text, gen_text, model_obj, vocoder, extract_backend, speed=speed
         )
         generated_audio_segments.append(audio)
@@ -202,7 +213,7 @@ def main_process(ref_audio, ref_text, text_gen, model_obj, remove_silence, speed
 
 
 def main():
-    main_process(ref_audio, ref_text, gen_text, ema_model, remove_silence, speed)
+    main_process(ref_audio, ref_text, gen_text, ema_model, extract_backend, remove_silence, speed)
 
 
 if __name__ == "__main__":
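Read together, the infer_cli.py changes make the `--vocoder_name` flag drive three things: the default checkpoint, the vocoder weights, and the decode dispatch inside `infer_process`. A condensed sketch of the resulting flow, using the names from the diff (paths illustrative):

```python
extract_backend = args.vocoder_name  # "vocos" or "bigvgan"

# 1. Vocoder weights: local dir or HF download, per --load_vocoder_from_local
vocoder = load_vocoder(
    vocoder_name=extract_backend,
    is_local=args.load_vocoder_from_local,
    local_path=vocoder_local_path,
)

# 2. Model checkpoint: the backend now also picks the default checkpoint
ema_model = load_model(model_cls, model_cfg, ckpt_file, args.vocoder_name, vocab_file)

# 3. Inference: extract_backend tells infer_process how to call the vocoder
audio, final_sample_rate, _ = infer_process(
    ref_audio, ref_text, gen_text, ema_model, vocoder, extract_backend, speed=speed
)
```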
@@ -4,8 +4,7 @@ import torch
 import torch.nn.functional as F
 import torchaudio
 
-from f5_tts.infer.utils_infer import (load_checkpoint, load_vocoder,
-                                      save_spectrogram)
+from f5_tts.infer.utils_infer import load_checkpoint, load_vocoder, save_spectrogram
 from f5_tts.model import CFM, DiT, UNetT
 from f5_tts.model.utils import convert_char_to_pinyin, get_tokenizer
@@ -173,20 +172,20 @@ with torch.inference_mode():
         seed=seed,
         edit_mask=edit_mask,
     )
-print(f"Generated mel: {generated.shape}")
+    print(f"Generated mel: {generated.shape}")
 
-# Final result
-generated = generated.to(torch.float32)
-generated = generated[:, ref_audio_len:, :]
-gen_mel_spec = generated.permute(0, 2, 1)
-if extract_backend == "vocos":
-    generated_wave = vocoder.decode(gen_mel_spec.cpu())
-elif extract_backend == "bigvgan":
-    generated_wave = vocoder(gen_mel_spec)
+    # Final result
+    generated = generated.to(torch.float32)
+    generated = generated[:, ref_audio_len:, :]
+    gen_mel_spec = generated.permute(0, 2, 1)
+    if extract_backend == "vocos":
+        generated_wave = vocoder.decode(gen_mel_spec)
+    elif extract_backend == "bigvgan":
+        generated_wave = vocoder(gen_mel_spec)
 
-if rms < target_rms:
-    generated_wave = generated_wave * rms / target_rms
+    if rms < target_rms:
+        generated_wave = generated_wave * rms / target_rms
 
-save_spectrogram(gen_mel_spec[0].cpu().numpy(), f"{output_dir}/speech_edit_out.png")
-torchaudio.save(f"{output_dir}/speech_edit_out.wav", generated_wave.squeeze(0).cpu(), target_sample_rate)
-print(f"Generated wav: {generated_wave.shape}")
+    save_spectrogram(gen_mel_spec[0].cpu().numpy(), f"{output_dir}/speech_edit_out.png")
+    torchaudio.save(f"{output_dir}/speech_edit_out.wav", generated_wave.squeeze(0).cpu(), target_sample_rate)
+    print(f"Generated wav: {generated_wave.shape}")
@@ -94,7 +94,6 @@ def load_vocoder(vocoder_name="vocos", is_local=False, local_path="", device=device):
         vocoder = Vocos.from_hparams(f"{local_path}/config.yaml")
         state_dict = torch.load(f"{local_path}/pytorch_model.bin", map_location="cpu")
         vocoder.load_state_dict(state_dict)
-        vocoder.eval()
+        vocoder = vocoder.eval().to(device)
     else:
         print("Download Vocos from huggingface charactr/vocos-mel-24khz")
@@ -148,6 +147,11 @@ def load_checkpoint(model, ckpt_path, device, dtype, use_ema=True):
             for k, v in checkpoint["ema_model_state_dict"].items()
             if k not in ["initted", "step"]
         }
 
+        for key in ["mel_spec.mel_stft.mel_scale.fb", "mel_spec.mel_stft.spectrogram.window"]:
+            if key in checkpoint["model_state_dict"]:
+                del checkpoint["model_state_dict"][key]
+
         model.load_state_dict(checkpoint["model_state_dict"])
     else:
         if ckpt_type == "safetensors":
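The two keys deleted above are the mel frontend's filterbank and STFT window buffers. This reads as a compatibility shim: checkpoints saved when those buffers were still registered on the model (or computed with one vocoder's mel settings) would otherwise trip a strict `load_state_dict`, so they are dropped before loading and the running config's values are used instead.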
@@ -160,7 +164,9 @@ def load_checkpoint(model, ckpt_path, device, dtype, use_ema=True):
     # load model for inference
 
 
-def load_model(model_cls, model_cfg, ckpt_path, vocab_file="", ode_method=ode_method, use_ema=True, device=device):
+def load_model(
+    model_cls, model_cfg, ckpt_path, extract_backend, vocab_file="", ode_method=ode_method, use_ema=True, device=device
+):
     if vocab_file == "":
         vocab_file = str(files("f5_tts").joinpath("infer/examples/vocab.txt"))
         tokenizer = "custom"
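`load_model` gains `extract_backend` as its fourth positional parameter, ahead of `vocab_file`; every call site in this commit passes the vocoder name there. A direct-call sketch under that signature (model config copied from infer_cli.py, checkpoint path illustrative):

```python
from f5_tts.infer.utils_infer import load_model, load_vocoder
from f5_tts.model import DiT

model_cfg = dict(dim=1024, depth=22, heads=16, ff_mult=2, text_dim=512, conv_layers=4)

# Fourth positional argument: which vocoder's mel representation the checkpoint targets
ema_model = load_model(DiT, model_cfg, "ckpts/model_1250000.pt", "bigvgan")
vocoder = load_vocoder(vocoder_name="bigvgan")
```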
@@ -282,6 +288,7 @@ def infer_process(
     gen_text,
     model_obj,
     vocoder,
+    extract_backend,
     show_info=print,
     progress=tqdm,
     target_rms=target_rms,
@@ -307,6 +314,7 @@ def infer_process(
         gen_text_batches,
         model_obj,
         vocoder,
+        extract_backend,
         progress=progress,
         target_rms=target_rms,
         cross_fade_duration=cross_fade_duration,
@@ -328,6 +336,7 @@ def infer_batch_process(
     gen_text_batches,
     model_obj,
     vocoder,
+    extract_backend,
     progress=tqdm,
     target_rms=0.1,
     cross_fade_duration=0.15,
@@ -384,7 +393,7 @@ def infer_batch_process(
             generated = generated[:, ref_audio_len:, :]
             generated_mel_spec = generated.permute(0, 2, 1)
             if extract_backend == "vocos":
-                generated_wave = vocoder.decode(generated_mel_spec.cpu())
+                generated_wave = vocoder.decode(generated_mel_spec)
             elif extract_backend == "bigvgan":
                 generated_wave = vocoder(generated_mel_spec)
             if rms < target_rms:
@@ -19,8 +19,14 @@ from torch.nn.utils.rnn import pad_sequence
 from torchdiffeq import odeint
 
 from f5_tts.model.modules import MelSpec
-from f5_tts.model.utils import (default, exists, lens_to_mask, list_str_to_idx,
-                                list_str_to_tensor, mask_from_frac_lengths)
+from f5_tts.model.utils import (
+    default,
+    exists,
+    lens_to_mask,
+    list_str_to_idx,
+    list_str_to_tensor,
+    mask_from_frac_lengths,
+)
 
 
 class CFM(nn.Module):
@@ -92,12 +98,6 @@ class CFM(nn.Module):
         edit_mask=None,
     ):
         self.eval()
-
-        assert next(self.parameters()).dtype == torch.float32 or next(self.parameters()).dtype == torch.float16, print(
-            "Only support fp16 and fp32 inference currently"
-        )
-        cond = cond.to(next(self.parameters()).dtype)
 
         # raw wave
 
         if cond.ndim == 2:
@@ -105,6 +105,11 @@ class CFM(nn.Module):
             cond = cond.permute(0, 2, 1)
             assert cond.shape[-1] == self.num_channels
 
+        assert next(self.parameters()).dtype == torch.float32 or next(self.parameters()).dtype == torch.float16, print(
+            "Only support fp16 and fp32 inference currently"
+        )
+        cond = cond.to(next(self.parameters()).dtype)
+
         batch, cond_seq_len, device = *cond.shape[:2], cond.device
         if not exists(lens):
             lens = torch.full((batch,), cond_seq_len, device=device, dtype=torch.long)
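Relocating the dtype assertion and the `cond.to(...)` cast from the top of `sample` (previous hunk) to after the raw-wave branch means audio input is first converted to a mel spectrogram in its original dtype and only then cast to the model's dtype, presumably so the mel frontend never sees fp16 audio; the commit itself does not state the motivation.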
@@ -123,7 +123,7 @@ def get_vocos_mel_spectrogram(
         center=True,
         normalized=False,
         norm=None,
-    )
+    ).to(waveform.device)
     if len(waveform.shape) == 3:
         waveform = waveform.squeeze(1)  # 'b 1 nw -> b nw'
@@ -187,8 +187,7 @@ class Trainer:
 
     def train(self, train_dataset: Dataset, num_workers=16, resumable_with_seed: int = None):
         if self.log_samples:
-            from f5_tts.infer.utils_infer import (cfg_strength, load_vocoder,
-                                                  nfe_step, sway_sampling_coef)
+            from f5_tts.infer.utils_infer import cfg_strength, load_vocoder, nfe_step, sway_sampling_coef
 
             vocoder = load_vocoder(vocoder_name=self.vocoder_name)
             target_sample_rate = self.accelerator.unwrap_model(self.model).mel_spec.mel_stft.sample_rate
@@ -315,7 +314,7 @@ class Trainer:
                     self.save_checkpoint(global_step)
 
                 if self.log_samples and self.accelerator.is_local_main_process:
-                    ref_audio, ref_audio_len = vocoder.decode(batch["mel"][0].unsqueeze(0).cpu()), mel_lengths[0]
+                    ref_audio, ref_audio_len = vocoder.decode(batch["mel"][0].unsqueeze(0)), mel_lengths[0]
                     torchaudio.save(f"{log_samples_path}/step_{global_step}_ref.wav", ref_audio, target_sample_rate)
                     with torch.inference_mode():
                         generated, _ = self.accelerator.unwrap_model(self.model).sample(
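The dropped `.cpu()` matches the decode changes in the inference paths above: with `load_vocoder` now moving Vocos onto the target device (`vocoder = vocoder.eval().to(device)` in the utils_infer hunk), mel tensors are decoded on the device where they already live instead of round-tripping through the CPU.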