Mirror of https://github.com/OpenBMB/VoxCPM, synced 2025-12-12 11:58:11 +00:00
Update: VoxCPM1.5 and fine-tuning support
conf/voxcpm_v1.5/voxcpm_finetune_all.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
pretrained_path: /path/to/VoxCPM1.5/
train_manifest: /path/to/train.jsonl
val_manifest: null
sample_rate: 44100
batch_size: 16
grad_accum_steps: 1 # gradient accumulation steps; values >1 raise the effective batch size without increasing memory use
num_workers: 2
num_iters: 2000
log_interval: 10
valid_interval: 1000
save_interval: 1000
learning_rate: 0.00001
weight_decay: 0.01
warmup_steps: 100
max_steps: 2000
max_batch_tokens: 8192 # max tokens per batch; e.g. with max_batch_tokens=16384 and batch_size=4, each sample can hold at most 4096 tokens
save_path: /path/to/checkpoints/finetune_all
tensorboard: /path/to/logs/finetune_all
lambdas:
  loss/diff: 1.0
  loss/stop: 1.0
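For context, a minimal sketch of how these values interact, assuming the config is read with PyYAML; the consuming script below is illustrative and not part of this commit:

```python
import yaml  # pip install pyyaml

# Load the full fine-tuning config added above.
with open("conf/voxcpm_v1.5/voxcpm_finetune_all.yaml") as f:
    cfg = yaml.safe_load(f)

# Gradient accumulation: each optimizer step aggregates grad_accum_steps
# micro-batches, so the effective batch size grows without extra memory.
effective_batch = cfg["batch_size"] * cfg["grad_accum_steps"]  # 16 * 1 = 16

# Token budget: a packed batch holds at most max_batch_tokens tokens,
# so the per-sample cap is max_batch_tokens / batch_size.
per_sample_tokens = cfg["max_batch_tokens"] // cfg["batch_size"]  # 8192 // 16 = 512

print(f"effective batch size: {effective_batch}")
print(f"max tokens per sample: {per_sample_tokens}")
```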
conf/voxcpm_v1.5/voxcpm_finetune_lora.yaml (new file, 28 lines)
@@ -0,0 +1,28 @@
pretrained_path: /path/to/VoxCPM1.5/
train_manifest: /path/to/train.jsonl
val_manifest: null
sample_rate: 44100
batch_size: 16
grad_accum_steps: 1 # gradient accumulation steps; values >1 raise the effective batch size without increasing memory use
num_workers: 2
num_iters: 2000
log_interval: 10
valid_interval: 1000
save_interval: 1000
learning_rate: 0.0001
weight_decay: 0.01
warmup_steps: 100
max_steps: 2000
max_batch_tokens: 8192 # max tokens per batch; e.g. with max_batch_tokens=16384 and batch_size=4, each sample can hold at most 4096 tokens
save_path: /path/to/checkpoints/finetune_lora
tensorboard: /path/to/logs/finetune_lora
lambdas:
  loss/diff: 1.0
  loss/stop: 1.0
lora:
  enable_lm: true
  enable_dit: true
  enable_proj: false
  r: 32
  alpha: 16
  dropout: 0.0