
step4 error #599

Open
pencail opened this issue Dec 15, 2024 · 0 comments


pencail commented Dec 15, 2024

Below is the error log. I'm running this on Google Colab.

INFO:OUTPUT_MODEL:{'train': {'log_interval': 10, 'eval_interval': 100, 'seed': 1234, 'epochs': 10000, 'learning_rate': 0.0002, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 16, 'fp16_run': True, 'lr_decay': 0.999875, 'segment_size': 8192, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0}, 'data': {'training_files': 'final_annotation_train.txt', 'validation_files': 'final_annotation_val.txt', 'text_cleaners': ['cjke_cleaners2'], 'max_wav_value': 32768.0, 'sampling_rate': 22050, 'filter_length': 1024, 'hop_length': 256, 'win_length': 1024, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': None, 'add_blank': True, 'n_speakers': 1, 'cleaned_text': True}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 256}, 'symbols': ['_', ',', '.', '!', '?', '-', '~', '…', 'N', 'Q', 'a', 'b', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'ɑ', 'æ', 'ʃ', 'ʑ', 'ç', 'ɯ', 'ɪ', 'ɔ', 'ɛ', 'ɹ', 'ð', 'ə', 'ɫ', 'ɥ', 'ɸ', 'ʊ', 'ɾ', 'ʒ', 'θ', 'β', 'ŋ', 'ɦ', '⁼', 'ʰ', '`', '^', '#', '*', '=', 'ˈ', 'ˌ', '→', '↓', '↑', ' '], 'speakers': {'xuefeng': 0}, 'model_dir': '././OUTPUT_MODEL', 'max_epochs': 200, 'cont': True, 'drop_speaker_embed': False, 'train_with_pretrained_model': True, 'preserved': 4}
INFO:torch.distributed.distributed_c10d:Added key: store_based_barrier_key:1 to store for rank: 0
INFO:torch.distributed.distributed_c10d:Rank 0: Completed store-based barrier for key:store_based_barrier_key:1 with 1 nodes.
Failed to find latest checkpoint, loading G_0.pth...
Train with pretrained model...
INFO:OUTPUT_MODEL:emb_g.weight is not in the checkpoint
INFO:OUTPUT_MODEL:Loaded checkpoint './pretrained_models/G_0.pth' (iteration 419)
INFO:OUTPUT_MODEL:Loaded checkpoint './pretrained_models/D_0.pth' (iteration 419)
  0% 0/2 [00:11<?, ?it/s]
Traceback (most recent call last):
  File "finetune_speaker_v2.py", line 372, in <module>
    main()
  File "finetune_speaker_v2.py", line 55, in main
    mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,))
  File "/content/vits/lib/python3.8/site-packages/torch/multiprocessing/spawn.py", line 240, in spawn
    return start_processes(fn, args, nprocs, join, daemon, start_method='spawn')
  File "/content/vits/lib/python3.8/site-packages/torch/multiprocessing/spawn.py", line 198, in start_processes
    while not context.join():
  File "/content/vits/lib/python3.8/site-packages/torch/multiprocessing/spawn.py", line 160, in join
    raise ProcessRaisedException(msg, error_index, failed_process.pid)
torch.multiprocessing.spawn.ProcessRaisedException: 

-- Process 0 terminated with the following error:
Traceback (most recent call last):
  File "/content/vits/lib/python3.8/site-packages/torch/multiprocessing/spawn.py", line 69, in _wrap
    fn(i, *args)
  File "/content/VITS-fast-fine-tuning/finetune_speaker_v2.py", line 154, in run
    train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, eval_loader], logger, [writer, writer_eval])
  File "/content/VITS-fast-fine-tuning/finetune_speaker_v2.py", line 174, in train_and_evaluate
    for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers) in enumerate(tqdm(train_loader)):
  File "/content/vits/lib/python3.8/site-packages/tqdm/std.py", line 1181, in __iter__
    for obj in iterable:
  File "/content/vits/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 628, in __next__
    data = self._next_data()
  File "/content/vits/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 1333, in _next_data
    return self._process_data(data)
  File "/content/vits/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 1359, in _process_data
    data.reraise()
  File "/content/vits/lib/python3.8/site-packages/torch/_utils.py", line 543, in reraise
    raise exception
RuntimeError: Caught RuntimeError in DataLoader worker process 0.
Original Traceback (most recent call last):
  File "/content/vits/lib/python3.8/site-packages/torch/utils/data/_utils/worker.py", line 302, in _worker_loop
    data = fetcher.fetch(index)
  File "/content/vits/lib/python3.8/site-packages/torch/utils/data/_utils/fetch.py", line 61, in fetch
    return self.collate_fn(data)
  File "/content/VITS-fast-fine-tuning/data_utils.py", line 156, in __call__
    spec_padded[i, :, :spec.size(1)] = spec
RuntimeError: expand(torch.FloatTensor{[2, 513, 65]}, size=[2, 513]): the number of sizes provided (2) must be greater or equal to the number of dimensions in the tensor (3)
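For reference, the failing assignment in data_utils.py (`spec_padded[i, :, :spec.size(1)] = spec`) expects each spectrogram to be 2-D, shaped (n_freq, n_frames); the 513 in the error matches filter_length 1024 // 2 + 1 from the config. The traceback instead shows a 3-D tensor of shape [2, 513, 65], and the extra leading dimension of 2 looks like a stereo channel axis, i.e. some of the training wavs may not be mono. Below is a minimal, untested sketch (my own guess, not code from this repo, assuming torchaudio is available in the Colab environment; the directory path is a placeholder) that downmixes any multi-channel wavs to mono, after which preprocessing should produce 2-D spectrograms:

```python
# Hypothetical repair sketch: scan the training wavs and downmix any
# multi-channel (e.g. stereo) files to mono in place.
import glob

import torchaudio

# NOTE: placeholder directory -- point this at wherever the training
# wav files actually live.
for path in glob.glob("./wavs/**/*.wav", recursive=True):
    wav, sr = torchaudio.load(path)           # wav shape: (channels, samples)
    if wav.size(0) > 1:                       # multi-channel file found
        print(f"{path}: {wav.size(0)} channels -> downmixing to mono")
        mono = wav.mean(dim=0, keepdim=True)  # average channels, keep (1, samples)
        torchaudio.save(path, mono, sr)
```

If the wavs are already mono, the extra dimension must be introduced later in preprocessing, in which case inspecting the saved .spec.pt tensors' shapes would be the next step.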