|
import os
from dataclasses import dataclass, field
from typing import Dict

from TTS.tts.configs.shared_configs import BaseTTSConfig
from TTS.tts.layers.bark.model import GPTConfig
from TTS.tts.layers.bark.model_fine import FineGPTConfig
from TTS.tts.models.bark import BarkAudioConfig
from TTS.utils.generic_utils import get_user_data_dir
| 10 | + |
| 11 | + |
@dataclass
class BarkConfig(BaseTTSConfig):
    """Bark TTS configuration.

    Args:
        model (str): model name that registers the model. Defaults to "bark".
        audio (BarkAudioConfig): audio configuration. Defaults to BarkAudioConfig().
        num_chars (int): number of characters in the alphabet. Defaults to 0.
        semantic_config (GPTConfig): semantic model configuration. Defaults to GPTConfig().
        fine_config (FineGPTConfig): fine model configuration. Defaults to FineGPTConfig().
        coarse_config (GPTConfig): coarse model configuration. Defaults to GPTConfig().
        CONTEXT_WINDOW_SIZE (int): GPT context window size. Defaults to 1024.
        SEMANTIC_RATE_HZ (float): semantic tokens rate in Hz. Defaults to 49.9.
        SEMANTIC_VOCAB_SIZE (int): semantic vocabulary size. Defaults to 10_000.
        CODEBOOK_SIZE (int): encodec codebook size. Defaults to 1024.
        N_COARSE_CODEBOOKS (int): number of coarse codebooks. Defaults to 2.
        N_FINE_CODEBOOKS (int): number of fine codebooks. Defaults to 8.
        COARSE_RATE_HZ (int): coarse tokens rate in Hz. Defaults to 75.
        SAMPLE_RATE (int): sample rate. Defaults to 24_000.
        USE_SMALLER_MODELS (bool): use smaller models. Defaults to False.
        TEXT_ENCODING_OFFSET (int): text encoding offset. Defaults to 10_048.
        SEMANTIC_PAD_TOKEN (int): semantic pad token. Defaults to 10_000.
        TEXT_PAD_TOKEN (int): text pad token. Defaults to 129_595.
        SEMANTIC_INFER_TOKEN (int): semantic infer token. Defaults to 129_599.
        COARSE_SEMANTIC_PAD_TOKEN (int): coarse semantic pad token. Defaults to 12_048.
        COARSE_INFER_TOKEN (int): coarse infer token. Defaults to 12_050.
        REMOTE_BASE_URL (str): remote base url. Defaults to "https://huggingface.co/erogol/bark/tree/main/".
        REMOTE_MODEL_PATHS (Dict): remote model paths; populated in ``__post_init__``.
        LOCAL_MODEL_PATHS (Dict): local model paths; populated in ``__post_init__``.
        SMALL_REMOTE_MODEL_PATHS (Dict): small remote model paths; populated in ``__post_init__``.
        CACHE_DIR (str): local cache directory for downloaded checkpoints. Defaults to get_user_data_dir("tts/suno/bark_v0").
        DEF_SPEAKER_DIR (str): default directory to store speaker values for voice cloning. Defaults to get_user_data_dir("tts/bark_v0/speakers").
    """

    model: str = "bark"
    # Mutable config objects use default_factory so every BarkConfig instance
    # gets its own copy instead of all instances sharing one class-level object.
    audio: BarkAudioConfig = field(default_factory=BarkAudioConfig)
    num_chars: int = 0
    semantic_config: GPTConfig = field(default_factory=GPTConfig)
    fine_config: FineGPTConfig = field(default_factory=FineGPTConfig)
    coarse_config: GPTConfig = field(default_factory=GPTConfig)
    CONTEXT_WINDOW_SIZE: int = 1024
    SEMANTIC_RATE_HZ: float = 49.9
    SEMANTIC_VOCAB_SIZE: int = 10_000
    CODEBOOK_SIZE: int = 1024
    N_COARSE_CODEBOOKS: int = 2
    N_FINE_CODEBOOKS: int = 8
    COARSE_RATE_HZ: int = 75
    SAMPLE_RATE: int = 24_000
    USE_SMALLER_MODELS: bool = False

    TEXT_ENCODING_OFFSET: int = 10_048
    SEMANTIC_PAD_TOKEN: int = 10_000
    TEXT_PAD_TOKEN: int = 129_595
    SEMANTIC_INFER_TOKEN: int = 129_599
    COARSE_SEMANTIC_PAD_TOKEN: int = 12_048
    COARSE_INFER_TOKEN: int = 12_050

    REMOTE_BASE_URL: str = "https://huggingface.co/erogol/bark/tree/main/"
    REMOTE_MODEL_PATHS: Dict = None
    LOCAL_MODEL_PATHS: Dict = None
    SMALL_REMOTE_MODEL_PATHS: Dict = None
    CACHE_DIR: str = str(get_user_data_dir("tts/suno/bark_v0"))
    DEF_SPEAKER_DIR: str = str(get_user_data_dir("tts/bark_v0/speakers"))

    def __post_init__(self):
        # Remote paths are URLs, so they are built with plain concatenation
        # (REMOTE_BASE_URL ends with "/") rather than os.path.join, which would
        # insert backslashes on Windows.
        self.REMOTE_MODEL_PATHS = {
            "text": {
                "path": self.REMOTE_BASE_URL + "text_2.pt",
                "checksum": "54afa89d65e318d4f5f80e8e8799026a",
            },
            "coarse": {
                "path": self.REMOTE_BASE_URL + "coarse_2.pt",
                "checksum": "8a98094e5e3a255a5c9c0ab7efe8fd28",
            },
            "fine": {
                "path": self.REMOTE_BASE_URL + "fine_2.pt",
                "checksum": "59d184ed44e3650774a2f0503a48a97b",
            },
        }
        # Local paths are filesystem paths, so os.path.join is correct here.
        self.LOCAL_MODEL_PATHS = {
            "text": os.path.join(self.CACHE_DIR, "text_2.pt"),
            "coarse": os.path.join(self.CACHE_DIR, "coarse_2.pt"),
            "fine": os.path.join(self.CACHE_DIR, "fine_2.pt"),
            "hubert_tokenizer": os.path.join(self.CACHE_DIR, "tokenizer.pth"),
            "hubert": os.path.join(self.CACHE_DIR, "hubert.pt"),
        }
        self.SMALL_REMOTE_MODEL_PATHS = {
            "text": {"path": self.REMOTE_BASE_URL + "text.pt"},
            "coarse": {"path": self.REMOTE_BASE_URL + "coarse.pt"},
            "fine": {"path": self.REMOTE_BASE_URL + "fine.pt"},
        }
        # Expose the sample rate under the lowercase name the rest of the
        # TTS codebase reads.
        self.sample_rate = self.SAMPLE_RATE  # pylint: disable=attribute-defined-outside-init
0 commit comments