Multiprocessing with a pre-loaded trained model (via Python 3)

Problem description

Here is my situation: I have a trained speech-synthesis model. I want to speed up synthesis with multiprocessing by pre-loading the model into a process on each CPU and then continuously feeding sentences in for text-to-speech.

Here is the script I tried:

####################################################################
#!/usr/bin/python3
####################################################################

from multiprocessing import Process, Pool, cpu_count
import os,time

####################################################################

from tacotron.demo_synthesizer import Synthesizer
from splitting_sent import splitting_para
import tensorflow as tf
from datasets import audio

####################################################################

from pypinyin import pinyin, Style

####################################################################

BASE_DIR = os.path.split(os.path.realpath(__file__))[0]
VOICE = BASE_DIR + "/tmp"
TXT = BASE_DIR + "/txt"
os.makedirs(VOICE, exist_ok=True)
os.makedirs(TXT, exist_ok=True)

####################################################################

def syn(py):
    # note: the model is re-loaded for every single sentence passed to the pool
    synthesizer = Synthesizer()
    synthesizer.load("/path/to/the/model")
    wav_name = time.time()                       # timestamp used as the file name
    wav_path = VOICE + "/" + str(wav_name)
    wav = synthesizer.synthesize(py)
    audio.save_wav(wav, wav_path)

if __name__ == '__main__':

    with open(os.path.join(TXT, "content.txt"), "r") as f:
        lines = f.read().splitlines()
    lines = "".join(lines)

    sentences = splitting_para(lines)
    # splitting paragraph into individual sentences.

    py_list = []
    for sent in sentences:
        py_sent = pinyin(sent, style=Style.TONE3)
        py_sent = " ".join([i[0] for i in py_sent if i[0].isalnum()])
        py_list.append(py_sent)
    # since this is Chinese TTS, converting the Chinese characters into pinyin is a necessary preprocessing step.

    print('Run the main process (%s).' % (os.getpid()))
    mainStart = time.time()
    p = Pool(cpu_count())
    for py in py_list:
        p.apply_async(syn, args=(py,))

    print('Waiting for all subprocesses done ...')
    p.close()
    p.join()
    print('All subprocesses done')
    mainEnd = time.time()
    print('All process ran %0.2f seconds.' % (mainEnd-mainStart))

I am stuck on this: I can only pre-load 12 models into 12 processes to synthesize random sentences. The next 12 sentences, however, cannot be fed into the pre-loaded models, because the processes terminate once the first batch of 12 sentence-to-speech jobs has finished. I am completely lost here. :(

Any suggestions would be greatly appreciated. :)

Tags: python, tensorflow, text-to-speech, multiprocessing

Solution

I got the answer from a friend, as follows:

####################################################################

import multiprocessing
from multiprocessing import Process, Pool, cpu_count, Queue
import os, time, sys

####################################################################

from tacotron.demo_synthesizer import Synthesizer
from splitting_sent import splitting_para
import tensorflow as tf
from datasets import audio

####################################################################

from pypinyin import pinyin, Style

####################################################################

BASE_DIR = os.path.split(os.path.realpath(__file__))[0]
VOICE = BASE_DIR + "/tmp"
TXT = BASE_DIR + "/txt"
os.makedirs(VOICE, exist_ok=True)
os.makedirs(TXT, exist_ok=True)

####################################################################

num_cpu = cpu_count() - 1

# one work queue per worker process
q = [Queue() for _ in range(num_cpu)]

def syn(id):
    # each worker loads the model exactly once, then keeps pulling sentences
    # from its own queue until the queue is drained
    synthesizer = Synthesizer()
    synthesizer.load("/home/chris/Pictures/tts-server/logs-Tacotron/model.ckpt-1255000")
    print("LOADED at {} CPU".format(id))
    while not q[id].empty():
        input_py = q[id].get(True)
        print("Starting decode:", id)
        wav_name = time.time()                   # timestamp used as the file name
        wav_path = VOICE + "/" + str(wav_name)
        wav = synthesizer.synthesize(input_py)
        audio.save_wav(wav, wav_path)
        print("Decoded:", id)

if __name__ == '__main__':

    with open(os.path.join(TXT, "content.txt"), "r") as f:
        lines = f.read().splitlines()
    lines = "".join(lines)

    sentences = splitting_para(lines)

    py_list = []
    for sent in sentences:
        py_sent = pinyin(sent, style=Style.TONE3)
        py_sent = " ".join([i[0] for i in py_sent if i[0].isalnum()])
        py_list.append(py_sent)

    # fill each worker's queue BEFORE starting the workers, so a worker that
    # finishes loading its model cannot find an empty queue and exit early
    for index, py in enumerate(py_list):
        q[index % num_cpu].put(py)

    # one long-lived process per CPU, each loading the model exactly once
    workers = []
    for x in range(num_cpu):
        p = multiprocessing.Process(target=syn, args=(x,))
        p.start()
        workers.append(p)

    for p in workers:
        p.join()
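
For reference, the same "load the model once per process, then keep feeding it sentences" pattern can also be written with multiprocessing.Pool and an initializer, which avoids managing per-worker queues by hand. The sketch below is only an illustration of that pattern, not something tested against this setup: it reuses the Synthesizer, splitting_para, pypinyin and audio.save_wav calls exactly as they appear in the scripts above, while init_worker, _synthesizer and CHECKPOINT are names introduced here for the example.

####################################################################

from multiprocessing import Pool, cpu_count
import os, time

from tacotron.demo_synthesizer import Synthesizer
from splitting_sent import splitting_para
from datasets import audio
from pypinyin import pinyin, Style

BASE_DIR = os.path.split(os.path.realpath(__file__))[0]
VOICE = BASE_DIR + "/tmp"
TXT = BASE_DIR + "/txt"
os.makedirs(VOICE, exist_ok=True)

CHECKPOINT = "/home/chris/Pictures/tts-server/logs-Tacotron/model.ckpt-1255000"

_synthesizer = None  # one model instance per worker process

def init_worker(checkpoint_path):
    # runs exactly once in each worker: load the model a single time
    global _synthesizer
    _synthesizer = Synthesizer()
    _synthesizer.load(checkpoint_path)
    print("LOADED in worker", os.getpid())

def syn(py):
    # every sentence handed to this worker reuses the already-loaded model
    wav = _synthesizer.synthesize(py)
    wav_path = VOICE + "/" + str(time.time())
    audio.save_wav(wav, wav_path)
    return wav_path

if __name__ == '__main__':
    with open(os.path.join(TXT, "content.txt"), "r") as f:
        lines = "".join(f.read().splitlines())

    py_list = []
    for sent in splitting_para(lines):
        py_sent = pinyin(sent, style=Style.TONE3)
        py_list.append(" ".join(i[0] for i in py_sent if i[0].isalnum()))

    # one worker per CPU (minus one), each loading the model once via the initializer
    with Pool(cpu_count() - 1, initializer=init_worker, initargs=(CHECKPOINT,)) as pool:
        for wav_path in pool.imap_unordered(syn, py_list):
            print("Decoded:", wav_path)

With Pool(initializer=...), init_worker runs once in every worker process, so each process pays the model-loading cost a single time; imap_unordered then streams the whole py_list through the already-loaded models, no matter how many sentences there are.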
