Bug fix and refine cloud training for DS2.

Summary:
1. Add missing is_local argument (when set False, use pserver).
2. Raise an exception if the cp operation fails.
3. Run cloud mkdir if the cloud path for uploading does not exist.
4. Fix a bug using common path ./local_manifest for all nodes. (convert to /local_manifest)
5. Refine coding style.
pull/2/head
Xinghai Sun 7 years ago
parent b417681251
commit 0e79ee37a4

@ -0,0 +1,17 @@
"""Set up paths for DS2"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import sys
def add_path(path):
    """Prepend ``path`` to ``sys.path`` unless it is already listed.

    :param path: Filesystem path to make importable.
    :type path: basestring
    """
    if path in sys.path:
        return
    sys.path.insert(0, path)


# Make the project root (the parent of this script's directory) importable,
# so sibling packages such as data_utils resolve regardless of the cwd.
this_dir = os.path.dirname(__file__)
proj_path = os.path.join(this_dir, '..')
add_path(proj_path)

@ -1,17 +1,22 @@
# Configure input data set in local filesystem # Configure input data set in local filesystem
TRAIN_MANIFEST="/home/work/demo/ds2/pcloud/models/deep_speech_2/datasets/manifest.dev" TRAIN_MANIFEST="../datasets/manifest.dev"
TEST_MANIFEST="/home/work/demo/ds2/pcloud/models/deep_speech_2/datasets/manifest.dev" DEV_MANIFEST="../datasets/manifest.dev"
VOCAB_FILE="/home/work/demo/ds2/pcloud/models/deep_speech_2/datasets/vocab/eng_vocab.txt" VOCAB_FILE="../datasets/vocab/eng_vocab.txt"
MEAN_STD_FILE="/home/work/demo/ds2/pcloud/models/deep_speech_2/mean_std.npz" MEAN_STD_FILE="../mean_std.npz"
# Configure output path in PaddleCloud filesystem # Configure output path in PaddleCloud filesystem
CLOUD_DATA_DIR="/pfs/dlnel/home/demo/deepspeech2/data" CLOUD_DATA_DIR="/pfs/dlnel/home/sunxinghai@baidu.com/deepspeech2/data"
CLOUD_MODEL_DIR="/pfs/dlnel/home/demo/deepspeech2/model" CLOUD_MODEL_DIR="/pfs/dlnel/home/sunxinghai@baidu.com/deepspeech2/model"
# Configure cloud resources
NUM_CPU=12
NUM_GPU=4
NUM_NODE=2
MEMORY="10Gi"
IS_LOCAL="False"
# Pack and upload local data to PaddleCloud filesystem # Pack and upload local data to PaddleCloud filesystem
python upload_data.py \ python upload_data.py \
--train_manifest_path=${TRAIN_MANIFEST} \ --train_manifest_path=${TRAIN_MANIFEST} \
--test_manifest_path=${TEST_MANIFEST} \ --dev_manifest_path=${DEV_MANIFEST} \
--vocab_file=${VOCAB_FILE} \ --vocab_file=${VOCAB_FILE} \
--mean_std_file=${MEAN_STD_FILE} \ --mean_std_file=${MEAN_STD_FILE} \
--cloud_data_path=${CLOUD_DATA_DIR} --cloud_data_path=${CLOUD_DATA_DIR}
@ -21,23 +26,23 @@ then
exit 1 exit 1
fi fi
JOB_NAME=deepspeech`date +%Y%m%d%H%M%S` # Submit job to PaddleCloud
JOB_NAME=deepspeech-`date +%Y%m%d%H%M%S`
DS2_PATH=${PWD%/*} DS2_PATH=${PWD%/*}
cp -f pcloud_train.sh ${DS2_PATH} cp -f pcloud_train.sh ${DS2_PATH}
# Configure computation resource and submit job to PaddleCloud
paddlecloud submit \ paddlecloud submit \
-image bootstrapper:5000/wanghaoshuang/pcloud_ds2:latest \ -image bootstrapper:5000/wanghaoshuang/pcloud_ds2:latest \
-jobname ${JOB_NAME} \ -jobname ${JOB_NAME} \
-cpu 4 \ -cpu ${NUM_CPU} \
-gpu 4 \ -gpu ${NUM_GPU} \
-memory 10Gi \ -memory ${MEMORY} \
-parallelism 2 \ -parallelism ${NUM_NODE} \
-pscpu 1 \ -pscpu 1 \
-pservers 1 \ -pservers 1 \
-psmemory 10Gi \ -psmemory ${MEMORY} \
-passes 1 \ -passes 1 \
-entry "sh pcloud_train.sh ${CLOUD_DATA_DIR} ${CLOUD_MODEL_DIR}" \ -entry "sh pcloud_train.sh ${CLOUD_DATA_DIR} ${CLOUD_MODEL_DIR} ${NUM_CPU} ${NUM_GPU} ${IS_LOCAL}" \
${DS2_PATH} ${DS2_PATH}
rm ${DS2_PATH}/pcloud_train.sh rm ${DS2_PATH}/pcloud_train.sh

@ -1,28 +1,36 @@
DATA_PATH=$1 DATA_PATH=$1
MODEL_PATH=$2 MODEL_PATH=$2
NUM_CPU=$3
NUM_GPU=$4
IS_LOCAL=$5
TRAIN_MANI=${DATA_PATH}/cloud.train.manifest TRAIN_MANI=${DATA_PATH}/cloud.train.manifest
DEV_MANI=${DATA_PATH}/cloud.test.manifest DEV_MANI=${DATA_PATH}/cloud.dev.manifest
TRAIN_TAR=${DATA_PATH}/cloud.train.tar TRAIN_TAR=${DATA_PATH}/cloud.train.tar
DEV_TAR=${DATA_PATH}/cloud.test.tar DEV_TAR=${DATA_PATH}/cloud.dev.tar
VOCAB_PATH=${DATA_PATH}/vocab.txt VOCAB_PATH=${DATA_PATH}/vocab.txt
MEAN_STD_FILE=${DATA_PATH}/mean_std.npz MEAN_STD_FILE=${DATA_PATH}/mean_std.npz
# split train data for each pcloud node # split train data for each pcloud node
python ./cloud/split_data.py \ python ./cloud/split_data.py \
--in_manifest_path=$TRAIN_MANI \ --in_manifest_path=${TRAIN_MANI} \
--data_tar_path=$TRAIN_TAR \ --data_tar_path=${TRAIN_TAR} \
--out_manifest_path='./local.train.manifest' --out_manifest_path='/local.train.manifest'
# split dev data for each pcloud node # split dev data for each pcloud node
python ./cloud/split_data.py \ python ./cloud/split_data.py \
--in_manifest_path=$DEV_MANI \ --in_manifest_path=${DEV_MANI} \
--data_tar_path=$DEV_TAR \ --data_tar_path=${DEV_TAR} \
--out_manifest_path='./local.test.manifest' --out_manifest_path='/local.dev.manifest'
# run train
python train.py \ python train.py \
--use_gpu=1 \ --use_gpu=1 \
--mean_std_filepath=$MEAN_STD_FILE \ --trainer_count=${NUM_GPU} \
--train_manifest_path='./local.train.manifest' \ --num_threads_data=${NUM_CPU} \
--dev_manifest_path='./local.test.manifest' \ --is_local=${IS_LOCAL} \
--vocab_filepath=$VOCAB_PATH \ --mean_std_filepath=${MEAN_STD_FILE} \
--train_manifest_path='/local.train.manifest' \
--dev_manifest_path='/local.dev.manifest' \
--vocab_filepath=${VOCAB_PATH} \
--output_model_dir=${MODEL_PATH} --output_model_dir=${MODEL_PATH}

@ -6,6 +6,7 @@ This script should be called in paddle cloud.
from __future__ import absolute_import from __future__ import absolute_import
from __future__ import division from __future__ import division
from __future__ import print_function from __future__ import print_function
import os import os
import json import json
import argparse import argparse

@ -1,30 +1,31 @@
"""This tool is used for preparing data for DeepSpeech2 trainning on paddle cloud. """This script is used for preparing data for DeepSpeech2 trainning on paddle
cloud.
Steps: Steps:
1. Read original manifest and get the local path of sound files. 1. Read original manifest and get the local path of sound files.
2. Tar all local sound files into one tar file. 2. Tar all local sound files into one tar file.
3. Modify original manifest to remove the local path information. 3. Modify original manifest to remove the local path information.
Finally, we will get a tar file and a manifest with sound file name, duration Finally, we will get a tar file and a new manifest.
and text.
""" """
from __future__ import absolute_import from __future__ import absolute_import
from __future__ import division from __future__ import division
from __future__ import print_function from __future__ import print_function
import json import json
import os import os
import tarfile import tarfile
import sys import sys
import argparse import argparse
import shutil import shutil
sys.path.append('../')
from data_utils.utils import read_manifest
from subprocess import call from subprocess import call
import _init_paths
from data_utils.utils import read_manifest
TRAIN_TAR = "cloud.train.tar" TRAIN_TAR = "cloud.train.tar"
TRAIN_MANIFEST = "cloud.train.manifest" TRAIN_MANIFEST = "cloud.train.manifest"
TEST_TAR = "cloud.test.tar" DEV_TAR = "cloud.dev.tar"
TEST_MANIFEST = "cloud.test.manifest" DEV_MANIFEST = "cloud.dev.manifest"
VOCAB_FILE = "vocab.txt" VOCAB_FILE = "vocab.txt"
MEAN_STD_FILE = "mean_std.npz" MEAN_STD_FILE = "mean_std.npz"
@ -33,41 +34,41 @@ parser.add_argument(
"--train_manifest_path", "--train_manifest_path",
default="../datasets/manifest.train", default="../datasets/manifest.train",
type=str, type=str,
help="Manifest file of train data. (default: %(default)s)") help="Manifest file path for train data. (default: %(default)s)")
parser.add_argument( parser.add_argument(
"--test_manifest_path", "--dev_manifest_path",
default="../datasets/manifest.test", default="../datasets/manifest.dev",
type=str, type=str,
help="Manifest file of test data. (default: %(default)s)") help="Manifest file path for validation data. (default: %(default)s)")
parser.add_argument( parser.add_argument(
"--vocab_file", "--vocab_file",
default="../datasets/vocab/eng_vocab.txt", default="../datasets/vocab/eng_vocab.txt",
type=str, type=str,
help="Vocab file to be uploaded to paddlecloud. (default: %(default)s)") help="Vocabulary file to be uploaded to paddlecloud. "
"(default: %(default)s)")
parser.add_argument( parser.add_argument(
"--mean_std_file", "--mean_std_file",
default="../mean_std.npz", default="../mean_std.npz",
type=str, type=str,
help="mean_std file to be uploaded to paddlecloud. (default: %(default)s)") help="Normalizer's statistics (mean and stddev) file to be uploaded to "
"paddlecloud. (default: %(default)s)")
parser.add_argument( parser.add_argument(
"--cloud_data_path", "--cloud_data_path",
required=True, required=True,
type=str, type=str,
help="Destination path on paddlecloud. (default: %(default)s)") help="Destination path on paddlecloud. (default: %(default)s)")
args = parser.parse_args()
parser.add_argument( parser.add_argument(
"--local_tmp_path", "--local_tmp_path",
default="./tmp/", default="./tmp/",
type=str, type=str,
help="Local directory for storing temporary data. (default: %(default)s)") help="Local directory for storing temporary data. (default: %(default)s)")
args = parser.parse_args() args = parser.parse_args()
def pack_data(manifest_path, out_tar_path, out_manifest_path): def pack_data(manifest_path, out_tar_path, out_manifest_path):
'''1. According to the manifest, tar sound files into out_tar_path """1. According to the manifest, tar sound files into out_tar_path.
2. Generate a new manifest for output tar file 2. Generate a new manifest for output tar file.
''' """
out_tar = tarfile.open(out_tar_path, 'w') out_tar = tarfile.open(out_tar_path, 'w')
manifest = read_manifest(manifest_path) manifest = read_manifest(manifest_path)
results = [] results = []
@ -83,11 +84,19 @@ def pack_data(manifest_path, out_tar_path, out_manifest_path):
out_tar.close() out_tar.close()
def pcloud_mkdir(dir):
    """Create directory ``dir`` in the PaddleCloud filesystem.

    Shells out to the ``paddlecloud mkdir`` command line tool.

    :param dir: Destination directory path on PaddleCloud.
    :type dir: basestring
    :raises IOError: If the ``paddlecloud mkdir`` command exits non-zero.
    """
    ret = call(['paddlecloud', 'mkdir', dir])
    if ret != 0:
        raise IOError("PaddleCloud mkdir failed: %s." % dir)
def pcloud_cp(src, dst): def pcloud_cp(src, dst):
"""Copy src from local filesytem to dst in PaddleCloud filesystem. """Copy src from local filesytem to dst in PaddleCloud filesystem,
or downlowd src from PaddleCloud filesystem to dst in local filesystem.
""" """
ret = call(['paddlecloud', 'cp', src, dst]) if call(['paddlecloud', 'cp', src, dst]) != 0:
return ret raise IOError("PaddleCloud cp failed: from [%s] to [%s]." % (src, dst))
def pcloud_exist(path): def pcloud_exist(path):
@ -100,48 +109,34 @@ def pcloud_exist(path):
if __name__ == '__main__': if __name__ == '__main__':
cloud_train_manifest = os.path.join(args.cloud_data_path, TRAIN_MANIFEST) cloud_train_manifest = os.path.join(args.cloud_data_path, TRAIN_MANIFEST)
cloud_train_tar = os.path.join(args.cloud_data_path, TRAIN_TAR) cloud_train_tar = os.path.join(args.cloud_data_path, TRAIN_TAR)
cloud_test_manifest = os.path.join(args.cloud_data_path, TEST_MANIFEST) cloud_dev_manifest = os.path.join(args.cloud_data_path, DEV_MANIFEST)
cloud_test_tar = os.path.join(args.cloud_data_path, TEST_TAR) cloud_dev_tar = os.path.join(args.cloud_data_path, DEV_TAR)
cloud_vocab_file = os.path.join(args.cloud_data_path, VOCAB_FILE) cloud_vocab_file = os.path.join(args.cloud_data_path, VOCAB_FILE)
cloud_mean_file = os.path.join(args.cloud_data_path, MEAN_STD_FILE) cloud_mean_file = os.path.join(args.cloud_data_path, MEAN_STD_FILE)
local_train_manifest = os.path.join(args.local_tmp_path, TRAIN_MANIFEST) local_train_manifest = os.path.join(args.local_tmp_path, TRAIN_MANIFEST)
local_train_tar = os.path.join(args.local_tmp_path, TRAIN_TAR) local_train_tar = os.path.join(args.local_tmp_path, TRAIN_TAR)
local_test_manifest = os.path.join(args.local_tmp_path, TEST_MANIFEST) local_dev_manifest = os.path.join(args.local_tmp_path, DEV_MANIFEST)
local_test_tar = os.path.join(args.local_tmp_path, TEST_TAR) local_dev_tar = os.path.join(args.local_tmp_path, DEV_TAR)
# prepare local and cloud dir
if os.path.exists(args.local_tmp_path): if os.path.exists(args.local_tmp_path):
shutil.rmtree(args.local_tmp_path) shutil.rmtree(args.local_tmp_path)
os.makedirs(args.local_tmp_path) os.makedirs(args.local_tmp_path)
pcloud_mkdir(args.cloud_data_path)
# pack and upload train data
pack_data(args.train_manifest_path, local_train_tar, local_train_manifest)
pcloud_cp(local_train_manifest, cloud_train_manifest)
pcloud_cp(local_train_tar, cloud_train_tar)
# pack and upload validation data
pack_data(args.dev_manifest_path, local_dev_tar, local_dev_manifest)
pcloud_cp(local_dev_manifest, cloud_dev_manifest)
pcloud_cp(local_dev_tar, cloud_dev_tar)
# train data # upload vocab file and mean_std file
if args.train_manifest_path != "": pcloud_cp(args.vocab_file, cloud_vocab_file)
ret = pcloud_exist(cloud_train_manifest) pcloud_cp(args.mean_std_file, cloud_mean_file)
if ret != 0:
pack_data(args.train_manifest_path, local_train_tar,
local_train_manifest)
pcloud_cp(local_train_manifest, cloud_train_manifest)
pcloud_cp(local_train_tar, cloud_train_tar)
# test data
if args.test_manifest_path != "":
ret = pcloud_exist(cloud_test_manifest)
if ret != 0:
pack_data(args.test_manifest_path, local_test_tar,
local_test_manifest)
pcloud_cp(local_test_manifest, cloud_test_manifest)
pcloud_cp(local_test_tar, cloud_test_tar)
# vocab file
if args.vocab_file != "":
ret = pcloud_exist(cloud_vocab_file)
if ret != 0:
pcloud_cp(args.vocab_file, cloud_vocab_file)
# mean_std file
if args.mean_std_file != "":
ret = pcloud_exist(cloud_mean_file)
if ret != 0:
pcloud_cp(args.mean_std_file, cloud_mean_file)
shutil.rmtree(args.local_tmp_path) shutil.rmtree(args.local_tmp_path)

@ -127,6 +127,12 @@ parser.add_argument(
type=str, type=str,
help="Augmentation configuration in json-format. " help="Augmentation configuration in json-format. "
"(default: %(default)s)") "(default: %(default)s)")
parser.add_argument(
"--is_local",
default=True,
type=distutils.util.strtobool,
help="Set to false if running with pserver in paddlecloud. "
"(default: %(default)s)")
args = parser.parse_args() args = parser.parse_args()
@ -178,7 +184,10 @@ def train():
def main(): def main():
utils.print_arguments(args) utils.print_arguments(args)
paddle.init(use_gpu=args.use_gpu, trainer_count=args.trainer_count) paddle.init(
use_gpu=args.use_gpu,
trainer_count=args.trainer_count,
is_local=args.is_local)
train() train()

Loading…
Cancel
Save