@@ -8,13 +8,30 @@
#include <numeric>
#include <functional>

using std::cout;
using std::endl;

DEFINE_string(model_path, "avg_1.jit.pdmodel", "xxx.pdmodel");
DEFINE_string(param_path, "avg_1.jit.pdiparams", "xxx.pdiparams");

void produce_data(std::vector<std::vector<float>>* data);
void model_forward_test();

int main(int argc, char* argv[]) {
    gflags::ParseCommandLineFlags(&argc, &argv, true);
    model_forward_test();
    return 0;
}
void produce_data(std::vector<std::vector<float>>* data) {
    int chunk_size = 35;  // chunk_size in frame
    int col_size = 161;   // feat dim
    cout << "chunk size: " << chunk_size << endl;
    cout << "feat dim: " << col_size << endl;

    data->reserve(chunk_size);
    for (int row = 0; row < chunk_size; ++row) {
        data->push_back(std::vector<float>());
        // reserve per-row storage once the row exists
        data->back().reserve(col_size);
        for (int col_idx = 0; col_idx < col_size; ++col_idx) {
            data->back().push_back(0.201);
        }
    }
}

void model_forward_test() {
@@ -23,18 +40,23 @@ void model_forward_test() {
    produce_data(&feats);

    std::cout << "2. load the model" << std::endl;
    std::string model_graph = "../../../../model/paddle_online_deepspeech/model/avg_1.jit.pdmodel";
    std::string model_params = "../../../../model/paddle_online_deepspeech/model/avg_1.jit.pdiparams";
    std::string model_graph = FLAGS_model_path;
    std::string model_params = FLAGS_param_path;
    cout << "model path: " << model_graph << endl;
    cout << "model param path: " << model_params << endl;

    paddle_infer::Config config;
    config.SetModel(model_graph, model_params);
    config.SwitchIrOptim(false);
    cout << "SwitchIrOptim: " << false << endl;
    config.DisableFCPadding();
    cout << "DisableFCPadding: " << endl;
    auto predictor = paddle_infer::CreatePredictor(config);

    std::cout << "3. feat shape, row=" << feats.size() << ",col=" << feats[0].size() << std::endl;
    std::vector<float> paddle_input_feature_matrix;
    std::vector<float> pp_input_mat;
    for (const auto& item : feats) {
        paddle_input_feature_matrix.insert(paddle_input_feature_matrix.end(), item.begin(), item.end());
        pp_input_mat.insert(pp_input_mat.end(), item.begin(), item.end());
    }

    std::cout << "4. feed the data to model" << std::endl;
@@ -42,13 +64,21 @@ void model_forward_test() {
    int col = feats[0].size();
    std::vector<std::string> input_names = predictor->GetInputNames();
    std::vector<std::string> output_names = predictor->GetOutputNames();
    for (auto name : input_names) {
        cout << "model input names: " << name << endl;
    }
    for (auto name : output_names) {
        cout << "model output names: " << name << endl;
    }

    // input
    std::unique_ptr<paddle_infer::Tensor> input_tensor =
        predictor->GetInputHandle(input_names[0]);
    std::vector<int> INPUT_SHAPE = {1, row, col};
    input_tensor->Reshape(INPUT_SHAPE);
    input_tensor->CopyFromCpu(paddle_input_feature_matrix.data());
    input_tensor->CopyFromCpu(pp_input_mat.data());

    // input length
    std::unique_ptr<paddle_infer::Tensor> input_len = predictor->GetInputHandle(input_names[1]);
    std::vector<int> input_len_size = {1};
    input_len->Reshape(input_len_size);
@@ -56,6 +86,7 @@ void model_forward_test() {
    audio_len.push_back(row);
    input_len->CopyFromCpu(audio_len.data());

    // state_h
    std::unique_ptr<paddle_infer::Tensor> chunk_state_h_box = predictor->GetInputHandle(input_names[2]);
    std::vector<int> chunk_state_h_box_shape = {3, 1, 1024};
    chunk_state_h_box->Reshape(chunk_state_h_box_shape);
@@ -64,6 +95,7 @@ void model_forward_test() {
    std::vector<float> chunk_state_h_box_data(chunk_state_h_box_size, 0.0f);
    chunk_state_h_box->CopyFromCpu(chunk_state_h_box_data.data());

    // state_c
    std::unique_ptr<paddle_infer::Tensor> chunk_state_c_box = predictor->GetInputHandle(input_names[3]);
    std::vector<int> chunk_state_c_box_shape = {3, 1, 1024};
    chunk_state_c_box->Reshape(chunk_state_c_box_shape);
@@ -72,8 +104,10 @@ void model_forward_test() {
    std::vector<float> chunk_state_c_box_data(chunk_state_c_box_size, 0.0f);
    chunk_state_c_box->CopyFromCpu(chunk_state_c_box_data.data());

    // run
    bool success = predictor->Run();

    // state_h out
    std::unique_ptr<paddle_infer::Tensor> h_out = predictor->GetOutputHandle(output_names[2]);
    std::vector<int> h_out_shape = h_out->shape();
    int h_out_size = std::accumulate(h_out_shape.begin(), h_out_shape.end(),
@@ -81,6 +115,7 @@ void model_forward_test() {
    std::vector<float> h_out_data(h_out_size);
    h_out->CopyToCpu(h_out_data.data());

    // state_c out
    std::unique_ptr<paddle_infer::Tensor> c_out = predictor->GetOutputHandle(output_names[3]);
    std::vector<int> c_out_shape = c_out->shape();
    int c_out_size = std::accumulate(c_out_shape.begin(), c_out_shape.end(),
@@ -88,6 +123,7 @@ void model_forward_test() {
    std::vector<float> c_out_data(c_out_size);
    c_out->CopyToCpu(c_out_data.data());

    // output tensor
    std::unique_ptr<paddle_infer::Tensor> output_tensor =
        predictor->GetOutputHandle(output_names[0]);
    std::vector<int> output_shape = output_tensor->shape();
@@ -99,6 +135,7 @@ void model_forward_test() {
    row = output_shape[1];
    col = output_shape[2];

    // probs
    std::vector<std::vector<float>> probs;
    probs.reserve(row);
    for (int i = 0; i < row; i++) {
@@ -120,15 +157,8 @@ void model_forward_test() {
    }
}

void produce_data(std::vector<std::vector<float>>* data) {
    int chunk_size = 35;
    int col_size = 161;
    data->reserve(chunk_size);
    data->back().reserve(col_size);
    for (int row = 0; row < chunk_size; ++row) {
        data->push_back(std::vector<float>());
        for (int col_idx = 0; col_idx < col_size; ++col_idx) {
            data->back().push_back(0.201);
        }
    }
}

int main(int argc, char* argv[]) {
    gflags::ParseCommandLineFlags(&argc, &argv, true);
    model_forward_test();
    return 0;
}
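Side note on the new includes: the element counts for the state outputs are computed by folding each output tensor's shape, which is presumably why `<numeric>` (`std::accumulate`) and `<functional>` are now included. The accumulate calls are cut off by the hunk boundaries above, so the snippet below is only a minimal sketch of the pattern, assuming the usual `1, std::multiplies<int>()` initial value and reducer; the `{3, 1, 1024}` shape just mirrors the LSTM state shape used in the test.

```cpp
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

int main() {
    // Example shape matching the chunk_state_h_box tensor in the diff above.
    std::vector<int> shape = {3, 1, 1024};

    // Fold the dimensions into a flat element count (3 * 1 * 1024 = 3072),
    // i.e. the buffer size needed before CopyToCpu.
    int size = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<int>());

    std::cout << "elements: " << size << std::endl;  // prints: elements: 3072
    return 0;
}
```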