@ -0,0 +1,2 @@
|
||||
include paddlespeech/t2s/exps/*.txt
|
||||
include paddlespeech/t2s/frontend/*.yaml
|
@ -1,3 +1,3 @@
|
||||
# [Aishell1](http://www.openslr.org/33/)
|
||||
# [Aishell1](http://openslr.elda.org/33/)
|
||||
|
||||
This open-source Mandarin speech corpus, AISHELL-ASR0009-OS1, contains 178 hours of speech. It is part of AISHELL-ASR0009, whose utterances cover 11 domains, including smart home, autonomous driving, and industrial production. All recordings were made in a quiet indoor environment using three devices simultaneously: a high-fidelity microphone (44.1 kHz, 16-bit), an Android phone (16 kHz, 16-bit), and an iOS phone (16 kHz, 16-bit). The high-fidelity audio was downsampled to 16 kHz to build AISHELL-ASR0009-OS1. 400 speakers from different accent regions in China participated in the recording. Through professional speech annotation and strict quality inspection, the manual transcription accuracy is above 95%. The corpus is divided into training, development, and test sets. (The database is free for academic research; commercial use requires permission.)
|
||||
|
@ -1 +1 @@
|
||||
# [FreeST](http://www.openslr.org/38/)
|
||||
# [FreeST](http://openslr.elda.org/38/)
|
||||
|
@ -0,0 +1,7 @@
|
||||
#!/bin/bash
|
||||
|
||||
wget -c https://paddlespeech.bj.bcebos.com/kws/hey_snips.wav https://paddlespeech.bj.bcebos.com/kws/non-keyword.wav
|
||||
|
||||
# kws
|
||||
paddlespeech kws --input ./hey_snips.wav
|
||||
paddlespeech kws --input non-keyword.wav
|
@ -1,3 +1,3 @@
|
||||
#!/bin/bash
|
||||
|
||||
paddlespeech_server start --config_file ./conf/application.yaml
|
||||
paddlespeech_server start --config_file ./conf/application.yaml &> server.log &
|
||||
|
@ -0,0 +1,10 @@
|
||||
#!/bin/bash
|
||||
|
||||
wget -c https://paddlespeech.bj.bcebos.com/vector/audio/85236145389.wav
|
||||
wget -c https://paddlespeech.bj.bcebos.com/vector/audio/123456789.wav
|
||||
|
||||
# sid extract
|
||||
paddlespeech_client vector --server_ip 127.0.0.1 --port 8090 --task spk --input ./85236145389.wav
|
||||
|
||||
# sid score
|
||||
paddlespeech_client vector --server_ip 127.0.0.1 --port 8090 --task score --enroll ./85236145389.wav --test ./123456789.wav
|
@ -0,0 +1,4 @@
|
||||
#!/bin/bash
|
||||
|
||||
|
||||
paddlespeech_client text --server_ip 127.0.0.1 --port 8090 --input 今天的天气真好啊你下午有空吗我想约你一起去吃饭
|
@ -0,0 +1,16 @@
|
||||
*/.vscode/*
|
||||
*.wav
|
||||
*/resource/*
|
||||
.Ds*
|
||||
*.pyc
|
||||
*.pcm
|
||||
*.npy
|
||||
*.diff
|
||||
*.sqlite
|
||||
*/static/*
|
||||
*.pdparams
|
||||
*.pdiparams*
|
||||
*.pdmodel
|
||||
*/source/*
|
||||
*/PaddleSpeech/*
|
||||
|
After Width: | Height: | Size: 84 KiB |
@ -0,0 +1,103 @@
|
||||
# This is the parameter configuration file for the streaming TTS server.
|
||||
|
||||
#################################################################################
|
||||
# SERVER SETTING #
|
||||
#################################################################################
|
||||
host: 0.0.0.0
|
||||
port: 8092
|
||||
|
||||
# The task format in the engine_list is: <speech task>_<engine type>
|
||||
# engine_list choices = ['tts_online', 'tts_online-onnx']; tts_online-onnx infers faster than tts_online.
|
||||
# protocol choices = ['websocket', 'http']
|
||||
protocol: 'http'
|
||||
engine_list: ['tts_online-onnx']
|
||||
|
||||
|
||||
#################################################################################
|
||||
# ENGINE CONFIG #
|
||||
#################################################################################
|
||||
|
||||
################################### TTS #########################################
|
||||
################### speech task: tts; engine_type: online #######################
|
||||
tts_online:
|
||||
# am (acoustic model) choices=['fastspeech2_csmsc', 'fastspeech2_cnndecoder_csmsc']
|
||||
# fastspeech2_cnndecoder_csmsc supports streaming AM inference.
|
||||
am: 'fastspeech2_csmsc'
|
||||
am_config:
|
||||
am_ckpt:
|
||||
am_stat:
|
||||
phones_dict:
|
||||
tones_dict:
|
||||
speaker_dict:
|
||||
spk_id: 0
|
||||
|
||||
# voc (vocoder) choices=['mb_melgan_csmsc', 'hifigan_csmsc']
|
||||
# Both mb_melgan_csmsc and hifigan_csmsc support streaming voc inference
|
||||
voc: 'mb_melgan_csmsc'
|
||||
voc_config:
|
||||
voc_ckpt:
|
||||
voc_stat:
|
||||
|
||||
# others
|
||||
lang: 'zh'
|
||||
device: 'cpu' # set 'gpu:id' or 'cpu'
|
||||
# am_block and am_pad only take effect when am is fastspeech2_cnndecoder_csmsc (streaming AM inference);
|
||||
# with am_pad set to 12, the streaming synthesized audio is identical to the non-streaming output
|
||||
am_block: 72
|
||||
am_pad: 12
|
||||
# voc_block and voc_pad control chunking for streaming vocoder inference;
|
||||
# when the voc model is mb_melgan_csmsc, voc_pad set to 14 makes the streaming output identical to non-streaming synthesis; the pad can be lowered to 7 and the streaming audio still sounds normal
|
||||
# when the voc model is hifigan_csmsc, voc_pad set to 19 makes the streaming output identical to non-streaming synthesis; with voc_pad set to 14 the streaming audio still sounds normal
|
||||
voc_block: 36
|
||||
voc_pad: 14
|
||||
|
||||
|
||||
|
||||
#################################################################################
|
||||
# ENGINE CONFIG #
|
||||
#################################################################################
|
||||
|
||||
################################### TTS #########################################
|
||||
################### speech task: tts; engine_type: online-onnx #######################
|
||||
tts_online-onnx:
|
||||
# am (acoustic model) choices=['fastspeech2_csmsc_onnx', 'fastspeech2_cnndecoder_csmsc_onnx']
|
||||
# fastspeech2_cnndecoder_csmsc_onnx supports streaming AM inference.
|
||||
am: 'fastspeech2_cnndecoder_csmsc_onnx'
|
||||
# am_ckpt is a list: if am is fastspeech2_cnndecoder_csmsc_onnx, am_ckpt = [encoder model, decoder model, postnet model];
|
||||
# if am is fastspeech2_csmsc_onnx, am_ckpt = [ckpt model];
|
||||
am_ckpt: # list
|
||||
am_stat:
|
||||
phones_dict:
|
||||
tones_dict:
|
||||
speaker_dict:
|
||||
spk_id: 0
|
||||
am_sample_rate: 24000
|
||||
am_sess_conf:
|
||||
device: "cpu" # set 'gpu:id' or 'cpu'
|
||||
use_trt: False
|
||||
cpu_threads: 4
|
||||
|
||||
# voc (vocoder) choices=['mb_melgan_csmsc_onnx', 'hifigan_csmsc_onnx']
|
||||
# Both mb_melgan_csmsc_onnx and hifigan_csmsc_onnx support streaming voc inference
|
||||
voc: 'hifigan_csmsc_onnx'
|
||||
voc_ckpt:
|
||||
voc_sample_rate: 24000
|
||||
voc_sess_conf:
|
||||
device: "cpu" # set 'gpu:id' or 'cpu'
|
||||
use_trt: False
|
||||
cpu_threads: 4
|
||||
|
||||
# others
|
||||
lang: 'zh'
|
||||
# am_block and am_pad only take effect when am is fastspeech2_cnndecoder_csmsc_onnx (streaming AM inference);
|
||||
# with am_pad set to 12, the streaming synthesized audio is identical to the non-streaming output
|
||||
am_block: 72
|
||||
am_pad: 12
|
||||
# voc_block and voc_pad control chunking for streaming vocoder inference;
|
||||
# when the voc model is mb_melgan_csmsc_onnx, voc_pad set to 14 makes the streaming output identical to non-streaming synthesis; the pad can be lowered to 7 and the streaming audio still sounds normal
|
||||
# when the voc model is hifigan_csmsc_onnx, voc_pad set to 19 makes the streaming output identical to non-streaming synthesis; with voc_pad set to 14 the streaming audio still sounds normal
|
||||
voc_block: 36
|
||||
voc_pad: 14
|
||||
# voc_upsample should be the same as n_shift in the vocoder config.
|
||||
voc_upsample: 300
|
||||
|
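For a quick sanity check, the file can be loaded with the same `get_config` helper that the backend code further down imports; a minimal sketch (the config path is an assumption):

```python
# Minimal sketch: load this YAML and print the active engine's key settings.
# The path is a placeholder; get_config is the helper imported by chat.py below.
from paddlespeech.server.utils.config import get_config

config = get_config("conf/tts_online_application.yaml")  # hypothetical path
engine = config["engine_list"][0]                        # e.g. 'tts_online-onnx'
tts_conf = config[engine]
print(engine, tts_conf["am"], tts_conf["voc"], tts_conf["am_block"], tts_conf["voc_block"])
```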
@ -0,0 +1,48 @@
|
||||
# This is the parameter configuration file for PaddleSpeech Serving.
|
||||
|
||||
#################################################################################
|
||||
# SERVER SETTING #
|
||||
#################################################################################
|
||||
host: 0.0.0.0
|
||||
port: 8090
|
||||
|
||||
# The task format in the engine_list is: <speech task>_<engine type>
|
||||
# task choices = ['asr_online']
|
||||
# protocol = ['websocket'] (only one can be selected).
|
||||
# websocket only supports the online engine type.
|
||||
protocol: 'websocket'
|
||||
engine_list: ['asr_online']
|
||||
|
||||
|
||||
#################################################################################
|
||||
# ENGINE CONFIG #
|
||||
#################################################################################
|
||||
|
||||
################################### ASR #########################################
|
||||
################### speech task: asr; engine_type: online #######################
|
||||
asr_online:
|
||||
model_type: 'conformer_online_wenetspeech'
|
||||
am_model: # the pdmodel file of am static model [optional]
|
||||
am_params: # the pdiparams file of am static model [optional]
|
||||
lang: 'zh'
|
||||
sample_rate: 16000
|
||||
cfg_path:
|
||||
decode_method:
|
||||
force_yes: True
|
||||
device: 'cpu' # cpu or gpu:id
|
||||
decode_method: "attention_rescoring"
|
||||
continuous_decoding: True # enable continuous decoding when an endpoint is detected
|
||||
num_decoding_left_chunks: 16
|
||||
am_predictor_conf:
|
||||
device: # set 'gpu:id' or 'cpu'
|
||||
switch_ir_optim: True
|
||||
glog_info: False # True -> print glog
|
||||
summary: True # False -> do not show predictor config
|
||||
|
||||
chunk_buffer_conf:
|
||||
window_n: 7 # frame
|
||||
shift_n: 4 # frame
|
||||
window_ms: 25 # ms
|
||||
shift_ms: 10 # ms
|
||||
sample_rate: 16000
|
||||
sample_width: 2
|
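The chunk_buffer_conf values map directly to sample counts; a small worked sketch of that arithmetic, assuming the 16 kHz, 16-bit mono stream configured above:

```python
# Worked example of the chunk_buffer_conf geometry above (16 kHz, 16-bit mono assumed).
sample_rate = 16000
sample_width = 2                                   # bytes per sample
window_ms, shift_ms = 25, 10

window_samples = sample_rate * window_ms // 1000   # 400 samples per analysis window
shift_samples = sample_rate * shift_ms // 1000     # 160 samples between windows
print(window_samples, shift_samples, window_samples * sample_width)  # 400 160 800
```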
@ -0,0 +1,14 @@
|
||||
aiofiles
|
||||
fastapi
|
||||
librosa
|
||||
numpy
|
||||
pydantic
|
||||
scikit_learn
|
||||
SoundFile
|
||||
starlette
|
||||
uvicorn
|
||||
paddlepaddle
|
||||
paddlespeech
|
||||
paddlenlp
|
||||
faiss-cpu
|
||||
python-multipart
|
@ -0,0 +1,62 @@
|
||||
from re import sub
|
||||
import numpy as np
|
||||
import paddle
|
||||
import librosa
|
||||
import soundfile
|
||||
|
||||
from paddlespeech.server.engine.asr.online.python.asr_engine import ASREngine
|
||||
from paddlespeech.server.engine.asr.online.python.asr_engine import PaddleASRConnectionHanddler
|
||||
from paddlespeech.server.utils.config import get_config
|
||||
|
||||
def readWave(samples):
|
||||
x_len = len(samples)
|
||||
|
||||
chunk_size = 85 * 16  # 85 ms per chunk at sample_rate = 16 kHz
|
||||
if x_len % chunk_size != 0:
|
||||
padding_len_x = chunk_size - x_len % chunk_size
|
||||
else:
|
||||
padding_len_x = 0
|
||||
|
||||
padding = np.zeros((padding_len_x), dtype=samples.dtype)
|
||||
padded_x = np.concatenate([samples, padding], axis=0)
|
||||
|
||||
assert (x_len + padding_len_x) % chunk_size == 0
|
||||
num_chunk = (x_len + padding_len_x) / chunk_size
|
||||
num_chunk = int(num_chunk)
|
||||
for i in range(0, num_chunk):
|
||||
start = i * chunk_size
|
||||
end = start + chunk_size
|
||||
x_chunk = padded_x[start:end]
|
||||
yield x_chunk
|
||||
|
||||
|
||||
class ASR:
|
||||
def __init__(self, config_path, ) -> None:
|
||||
self.config = get_config(config_path)['asr_online']
|
||||
self.engine = ASREngine()
|
||||
self.engine.init(self.config)
|
||||
self.connection_handler = PaddleASRConnectionHanddler(self.engine)
|
||||
|
||||
def offlineASR(self, samples, sample_rate=16000):
|
||||
x_chunk, x_chunk_lens = self.engine.preprocess(samples=samples, sample_rate=sample_rate)
|
||||
self.engine.run(x_chunk, x_chunk_lens)
|
||||
result = self.engine.postprocess()
|
||||
self.engine.reset()
|
||||
return result
|
||||
|
||||
def onlineASR(self, samples:bytes=None, is_finished=False):
|
||||
if not is_finished:
|
||||
# streaming in progress: feed the chunk and decode incrementally
|
||||
self.connection_handler.extract_feat(samples)
|
||||
self.connection_handler.decode(is_finished)
|
||||
asr_results = self.connection_handler.get_result()
|
||||
return asr_results
|
||||
else:
|
||||
# end of stream: final decode plus attention rescoring
|
||||
self.connection_handler.decode(is_finished=True)
|
||||
self.connection_handler.rescoring()
|
||||
asr_results = self.connection_handler.get_result()
|
||||
self.connection_handler.reset()
|
||||
return asr_results
|
||||
|
||||
|
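A minimal offline usage sketch for the wrapper above; the wav and config paths are placeholders, and a 16 kHz mono recording is assumed:

```python
# Hypothetical usage of the ASR wrapper defined above (paths are placeholders).
import soundfile

samples, sr = soundfile.read("zh.wav", dtype="int16")          # 16 kHz mono wav assumed
asr = ASR(config_path="conf/ws_conformer_application.yaml")    # placeholder config path
print(asr.offlineASR(samples, sample_rate=sr))

# Streaming goes through onlineASR(): feed chunks with is_finished=False,
# then call it once more with is_finished=True to trigger rescoring.
```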
@ -0,0 +1,23 @@
|
||||
from paddlenlp import Taskflow
|
||||
|
||||
class NLP:
|
||||
def __init__(self, ie_model_path=None):
|
||||
schema = ["时间", "出发地", "目的地", "费用"]
|
||||
if ie_model_path:
|
||||
self.ie_model = Taskflow("information_extraction",
|
||||
schema=schema, task_path=ie_model_path)
|
||||
else:
|
||||
self.ie_model = Taskflow("information_extraction",
|
||||
schema=schema)
|
||||
|
||||
self.dialogue_model = Taskflow("dialogue")
|
||||
|
||||
def chat(self, text):
|
||||
result = self.dialogue_model([text])
|
||||
return result[0]
|
||||
|
||||
def ie(self, text):
|
||||
result = self.ie_model(text)
|
||||
return result
|
||||
|
||||
|
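A short usage sketch for the helper above; the default Taskflow models are downloaded on first use, and the sample sentences are illustrative only:

```python
# Hypothetical usage of the NLP helper defined above.
nlp = NLP()                                    # or NLP(ie_model_path=...) for a finetuned UIE model
print(nlp.chat("你好,今天过得怎么样?"))          # open-domain chit-chat reply
print(nlp.ie("明天早上从北京出发去上海,预算五百元"))  # extracts 时间/出发地/目的地/费用
```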
@ -0,0 +1,20 @@
|
||||
from paddlespeech.cli.vector import VectorExecutor
|
||||
import numpy as np
|
||||
import logging
|
||||
|
||||
vector_executor = VectorExecutor()
|
||||
|
||||
def get_audio_embedding(path):
|
||||
"""
|
||||
Use vpr_inference to generate embedding of audio
|
||||
"""
|
||||
try:
|
||||
embedding = vector_executor(
|
||||
audio_file=path, model='ecapatdnn_voxceleb12')
|
||||
embedding = embedding / np.linalg.norm(embedding)
|
||||
return embedding
|
||||
except Exception as e:
|
||||
logging.error(f"Error with embedding:{e}")
|
||||
return None
|
||||
|
||||
|
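Because the embeddings returned above are L2-normalised, speaker similarity reduces to a dot product; a small scoring sketch (file paths are placeholders):

```python
# Hypothetical scoring sketch using the helper above.
emb_enroll = get_audio_embedding("enroll.wav")   # placeholder paths
emb_test = get_audio_embedding("test.wav")
if emb_enroll is not None and emb_test is not None:
    score = float(np.dot(emb_enroll, emb_test))  # cosine similarity of unit vectors
    print(f"similarity: {score:.4f}")
```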
@ -0,0 +1,31 @@
|
||||
from typing import List
|
||||
|
||||
from fastapi import WebSocket
|
||||
|
||||
class ConnectionManager:
|
||||
def __init__(self):
|
||||
# holds the active websocket connection objects
|
||||
self.active_connections: List[WebSocket] = []
|
||||
|
||||
async def connect(self, ws: WebSocket):
|
||||
# accept the incoming connection
|
||||
await ws.accept()
|
||||
# keep a reference to the connection
|
||||
self.active_connections.append(ws)
|
||||
|
||||
def disconnect(self, ws: WebSocket):
|
||||
# remove the websocket object when the connection closes
|
||||
self.active_connections.remove(ws)
|
||||
|
||||
@staticmethod
|
||||
async def send_personal_message(message: str, ws: WebSocket):
|
||||
# send a message to a single client
|
||||
await ws.send_text(message)
|
||||
|
||||
async def broadcast(self, message: str):
|
||||
# broadcast a message to all connected clients
|
||||
for connection in self.active_connections:
|
||||
await connection.send_text(message)
|
||||
|
||||
|
||||
manager = ConnectionManager()
|
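A minimal FastAPI sketch showing how the shared `manager` can back a websocket route; the route path and echo behaviour are illustrative, not the actual endpoints in main.py:

```python
# Hypothetical websocket route built on the ConnectionManager above.
from fastapi import FastAPI, WebSocket, WebSocketDisconnect

app = FastAPI()

@app.websocket("/ws/echo")                     # placeholder route
async def ws_echo(websocket: WebSocket):
    await manager.connect(websocket)
    try:
        while True:
            text = await websocket.receive_text()
            await manager.send_personal_message(f"echo: {text}", websocket)
    except WebSocketDisconnect:
        manager.disconnect(websocket)
```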
@ -0,0 +1,18 @@
|
||||
import random
|
||||
|
||||
def randName(n=5):
|
||||
return "".join(random.sample('zyxwvutsrqponmlkjihgfedcba',n))
|
||||
|
||||
def SuccessRequest(result=None, message="ok"):
|
||||
return {
|
||||
"code": 0,
|
||||
"result":result,
|
||||
"message": message
|
||||
}
|
||||
|
||||
def ErrorRequest(result=None, message="error"):
|
||||
return {
|
||||
"code": -1,
|
||||
"result":result,
|
||||
"message": message
|
||||
}
|
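The two helpers define the JSON envelope that the HTTP routes return; a tiny usage sketch:

```python
# The response envelope produced by the helpers above.
print(SuccessRequest(result={"id": randName()}))  # e.g. {'code': 0, 'result': {'id': '...'}, 'message': 'ok'}
print(ErrorRequest(message="audio missing"))      # {'code': -1, 'result': None, 'message': 'audio missing'}
```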
@ -0,0 +1,25 @@
|
||||
# Logs
|
||||
logs
|
||||
*.log
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
pnpm-debug.log*
|
||||
lerna-debug.log*
|
||||
|
||||
node_modules
|
||||
dist
|
||||
dist-ssr
|
||||
*.local
|
||||
|
||||
# Editor directories and files
|
||||
.vscode/*
|
||||
!.vscode/extensions.json
|
||||
.idea
|
||||
.DS_Store
|
||||
*.suo
|
||||
*.ntvs*
|
||||
*.njsproj
|
||||
*.sln
|
||||
*.sw?
|
||||
.vscode/*
|
@ -0,0 +1,13 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8" />
|
||||
<link rel="icon" href="/favicon.ico" />
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
||||
<title>飞桨PaddleSpeech</title>
|
||||
</head>
|
||||
<body>
|
||||
<div id="app"></div>
|
||||
<script type="module" src="/src/main.js"></script>
|
||||
</body>
|
||||
</html>
|
@ -0,0 +1,23 @@
|
||||
{
|
||||
"name": "paddlespeechwebclient",
|
||||
"private": true,
|
||||
"version": "0.0.0",
|
||||
"scripts": {
|
||||
"dev": "vite",
|
||||
"build": "vite build",
|
||||
"preview": "vite preview"
|
||||
},
|
||||
"dependencies": {
|
||||
"ant-design-vue": "^2.2.8",
|
||||
"axios": "^0.26.1",
|
||||
"element-plus": "^2.1.9",
|
||||
"js-audio-recorder": "0.5.7",
|
||||
"lamejs": "^1.2.1",
|
||||
"less": "^4.1.2",
|
||||
"vue": "^3.2.25"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@vitejs/plugin-vue": "^2.3.0",
|
||||
"vite": "^2.9.0"
|
||||
}
|
||||
}
|
After Width: | Height: | Size: 4.2 KiB |
@ -0,0 +1,19 @@
|
||||
<script setup>
|
||||
import Experience from './components/Experience.vue'
|
||||
import Header from './components/Content/Header/Header.vue'
|
||||
</script>
|
||||
|
||||
<template>
|
||||
<div class="app">
|
||||
<Header></Header>
|
||||
<Experience></Experience>
|
||||
</div>
|
||||
|
||||
|
||||
</template>
|
||||
|
||||
<style style="less">
|
||||
.app {
|
||||
background: url("assets/image/在线体验-背景@2x.png") no-repeat;
|
||||
};
|
||||
</style>
|
@ -0,0 +1,29 @@
|
||||
export const apiURL = {
|
||||
ASR_OFFLINE : '/api/asr/offline', // get the offline speech recognition result
|
||||
ASR_COLLECT_ENV : '/api/asr/collectEnv', // collect ambient background noise
|
||||
ASR_STOP_RECORD : '/api/asr/stopRecord', // pause recording on the backend
|
||||
ASR_RESUME_RECORD : '/api/asr/resumeRecord', // resume recording on the backend
|
||||
|
||||
NLP_CHAT : '/api/nlp/chat', // NLP chit-chat endpoint
|
||||
NLP_IE : '/api/nlp/ie', // information extraction endpoint
|
||||
|
||||
TTS_OFFLINE : '/api/tts/offline', // fetch synthesized TTS audio
|
||||
|
||||
VPR_RECOG : '/api/vpr/recog', // voiceprint recognition, returns similarity scores
|
||||
VPR_ENROLL : '/api/vpr/enroll', // voiceprint enrollment endpoint
|
||||
VPR_LIST : '/api/vpr/list', // list enrolled voiceprints
|
||||
VPR_DEL : '/api/vpr/del', // delete a user's voiceprint
|
||||
VPR_DATA : '/api/vpr/database64?vprId=', // fetch enrolled audio as base64
|
||||
|
||||
// websocket
|
||||
CHAT_SOCKET_RECORD: 'ws://localhost:8010/ws/asr/offlineStream', // ChatBot websocket endpoint
|
||||
ASR_SOCKET_RECORD: 'ws://localhost:8010/ws/asr/onlineStream', // streaming ASR endpoint
|
||||
TTS_SOCKET_RECORD: 'ws://localhost:8010/ws/tts/online', // streaming TTS endpoint
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
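These routes can also be exercised without the frontend; a hedged Python sketch (host and port are taken from the websocket URLs above, and the request body shape mirrors ApiNLP.js below):

```python
# Hypothetical direct call to one of the HTTP routes listed above.
import requests

resp = requests.post("http://localhost:8010/api/nlp/chat", json={"chat": "你好"})
print(resp.json())  # expected envelope: {"code": 0, "result": ..., "message": "ok"}
```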
@ -0,0 +1,30 @@
|
||||
import axios from 'axios'
|
||||
import {apiURL} from "./API.js"
|
||||
|
||||
// upload an audio file and get the recognition result
|
||||
export async function asrOffline(params){
|
||||
const result = await axios.post(
|
||||
apiURL.ASR_OFFLINE, params
|
||||
)
|
||||
return result
|
||||
}
|
||||
|
||||
// upload the collected ambient-noise recording
|
||||
export async function asrCollentEnv(params){
|
||||
const result = await axios.post(
|
||||
apiURL.ASR_COLLECT_ENV, params
|
||||
)
|
||||
return result
|
||||
}
|
||||
|
||||
// pause recording
|
||||
export async function asrStopRecord(){
|
||||
const result = await axios.get(apiURL.ASR_STOP_RECORD);
|
||||
return result
|
||||
}
|
||||
|
||||
// resume recording
|
||||
export async function asrResumeRecord(){
|
||||
const result = await axios.get(apiURL.ASR_RESUME_RECORD);
|
||||
return result
|
||||
}
|
@ -0,0 +1,17 @@
|
||||
import axios from 'axios'
|
||||
import {apiURL} from "./API.js"
|
||||
|
||||
// get a chit-chat reply
|
||||
export async function nlpChat(text){
|
||||
const result = await axios.post(apiURL.NLP_CHAT, { chat : text});
|
||||
return result
|
||||
}
|
||||
|
||||
// get the information-extraction result
|
||||
export async function nlpIE(text){
|
||||
const result = await axios.post(apiURL.NLP_IE, { chat : text});
|
||||
return result
|
||||
}
|
||||
|
||||
|
||||
|
@ -0,0 +1,8 @@
|
||||
import axios from 'axios'
|
||||
import {apiURL} from "./API.js"
|
||||
|
||||
export async function ttsOffline(text){
|
||||
const result = await axios.post(apiURL.TTS_OFFLINE, { text : text});
|
||||
return result
|
||||
}
|
||||
|
@ -0,0 +1,32 @@
|
||||
import axios from 'axios'
|
||||
import {apiURL} from "./API.js"
|
||||
|
||||
// enroll a voiceprint
|
||||
export async function vprEnroll(params){
|
||||
const result = await axios.post(apiURL.VPR_ENROLL, params);
|
||||
return result
|
||||
}
|
||||
|
||||
// recognize a voiceprint
|
||||
export async function vprRecog(params){
|
||||
const result = await axios.post(apiURL.VPR_RECOG, params);
|
||||
return result
|
||||
}
|
||||
|
||||
// delete a voiceprint
|
||||
export async function vprDel(params){
|
||||
const result = await axios.post(apiURL.VPR_DEL, params);
|
||||
return result
|
||||
}
|
||||
|
||||
// list enrolled voiceprints
|
||||
export async function vprList(){
|
||||
const result = await axios.get(apiURL.VPR_LIST);
|
||||
return result
|
||||
}
|
||||
|
||||
// fetch the enrolled audio
|
||||
export async function vprData(params){
|
||||
const result = await axios.get(apiURL.VPR_DATA+params);
|
||||
return result
|
||||
}
|
After Width: | Height: | Size: 585 B |
After Width: | Height: | Size: 1.1 KiB |
After Width: | Height: | Size: 3.1 KiB |
After Width: | Height: | Size: 2.9 KiB |
After Width: | Height: | Size: 872 B |
After Width: | Height: | Size: 3.2 KiB |
After Width: | Height: | Size: 3.2 KiB |
After Width: | Height: | Size: 198 B |
After Width: | Height: | Size: 242 B |
After Width: | Height: | Size: 1.3 KiB |
After Width: | Height: | Size: 861 B |
After Width: | Height: | Size: 3.5 KiB |
After Width: | Height: | Size: 77 KiB |
After Width: | Height: | Size: 8.3 KiB |
After Width: | Height: | Size: 8.8 KiB |
After Width: | Height: | Size: 9.7 KiB |
After Width: | Height: | Size: 5.4 KiB |
After Width: | Height: | Size: 7.3 KiB |
After Width: | Height: | Size: 8.0 KiB |
After Width: | Height: | Size: 6.7 KiB |
@ -0,0 +1,148 @@
|
||||
.speech_header {
|
||||
width: 1200px;
|
||||
margin: 0 auto;
|
||||
padding-top: 50px;
|
||||
// background: url("../../../assets/image/在线体验-背景@2x.png") no-repeat;
|
||||
box-sizing: border-box;
|
||||
&::after {
|
||||
content: "";
|
||||
display: block;
|
||||
clear: both;
|
||||
visibility: hidden;
|
||||
}
|
||||
|
||||
;
|
||||
|
||||
// background: pink;
|
||||
.speech_header_title {
|
||||
height: 57px;
|
||||
font-family: PingFangSC-Medium;
|
||||
font-size: 38px;
|
||||
color: #000000;
|
||||
letter-spacing: 0;
|
||||
line-height: 57px;
|
||||
font-weight: 500;
|
||||
margin-bottom: 15px;
|
||||
}
|
||||
|
||||
;
|
||||
|
||||
.speech_header_describe {
|
||||
height: 26px;
|
||||
font-family: PingFangSC-Regular;
|
||||
font-size: 16px;
|
||||
color: #575757;
|
||||
line-height: 26px;
|
||||
font-weight: 400;
|
||||
margin-bottom: 24px;
|
||||
}
|
||||
|
||||
;
|
||||
.speech_header_link_box {
|
||||
height: 40px;
|
||||
margin-bottom: 40px;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
};
|
||||
.speech_header_link {
|
||||
display: block;
|
||||
background: #2932E1;
|
||||
width: 120px;
|
||||
height: 40px;
|
||||
line-height: 40px;
|
||||
border-radius: 20px;
|
||||
font-family: PingFangSC-Medium;
|
||||
font-size: 14px;
|
||||
color: #FFFFFF;
|
||||
text-align: center;
|
||||
font-weight: 500;
|
||||
margin-right: 20px;
|
||||
// margin-bottom: 40px;
|
||||
|
||||
&:hover {
|
||||
opacity: 0.9;
|
||||
}
|
||||
|
||||
;
|
||||
}
|
||||
|
||||
;
|
||||
|
||||
.speech_header_divider {
|
||||
width: 1200px;
|
||||
height: 1px;
|
||||
background: #D1D1D1;
|
||||
margin-bottom: 40px;
|
||||
}
|
||||
|
||||
;
|
||||
|
||||
.speech_header_content_wrapper {
|
||||
width: 1200px;
|
||||
margin: 0 auto;
|
||||
// background: pink;
|
||||
margin-bottom: 20px;
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
flex-wrap: wrap;
|
||||
|
||||
.speech_header_module {
|
||||
width: 384px;
|
||||
background: #FFFFFF;
|
||||
border: 1px solid rgba(224, 224, 224, 1);
|
||||
box-shadow: 4px 8px 12px 0px rgba(0, 0, 0, 0.05);
|
||||
border-radius: 16px;
|
||||
padding: 30px 34px 0px 34px;
|
||||
box-sizing: border-box;
|
||||
display: flex;
|
||||
margin-bottom: 40px;
|
||||
.speech_header_background_img {
|
||||
width: 46px;
|
||||
height: 46px;
|
||||
background-size: 46px 46px;
|
||||
background-repeat: no-repeat;
|
||||
background-position: center;
|
||||
margin-right: 20px;
|
||||
}
|
||||
|
||||
;
|
||||
|
||||
.speech_header_content {
|
||||
padding-top: 4px;
|
||||
margin-bottom: 32px;
|
||||
|
||||
.speech_header_module_title {
|
||||
height: 26px;
|
||||
font-family: PingFangSC-Medium;
|
||||
font-size: 20px;
|
||||
color: #000000;
|
||||
letter-spacing: 0;
|
||||
line-height: 26px;
|
||||
font-weight: 500;
|
||||
margin-bottom: 10px;
|
||||
}
|
||||
|
||||
;
|
||||
|
||||
.speech_header_module_introduce {
|
||||
font-family: PingFangSC-Regular;
|
||||
font-size: 16px;
|
||||
color: #666666;
|
||||
letter-spacing: 0;
|
||||
font-weight: 400;
|
||||
}
|
||||
|
||||
;
|
||||
}
|
||||
|
||||
;
|
||||
}
|
||||
|
||||
;
|
||||
}
|
||||
|
||||
;
|
||||
}
|
||||
|
||||
;
|
||||
|
@ -0,0 +1,38 @@
|
||||
<script setup>
|
||||
import AudioFileIdentification from "./AudioFile/AudioFileIdentification.vue"
|
||||
import RealTime from "./RealTime/RealTime.vue"
|
||||
import EndToEndIdentification from "./EndToEnd/EndToEndIdentification.vue";
|
||||
|
||||
|
||||
</script>
|
||||
|
||||
<template>
|
||||
<div class="speech_recognition">
|
||||
<div class="speech_recognition_tabs">
|
||||
<div class="frame"></div>
|
||||
<el-tabs class="speech_recognition_mytabs" type="border-card">
|
||||
<el-tab-pane label="实时语音识别" key="1">
|
||||
<RealTime />
|
||||
</el-tab-pane>
|
||||
<el-tab-pane label="端到端识别" key="2">
|
||||
<EndToEndIdentification />
|
||||
</el-tab-pane>
|
||||
<el-tab-pane label="音频文件识别" key="3">
|
||||
<AudioFileIdentification />
|
||||
</el-tab-pane>
|
||||
</el-tabs>
|
||||
</div>
|
||||
</div>
|
||||
</template>
|
||||
|
||||
<script>
|
||||
|
||||
export default {
|
||||
|
||||
}
|
||||
</script>
|
||||
|
||||
<style lang="less" scoped>
|
||||
@import "./style.less";
|
||||
|
||||
</style>
|
@ -0,0 +1,241 @@
|
||||
<template>
|
||||
<div class="audioFileIdentification">
|
||||
|
||||
|
||||
<div v-if="uploadStatus === 0" class="public_recognition_speech">
|
||||
<!-- before upload -->
|
||||
<el-upload
|
||||
:multiple="false"
|
||||
:accept="'.wav'"
|
||||
:limit="1"
|
||||
:auto-upload="false"
|
||||
:on-change="handleChange"
|
||||
:show-file-list="false"
|
||||
>
|
||||
<div class="upload_img">
|
||||
<div class="upload_img_back"></div>
|
||||
</div>
|
||||
</el-upload>
|
||||
<div class="speech_text">
|
||||
上传文件
|
||||
</div>
|
||||
<div class="speech_text_prompt">
|
||||
支持50秒内的.wav文件
|
||||
</div>
|
||||
</div>
|
||||
<!-- uploading -->
|
||||
<div v-else-if="uploadStatus === 1" class="on_the_cross_speech">
|
||||
<div class="on_the_upload_img">
|
||||
<div class="on_the_upload_img_back"></div>
|
||||
</div>
|
||||
<div class="on_the_speech_text">
|
||||
<span class="on_the_speech_loading"> <Spin indicator={antIcon} /></span> 上传中
|
||||
</div>
|
||||
</div>
|
||||
<div v-else>
|
||||
|
||||
<!-- start recognition -->
|
||||
<div v-if="recognitionStatus === 0" class="public_recognition_speech_start">
|
||||
<div class="public_recognition_speech_content">
|
||||
<div
|
||||
class="public_recognition_speech_title"
|
||||
>
|
||||
{{ filename }}
|
||||
</div>
|
||||
<div
|
||||
class="public_recognition_speech_again"
|
||||
@click="uploadAgain()"
|
||||
>重新上传</div>
|
||||
<div
|
||||
class="public_recognition_speech_play"
|
||||
@click="paly()"
|
||||
>播放</div>
|
||||
</div>
|
||||
<div class="speech_promp"
|
||||
@click="beginToIdentify()">
|
||||
开始识别
|
||||
</div>
|
||||
</div>
|
||||
<!-- recognizing -->
|
||||
<div v-else-if="recognitionStatus === 1" class="public_recognition_speech_identify">
|
||||
<div class="public_recognition_speech_identify_box">
|
||||
<div
|
||||
class="public_recognition_speech_identify_back_img"
|
||||
>
|
||||
<a-spin />
|
||||
</div>
|
||||
|
||||
<div
|
||||
class="public_recognition__identify_the_promp"
|
||||
>识别中</div>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- re-recognize -->
|
||||
<div v-else class="public_recognition_speech_identify_ahain">
|
||||
<div class="public_recognition_speech_identify_box_btn">
|
||||
|
||||
<div
|
||||
class="public_recognition__identify_the_btn"
|
||||
@click="toIdentifyThe()"
|
||||
>重新识别</div>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
|
||||
<!-- arrow -->
|
||||
<div class="public_recognition_point_to">
|
||||
|
||||
</div>
|
||||
<!-- recognition result -->
|
||||
<div class="public_recognition_result">
|
||||
<div>识别结果</div>
|
||||
<div>{{ asrResult }}</div>
|
||||
</div>
|
||||
</div>
|
||||
</template>
|
||||
|
||||
<script>
|
||||
import { asrOffline } from '../../../../api/ApiASR'
|
||||
|
||||
let audioCtx = new AudioContext({
|
||||
latencyHint: 'interactive',
|
||||
sampleRate: 24000,
|
||||
});
|
||||
|
||||
export default {
|
||||
name:"",
|
||||
data(){
|
||||
return {
|
||||
uploadStatus : 0,
|
||||
recognitionStatus : 0,
|
||||
asrResult : "",
|
||||
indicator : "",
|
||||
|
||||
filename: "",
|
||||
upfile: ""
|
||||
|
||||
}
|
||||
},
|
||||
|
||||
methods:{
|
||||
// handle file selection change
|
||||
handleChange(file, fileList){
|
||||
this.uploadStatus = 2
|
||||
this.filename = file.name
|
||||
this.upfile = file
|
||||
console.log(file)
|
||||
|
||||
// debugger
|
||||
// var result = Buffer.from(file);
|
||||
|
||||
|
||||
},
|
||||
readFile(file) {
|
||||
return new Promise((resolve, reject) => {
|
||||
const fileReader = new FileReader();
|
||||
fileReader.onload = function () {
|
||||
resolve(fileReader);
|
||||
};
|
||||
fileReader.onerror = function (err) {
|
||||
reject(err);
|
||||
};
|
||||
fileReader.readAsDataURL(file);
|
||||
});
|
||||
},
|
||||
// re-upload
|
||||
uploadAgain(){
|
||||
this.uploadStatus = 0
|
||||
this.upfile = ""
|
||||
this.filename = ""
|
||||
this.asrResult = ""
|
||||
},
|
||||
|
||||
// play decoded audio data
|
||||
playAudioData(wav_buffer){
|
||||
audioCtx.decodeAudioData(wav_buffer, buffer => {
|
||||
let source = audioCtx.createBufferSource();
|
||||
source.buffer = buffer
|
||||
|
||||
source.connect(audioCtx.destination);
|
||||
source.start();
|
||||
}, function (e) {
|
||||
});
|
||||
},
|
||||
|
||||
// play the locally selected audio file
|
||||
async play(){
|
||||
if(this.upfile){
|
||||
let fileRes = ""
|
||||
let fileString = ""
|
||||
fileRes = await this.readFile(this.upfile.raw);
|
||||
fileString = fileRes.result;
|
||||
const audioBase64type = (fileString.match(/data:[^;]*;base64,/))?.[0] ?? '';
|
||||
const isBase64 = !!fileString.match(/data:[^;]*;base64,/);
|
||||
const uploadBase64 = fileString.substr(audioBase64type.length);
|
||||
// isBase64 ? uploadBase64 : undefined
|
||||
// convert the base64 string to a binary array
|
||||
let typedArray = this.base64ToUint8Array(isBase64 ? uploadBase64 : undefined)
|
||||
this.playAudioData(typedArray.buffer)
|
||||
}
|
||||
},
|
||||
base64ToUint8Array(base64String){
|
||||
const padding = '='.repeat((4 - base64String.length % 4) % 4);
|
||||
const base64 = (base64String + padding)
|
||||
.replace(/-/g, '+')
|
||||
.replace(/_/g, '/');
|
||||
|
||||
const rawData = window.atob(base64);
|
||||
const outputArray = new Uint8Array(rawData.length);
|
||||
|
||||
for (let i = 0; i < rawData.length; ++i) {
|
||||
outputArray[i] = rawData.charCodeAt(i);
|
||||
}
|
||||
return outputArray;
|
||||
},
|
||||
|
||||
// start recognition
|
||||
async beginToIdentify(){
|
||||
// recognizing
|
||||
this.recognitionStatus = 1
|
||||
|
||||
const formData = new FormData();
|
||||
formData.append('files', this.upfile.raw);
|
||||
|
||||
const result = await asrOffline(formData)
|
||||
// ready for re-recognition
|
||||
this.recognitionStatus = 2
|
||||
console.log(result);
|
||||
// debugger
|
||||
if (result.data.code === 0) {
|
||||
|
||||
this.$message.success("识别成功")
|
||||
// store the recognized text
|
||||
this.asrResult = result.data.result
|
||||
|
||||
} else {
|
||||
this.$message.success("识别失败")
|
||||
};
|
||||
},
|
||||
|
||||
// reset and recognize again
|
||||
toIdentifyThe(){
|
||||
// this.uploadAgain()
|
||||
this.uploadStatus = 0
|
||||
this.recognitionStatus = 0
|
||||
this.asrResult = ""
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
</script>
|
||||
|
||||
<style lang="less" scoped>
|
||||
@import "./style.less";
|
||||
|
||||
|
||||
</style>
|
@ -0,0 +1,293 @@
|
||||
.audioFileIdentification {
|
||||
width: 1106px;
|
||||
height: 270px;
|
||||
// background-color: pink;
|
||||
padding-top: 40px;
|
||||
box-sizing: border-box;
|
||||
display: flex;
|
||||
// initial upload state
|
||||
.public_recognition_speech {
|
||||
width: 295px;
|
||||
height: 230px;
|
||||
padding-top: 32px;
|
||||
box-sizing: border-box;
|
||||
// upload button
|
||||
.upload_img {
|
||||
width: 116px;
|
||||
height: 116px;
|
||||
background: #2932E1;
|
||||
border-radius: 50%;
|
||||
margin-left: 98px;
|
||||
cursor: pointer;
|
||||
margin-bottom: 20px;
|
||||
display: flex;
|
||||
justify-content: center;
|
||||
align-items: center;
|
||||
.upload_img_back {
|
||||
width: 34.38px;
|
||||
height: 30.82px;
|
||||
background: #2932E1;
|
||||
background: url("../../../../assets/image/ic_大-上传文件.svg");
|
||||
background-repeat: no-repeat;
|
||||
background-position: center;
|
||||
background-size: 34.38px 30.82px;
|
||||
cursor: pointer;
|
||||
}
|
||||
&:hover {
|
||||
opacity: 0.9;
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
|
||||
.speech_text {
|
||||
height: 22px;
|
||||
font-family: PingFangSC-Medium;
|
||||
font-size: 16px;
|
||||
color: #000000;
|
||||
font-weight: 500;
|
||||
margin-left: 124px;
|
||||
margin-bottom: 10px;
|
||||
};
|
||||
.speech_text_prompt {
|
||||
height: 20px;
|
||||
font-family: PingFangSC-Regular;
|
||||
font-size: 14px;
|
||||
color: #999999;
|
||||
font-weight: 400;
|
||||
margin-left: 84px;
|
||||
};
|
||||
};
|
||||
// uploading state
|
||||
.on_the_cross_speech {
|
||||
width: 295px;
|
||||
height: 230px;
|
||||
padding-top: 32px;
|
||||
box-sizing: border-box;
|
||||
|
||||
.on_the_upload_img {
|
||||
width: 116px;
|
||||
height: 116px;
|
||||
background: #7278F5;
|
||||
border-radius: 50%;
|
||||
margin-left: 98px;
|
||||
cursor: pointer;
|
||||
margin-bottom: 20px;
|
||||
display: flex;
|
||||
justify-content: center;
|
||||
align-items: center;
|
||||
|
||||
.on_the_upload_img_back {
|
||||
width: 34.38px;
|
||||
height: 30.82px;
|
||||
background: #7278F5;
|
||||
background: url("../../../../assets/image/ic_大-上传文件.svg");
|
||||
background-repeat: no-repeat;
|
||||
background-position: center;
|
||||
background-size: 34.38px 30.82px;
|
||||
cursor: pointer;
|
||||
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
.on_the_speech_text {
|
||||
height: 22px;
|
||||
font-family: PingFangSC-Medium;
|
||||
font-size: 16px;
|
||||
color: #000000;
|
||||
font-weight: 500;
|
||||
margin-left: 124px;
|
||||
margin-bottom: 10px;
|
||||
display: flex;
|
||||
// justify-content: center;
|
||||
align-items: center;
|
||||
.on_the_speech_loading {
|
||||
display: inline-block;
|
||||
width: 16px;
|
||||
height: 16px;
|
||||
background: #7278F5;
|
||||
// background: url("../../../../assets/image/ic_开始聊天.svg");
|
||||
// background-repeat: no-repeat;
|
||||
// background-position: center;
|
||||
// background-size: 16px 16px;
|
||||
margin-right: 8px;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
// ready to recognize
|
||||
.public_recognition_speech_start {
|
||||
width: 295px;
|
||||
height: 230px;
|
||||
padding-top: 32px;
|
||||
box-sizing: border-box;
|
||||
position: relative;
|
||||
.public_recognition_speech_content {
|
||||
width: 100%;
|
||||
position: absolute;
|
||||
top: 40px;
|
||||
left: 50%;
|
||||
transform: translateX(-50%);
|
||||
display: flex;
|
||||
justify-content: center;
|
||||
align-items: center;
|
||||
|
||||
.public_recognition_speech_title {
|
||||
height: 22px;
|
||||
font-family: PingFangSC-Regular;
|
||||
font-size: 16px;
|
||||
color: #000000;
|
||||
font-weight: 400;
|
||||
};
|
||||
.public_recognition_speech_again {
|
||||
height: 22px;
|
||||
font-family: PingFangSC-Regular;
|
||||
font-size: 16px;
|
||||
color: #2932E1;
|
||||
font-weight: 400;
|
||||
margin-left: 30px;
|
||||
cursor: pointer;
|
||||
};
|
||||
.public_recognition_speech_play {
|
||||
height: 22px;
|
||||
font-family: PingFangSC-Regular;
|
||||
font-size: 16px;
|
||||
color: #2932E1;
|
||||
font-weight: 400;
|
||||
margin-left: 20px;
|
||||
cursor: pointer;
|
||||
};
|
||||
};
|
||||
.speech_promp {
|
||||
position: absolute;
|
||||
top: 112px;
|
||||
left: 50%;
|
||||
transform: translateX(-50%);
|
||||
width: 142px;
|
||||
height: 44px;
|
||||
background: #2932E1;
|
||||
border-radius: 22px;
|
||||
font-family: PingFangSC-Medium;
|
||||
font-size: 14px;
|
||||
color: #FFFFFF;
|
||||
text-align: center;
|
||||
line-height: 44px;
|
||||
font-weight: 500;
|
||||
cursor: pointer;
|
||||
};
|
||||
|
||||
|
||||
};
|
||||
// recognizing state
|
||||
.public_recognition_speech_identify {
|
||||
width: 295px;
|
||||
height: 230px;
|
||||
padding-top: 32px;
|
||||
box-sizing: border-box;
|
||||
position: relative;
|
||||
.public_recognition_speech_identify_box {
|
||||
width: 143px;
|
||||
height: 44px;
|
||||
background: #7278F5;
|
||||
border-radius: 22px;
|
||||
position: absolute;
|
||||
top: 50%;
|
||||
left: 50%;
|
||||
transform: translate(-50%,-50%);
|
||||
display: flex;
|
||||
justify-content: center;
|
||||
align-items: center;
|
||||
cursor: pointer;
|
||||
.public_recognition_speech_identify_back_img {
|
||||
width: 16px;
|
||||
height: 16px;
|
||||
// background: #7278F5;
|
||||
// background: url("../../../../assets/image/ic_开始聊天.svg");
|
||||
// background-repeat: no-repeat;
|
||||
// background-position: center;
|
||||
// background-size: 16px 16px;
|
||||
};
|
||||
.public_recognition__identify_the_promp {
|
||||
height: 20px;
|
||||
font-family: PingFangSC-Medium;
|
||||
font-size: 14px;
|
||||
color: #FFFFFF;
|
||||
font-weight: 500;
|
||||
margin-left: 12px;
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
|
||||
};
|
||||
// re-recognize state
|
||||
.public_recognition_speech_identify_ahain {
|
||||
width: 295px;
|
||||
height: 230px;
|
||||
padding-top: 32px;
|
||||
box-sizing: border-box;
|
||||
position: relative;
|
||||
cursor: pointer;
|
||||
.public_recognition_speech_identify_box_btn {
|
||||
width: 143px;
|
||||
height: 44px;
|
||||
background: #2932E1;
|
||||
border-radius: 22px;
|
||||
position: absolute;
|
||||
top: 50%;
|
||||
left: 50%;
|
||||
transform: translate(-50%,-50%);
|
||||
display: flex;
|
||||
justify-content: center;
|
||||
align-items: center;
|
||||
cursor: pointer;
|
||||
.public_recognition__identify_the_btn {
|
||||
height: 20px;
|
||||
font-family: PingFangSC-Medium;
|
||||
font-size: 14px;
|
||||
color: #FFFFFF;
|
||||
font-weight: 500;
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
|
||||
};
|
||||
// arrow
|
||||
.public_recognition_point_to {
|
||||
width: 47px;
|
||||
height: 67px;
|
||||
background: url("../../../../assets/image/步骤-箭头切图@2x.png") no-repeat;
|
||||
background-position: center;
|
||||
background-size: 47px 67px;
|
||||
margin-top: 91px;
|
||||
margin-right: 67px;
|
||||
};
|
||||
// recognition result
|
||||
.public_recognition_result {
|
||||
width: 680px;
|
||||
height: 230px;
|
||||
background: #FAFAFA;
|
||||
padding: 40px 50px 0px 50px;
|
||||
div {
|
||||
&:nth-of-type(1) {
|
||||
height: 26px;
|
||||
font-family: PingFangSC-Medium;
|
||||
font-size: 16px;
|
||||
color: #666666;
|
||||
line-height: 26px;
|
||||
font-weight: 500;
|
||||
margin-bottom: 20px;
|
||||
};
|
||||
&:nth-of-type(2) {
|
||||
height: 26px;
|
||||
font-family: PingFangSC-Medium;
|
||||
font-size: 16px;
|
||||
color: #666666;
|
||||
line-height: 26px;
|
||||
font-weight: 500;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
@ -0,0 +1,114 @@
|
||||
.endToEndIdentification {
|
||||
width: 1106px;
|
||||
height: 270px;
|
||||
// background-color: pink;
|
||||
padding-top: 40px;
|
||||
box-sizing: border-box;
|
||||
display: flex;
|
||||
// start recognition
|
||||
.public_recognition_speech {
|
||||
width: 295px;
|
||||
height: 230px;
|
||||
padding-top: 32px;
|
||||
box-sizing: border-box;
|
||||
|
||||
.endToEndIdentification_start_recorder_img {
|
||||
width: 116px;
|
||||
height: 116px;
|
||||
background: #2932E1;
|
||||
background: url("../../../../assets/image/ic_开始聊天.svg");
|
||||
background-repeat: no-repeat;
|
||||
background-position: center;
|
||||
background-size: 116px 116px;
|
||||
margin-left: 98px;
|
||||
cursor: pointer;
|
||||
margin-bottom: 20px;
|
||||
&:hover {
|
||||
background: url("../../../../assets/image/ic_开始聊天_hover.svg");
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
.endToEndIdentification_end_recorder_img {
|
||||
width: 116px;
|
||||
height: 116px;
|
||||
background: #2932E1;
|
||||
border-radius: 50%;
|
||||
display: flex;
|
||||
justify-content: center;
|
||||
align-items: center;
|
||||
margin-left: 98px;
|
||||
margin-bottom: 20px;
|
||||
cursor: pointer;
|
||||
.endToEndIdentification_end_recorder_img_back {
|
||||
width: 50px;
|
||||
height: 50px;
|
||||
background: url("../../../../assets/image/ic_大-声音波浪.svg");
|
||||
background-repeat: no-repeat;
|
||||
background-position: center;
|
||||
background-size: 50px 50px;
|
||||
|
||||
&:hover {
|
||||
opacity: 0.9;
|
||||
|
||||
};
|
||||
};
|
||||
|
||||
};
|
||||
.endToEndIdentification_prompt {
|
||||
height: 22px;
|
||||
font-family: PingFangSC-Medium;
|
||||
font-size: 16px;
|
||||
color: #000000;
|
||||
font-weight: 500;
|
||||
margin-left: 124px;
|
||||
margin-bottom: 10px;
|
||||
};
|
||||
.speech_text_prompt {
|
||||
height: 20px;
|
||||
font-family: PingFangSC-Regular;
|
||||
font-size: 14px;
|
||||
color: #999999;
|
||||
font-weight: 400;
|
||||
margin-left: 90px;
|
||||
};
|
||||
};
|
||||
// arrow
|
||||
.public_recognition_point_to {
|
||||
width: 47px;
|
||||
height: 67px;
|
||||
background: url("../../../../assets/image/步骤-箭头切图@2x.png") no-repeat;
|
||||
background-position: center;
|
||||
background-size: 47px 67px;
|
||||
margin-top: 91px;
|
||||
margin-right: 67px;
|
||||
};
|
||||
// recognition result
|
||||
.public_recognition_result {
|
||||
width: 680px;
|
||||
height: 230px;
|
||||
background: #FAFAFA;
|
||||
padding: 40px 50px 0px 50px;
|
||||
div {
|
||||
&:nth-of-type(1) {
|
||||
height: 26px;
|
||||
font-family: PingFangSC-Medium;
|
||||
font-size: 16px;
|
||||
color: #666666;
|
||||
line-height: 26px;
|
||||
font-weight: 500;
|
||||
margin-bottom: 20px;
|
||||
};
|
||||
&:nth-of-type(2) {
|
||||
height: 26px;
|
||||
font-family: PingFangSC-Medium;
|
||||
font-size: 16px;
|
||||
color: #666666;
|
||||
line-height: 26px;
|
||||
font-weight: 500;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
};
|
@ -0,0 +1,112 @@
|
||||
.realTime{
|
||||
width: 1106px;
|
||||
height: 270px;
|
||||
// background-color: pink;
|
||||
padding-top: 40px;
|
||||
box-sizing: border-box;
|
||||
display: flex;
|
||||
// start recognition
|
||||
.public_recognition_speech {
|
||||
width: 295px;
|
||||
height: 230px;
|
||||
padding-top: 32px;
|
||||
box-sizing: border-box;
|
||||
.endToEndIdentification_start_recorder_img {
|
||||
width: 116px;
|
||||
height: 116px;
|
||||
background: #2932E1;
|
||||
background: url("../../../../assets/image/ic_开始聊天.svg");
|
||||
background-repeat: no-repeat;
|
||||
background-position: center;
|
||||
background-size: 116px 116px;
|
||||
margin-left: 98px;
|
||||
cursor: pointer;
|
||||
margin-bottom: 20px;
|
||||
&:hover {
|
||||
background: url("../../../../assets/image/ic_开始聊天_hover.svg");
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
.endToEndIdentification_end_recorder_img {
|
||||
width: 116px;
|
||||
height: 116px;
|
||||
background: #2932E1;
|
||||
border-radius: 50%;
|
||||
display: flex;
|
||||
justify-content: center;
|
||||
align-items: center;
|
||||
margin-left: 98px;
|
||||
margin-bottom: 20px;
|
||||
cursor: pointer;
|
||||
.endToEndIdentification_end_recorder_img_back {
|
||||
width: 50px;
|
||||
height: 50px;
|
||||
background: url("../../../../assets/image/ic_大-声音波浪.svg");
|
||||
background-repeat: no-repeat;
|
||||
background-position: center;
|
||||
background-size: 50px 50px;
|
||||
|
||||
&:hover {
|
||||
opacity: 0.9;
|
||||
|
||||
};
|
||||
};
|
||||
|
||||
};
|
||||
.endToEndIdentification_prompt {
|
||||
height: 22px;
|
||||
font-family: PingFangSC-Medium;
|
||||
font-size: 16px;
|
||||
color: #000000;
|
||||
font-weight: 500;
|
||||
margin-left: 124px;
|
||||
margin-bottom: 10px;
|
||||
};
|
||||
.speech_text_prompt {
|
||||
height: 20px;
|
||||
font-family: PingFangSC-Regular;
|
||||
font-size: 14px;
|
||||
color: #999999;
|
||||
font-weight: 400;
|
||||
margin-left: 105px;
|
||||
};
|
||||
};
|
||||
// arrow
|
||||
.public_recognition_point_to {
|
||||
width: 47px;
|
||||
height: 67px;
|
||||
background: url("../../../../assets/image/步骤-箭头切图@2x.png") no-repeat;
|
||||
background-position: center;
|
||||
background-size: 47px 67px;
|
||||
margin-top: 91px;
|
||||
margin-right: 67px;
|
||||
};
|
||||
// recognition result
|
||||
.public_recognition_result {
|
||||
width: 680px;
|
||||
height: 230px;
|
||||
background: #FAFAFA;
|
||||
padding: 40px 50px 0px 50px;
|
||||
div {
|
||||
&:nth-of-type(1) {
|
||||
height: 26px;
|
||||
font-family: PingFangSC-Medium;
|
||||
font-size: 16px;
|
||||
color: #666666;
|
||||
line-height: 26px;
|
||||
font-weight: 500;
|
||||
margin-bottom: 20px;
|
||||
};
|
||||
&:nth-of-type(2) {
|
||||
height: 26px;
|
||||
font-family: PingFangSC-Medium;
|
||||
font-size: 16px;
|
||||
color: #666666;
|
||||
line-height: 26px;
|
||||
font-weight: 500;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
@ -0,0 +1,76 @@
|
||||
.speech_recognition {
|
||||
width: 1200px;
|
||||
height: 410px;
|
||||
background: #FFFFFF;
|
||||
padding: 40px 50px 50px 44px;
|
||||
position: relative;
|
||||
.frame {
|
||||
width: 605px;
|
||||
height: 50px;
|
||||
border: 1px solid rgba(238,238,238,1);
|
||||
border-radius: 25px;
|
||||
position: absolute;
|
||||
}
|
||||
.speech_recognition_mytabs {
|
||||
.ant-tabs-tab {
|
||||
position: relative;
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
// padding: 12px 0;
|
||||
font-size: 14px;
|
||||
background: transparent;
|
||||
border: 0;
|
||||
outline: none;
|
||||
cursor: pointer;
|
||||
padding: 12px 26px;
|
||||
box-sizing: border-box;
|
||||
}
|
||||
.ant-tabs-tab-active {
|
||||
height: 50px;
|
||||
background: #EEEFFD;
|
||||
border-radius: 25px;
|
||||
padding: 12px 26px;
|
||||
box-sizing: border-box;
|
||||
};
|
||||
.speech_recognition .speech_recognition_mytabs .ant-tabs-ink-bar {
|
||||
position: absolute;
|
||||
background: transparent !important;
|
||||
pointer-events: none;
|
||||
}
|
||||
.ant-tabs-ink-bar {
|
||||
position: absolute;
|
||||
background: transparent !important;
|
||||
pointer-events: none;
|
||||
}
|
||||
.experience .experience_wrapper .experience_content .experience_tabs .ant-tabs-nav::before {
|
||||
position: absolute;
|
||||
right: 0;
|
||||
left: 0;
|
||||
border-bottom: 1px solid transparent !important;
|
||||
// border: none;
|
||||
content: '';
|
||||
}
|
||||
.ant-tabs-top > .ant-tabs-nav::before, .ant-tabs-bottom > .ant-tabs-nav::before, .ant-tabs-top > div > .ant-tabs-nav::before, .ant-tabs-bottom > div > .ant-tabs-nav::before {
|
||||
position: absolute;
|
||||
right: 0;
|
||||
left: 0;
|
||||
border-bottom: 1px solid transparent !important;
|
||||
// border: none;
|
||||
content: '';
|
||||
}
|
||||
.ant-tabs-top > .ant-tabs-nav::before, .ant-tabs-bottom > .ant-tabs-nav::before, .ant-tabs-top > div > .ant-tabs-nav::before, .ant-tabs-bottom > div > .ant-tabs-nav::before {
|
||||
position: absolute;
|
||||
right: 0;
|
||||
left: 0;
|
||||
border-bottom: 1px solid transparent !important;
|
||||
content: '';
|
||||
}
|
||||
.ant-tabs-nav::before {
|
||||
position: absolute;
|
||||
right: 0;
|
||||
left: 0;
|
||||
border-bottom: 1px solid transparent !important;
|
||||
content: '';
|
||||
};
|
||||
};
|
||||
};
|
@ -0,0 +1,181 @@
|
||||
.voice_chat {
|
||||
width: 1200px;
|
||||
height: 410px;
|
||||
background: #FFFFFF;
|
||||
position: relative;
|
||||
// start chat
|
||||
.voice_chat_wrapper {
|
||||
top: 50%;
|
||||
left: 50%;
|
||||
transform: translate(-50%,-50%);
|
||||
position: absolute;
|
||||
.voice_chat_btn {
|
||||
width: 116px;
|
||||
height: 116px;
|
||||
margin-left: 54px;
|
||||
// background: #2932E1;
|
||||
border-radius: 50%;
|
||||
cursor: pointer;
|
||||
background: url("../../../assets/image/ic_开始聊天.svg");
|
||||
background-repeat: no-repeat;
|
||||
background-position: center;
|
||||
background-size: 116px 116px;
|
||||
margin-bottom: 17px;
|
||||
&:hover {
|
||||
width: 116px;
|
||||
height: 116px;
|
||||
background: url("../../../assets/image/ic_开始聊天_hover.svg");
|
||||
background-repeat: no-repeat;
|
||||
background-position: center;
|
||||
background-size: 116px 116px;
|
||||
};
|
||||
|
||||
};
|
||||
.voice_chat_btn_title {
|
||||
height: 22px;
|
||||
font-family: PingFangSC-Medium;
|
||||
font-size: 16px;
|
||||
color: #000000;
|
||||
letter-spacing: 0;
|
||||
text-align: center;
|
||||
line-height: 22px;
|
||||
font-weight: 500;
|
||||
margin-bottom: 10px;
|
||||
};
|
||||
.voice_chat_btn_prompt {
|
||||
height: 24px;
|
||||
font-family: PingFangSC-Regular;
|
||||
font-size: 14px;
|
||||
color: #999999;
|
||||
letter-spacing: 0;
|
||||
text-align: center;
|
||||
line-height: 24px;
|
||||
font-weight: 400;
|
||||
};
|
||||
};
|
||||
.voice_chat_wrapper::after {
|
||||
content: "";
|
||||
display: block;
|
||||
clear: both;
|
||||
visibility: hidden;
|
||||
};
|
||||
// end chat
|
||||
.voice_chat_dialog_wrapper {
|
||||
width: 1200px;
|
||||
height: 410px;
|
||||
background: #FFFFFF;
|
||||
position: relative;
|
||||
.dialog_box {
|
||||
width: 100%;
|
||||
height: 410px;
|
||||
padding: 50px 198px 82px 199px;
|
||||
box-sizing: border-box;
|
||||
|
||||
.dialog_content {
|
||||
width: 100%;
|
||||
height: 268px;
|
||||
// background: rgb(113, 144, 145);
|
||||
padding: 0px;
|
||||
overflow: auto;
|
||||
li {
|
||||
list-style-type: none;
|
||||
margin-bottom: 33px;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
&:last-of-type(1) {
|
||||
margin-bottom: 0px;
|
||||
};
|
||||
.dialog_content_img_pp {
|
||||
width: 60px;
|
||||
height: 60px;
|
||||
// transform: scaleX(-1);
|
||||
background: url("../../../assets/image/飞桨头像@2x.png");
|
||||
background-repeat: no-repeat;
|
||||
background-position: center;
|
||||
background-size: 60px 60px;
|
||||
margin-right: 20px;
|
||||
};
|
||||
.dialog_content_img_user {
|
||||
width: 60px;
|
||||
height: 60px;
|
||||
transform: scaleX(-1);
|
||||
background: url("../../../assets/image/用户头像@2x.png");
|
||||
background-repeat: no-repeat;
|
||||
background-position: center;
|
||||
background-size: 60px 60px;
|
||||
margin-left: 20px;
|
||||
};
|
||||
.dialog_content_dialogue_pp {
|
||||
height: 50px;
|
||||
background: #F5F5F5;
|
||||
border-radius: 25px;
|
||||
font-family: PingFangSC-Regular;
|
||||
font-size: 14px;
|
||||
color: #000000;
|
||||
line-height: 50px;
|
||||
font-weight: 400;
|
||||
padding: 0px 16px;
|
||||
box-sizing: border-box;
|
||||
};
|
||||
.dialog_content_dialogue_user {
|
||||
height: 50px;
|
||||
background: rgba(41,50,225,0.90);
|
||||
border-radius: 25px;
|
||||
font-family: PingFangSC-Regular;
|
||||
font-size: 14px;
|
||||
color: #FFFFFF;
|
||||
line-height: 50px;
|
||||
font-weight: 400;
|
||||
padding: 0px 16px;
|
||||
box-sizing: border-box;
|
||||
};
|
||||
};
|
||||
};
|
||||
.move_dialogue {
|
||||
justify-content: flex-end;
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
.btn_end_dialog {
|
||||
width: 124px;
|
||||
height: 42px;
|
||||
line-height: 42px;
|
||||
background: #FFFFFF;
|
||||
box-shadow: 0px 4px 16px 0px rgba(0,0,0,0.09);
|
||||
border-radius: 21px;
|
||||
padding: 0px 24px;
|
||||
box-sizing: border-box;
|
||||
position: absolute;
|
||||
left: 50%;
|
||||
bottom: 40px;
|
||||
transform: translateX(-50%);
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
align-items: center;
|
||||
cursor: pointer;
|
||||
span {
|
||||
display: inline-block;
|
||||
&:nth-of-type(1) {
|
||||
width: 16px;
|
||||
height: 16px;
|
||||
background: url("../../../assets/image/ic_小-结束.svg");
|
||||
background-repeat: no-repeat;
|
||||
background-position: center;
|
||||
background-size: 16px 16px;
|
||||
|
||||
};
|
||||
&:nth-of-type(2) {
|
||||
height: 20px;
|
||||
font-family: PingFangSC-Regular;
|
||||
font-size: 14px;
|
||||
color: #F33E3E;
|
||||
text-align: center;
|
||||
font-weight: 400;
|
||||
line-height: 20px;
|
||||
margin-left: 4px;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|