Adding virtual device text to speech lab

pull/103/head
Jim Bennett 4 years ago
parent 9f3d1af3b2
commit 47fdbc47f7

@ -14,11 +14,11 @@ print('Connecting')
device_client.connect() device_client.connect()
print('Connected') print('Connected')
speech_config = SpeechConfig(subscription=api_key, recognizer_config = SpeechConfig(subscription=api_key,
region=location, region=location,
speech_recognition_language=language) speech_recognition_language=language)
recognizer = SpeechRecognizer(speech_config=speech_config) recognizer = SpeechRecognizer(speech_config=recognizer_config)
def recognized(args): def recognized(args):
if len(args.result.text) > 0: if len(args.result.text) > 0:

@ -5,11 +5,11 @@ api_key = '<key>'
location = '<location>' location = '<location>'
language = '<language>' language = '<language>'
speech_config = SpeechConfig(subscription=api_key, recognizer_config = SpeechConfig(subscription=api_key,
region=location, region=location,
speech_recognition_language=language) speech_recognition_language=language)
recognizer = SpeechRecognizer(speech_config=speech_config) recognizer = SpeechRecognizer(speech_config=recognizer_config)
def recognized(args): def recognized(args):
print(args.result.text) print(args.result.text)

@ -45,7 +45,7 @@ On Windows, Linux, and macOS, the speech services Python SDK can be used to list
location = '<location>' location = '<location>'
language = '<language>' language = '<language>'
speech_config = SpeechConfig(subscription=api_key, recognizer_config = SpeechConfig(subscription=api_key,
region=location, region=location,
speech_recognition_language=language) speech_recognition_language=language)
``` ```
@ -59,7 +59,7 @@ On Windows, Linux, and macOS, the speech services Python SDK can be used to list
1. Add the following code to create a speech recognizer: 1. Add the following code to create a speech recognizer:
```python ```python
recognizer = SpeechRecognizer(speech_config=speech_config) recognizer = SpeechRecognizer(speech_config=recognizer_config)
``` ```
1. The speech recognizer runs on a background thread, listening for audio and converting any speech in it to text. You can get the text using a callback function - a function you define and pass to the recognizer. Every time speech is detected, the callback is called. Add the following code to define a callback that prints the text to the console, and pass this callback to the recognizer: 1. The speech recognizer runs on a background thread, listening for audio and converting any speech in it to text. You can get the text using a callback function - a function you define and pass to the recognizer. Every time speech is detected, the callback is called. Add the following code to define a callback that prints the text to the console, and pass this callback to the recognizer:

@ -0,0 +1,78 @@
import json
import threading
import time
from azure.cognitiveservices.speech import SpeechConfig, SpeechRecognizer, SpeechSynthesizer
from azure.iot.device import IoTHubDeviceClient, Message, MethodResponse
# Azure Speech service and IoT Hub settings — replace the placeholders
# with real values before running.
api_key = '<key>'
location = '<location>'
language = '<language>'
connection_string = '<connection_string>'

# Connect this virtual device to Azure IoT Hub so it can send telemetry
# and receive direct method requests.
device_client = IoTHubDeviceClient.create_from_connection_string(connection_string)
print('Connecting')
device_client.connect()
print('Connected')

# Configure speech-to-text for the chosen language and create the recognizer.
recognizer_config = SpeechConfig(subscription=api_key,
                                 region=location,
                                 speech_recognition_language=language)
recognizer = SpeechRecognizer(speech_config=recognizer_config)
def recognized(args):
    """Speech-recognized callback: forward recognized text to IoT Hub.

    Empty recognition results are ignored; non-empty text is sent as a
    JSON telemetry message with a single 'speech' field.
    """
    text = args.result.text
    if not text:
        return
    telemetry = json.dumps({'speech': text})
    device_client.send_message(Message(telemetry))
# Register the callback and start continuous recognition; the recognizer
# listens on a background thread and invokes the callback per utterance.
recognizer.recognized.connect(recognized)
recognizer.start_continuous_recognition()
# Configure text-to-speech using the same key and location as the recognizer.
speech_config = SpeechConfig(subscription=api_key,
                             region=location)
speech_config.speech_synthesis_language = language
speech_synthesizer = SpeechSynthesizer(speech_config=speech_config)

# Pick the first available voice whose locale matches the configured language.
voices = speech_synthesizer.get_voices_async().get().voices
first_voice = next(x for x in voices if x.locale.lower() == language.lower())
speech_config.speech_synthesis_voice_name = first_voice.short_name

# Recreate the synthesizer so the voice selection takes effect: a
# SpeechSynthesizer captures its SpeechConfig at construction time, so
# setting the voice name afterwards does not change an existing instance.
speech_synthesizer = SpeechSynthesizer(speech_config=speech_config)
def say(text):
    """Speak *text* aloud using the module-level speech synthesizer."""
    speech_synthesizer.speak_text(text)
def announce_timer(minutes, seconds):
    """Speak an announcement that a timer has expired.

    minutes/seconds are the timer duration as split by divmod in
    create_timer; zero components are omitted from the announcement.
    """
    parts = []
    if minutes > 0:
        parts.append(f'{minutes} minute')
    if seconds > 0:
        parts.append(f'{seconds} second')
    # Join with a space so mixed durations read '1 minute 30 second timer.'
    # (the original concatenation produced '1 minute30 second timer.').
    say('Times up on your ' + ' '.join(parts) + ' timer.')
def create_timer(total_seconds):
    """Start a background timer for *total_seconds* and announce it was set.

    Splits the duration into minutes and seconds for the spoken text, and
    schedules announce_timer to fire once when the timer expires.
    """
    minutes, seconds = divmod(total_seconds, 60)
    # threading.Timer runs announce_timer once, total_seconds from now.
    threading.Timer(total_seconds, announce_timer, args=[minutes, seconds]).start()

    # Join with a space so mixed durations read '1 minute 30 second timer
    # started.' (the original concatenation omitted the separator).
    parts = []
    if minutes > 0:
        parts.append(f'{minutes} minute')
    if seconds > 0:
        parts.append(f'{seconds} second')
    say(' '.join(parts) + ' timer started.')
def handle_method_request(request):
    """Handle a direct method request from IoT Hub.

    Recognizes the 'set-timer' method, whose JSON payload carries the
    requested duration in a 'seconds' field; any other method name is
    acknowledged without action.
    """
    if request.name == 'set-timer':
        payload = json.loads(request.payload)
        seconds = payload['seconds']
        if seconds > 0:
            # Reuse the already-extracted value instead of re-reading the
            # payload dict a second time, as the original did.
            create_timer(seconds)

    # Always acknowledge the request with an HTTP-style 200 status.
    method_response = MethodResponse.create_from_method_request(request, 200)
    device_client.send_method_response(method_response)
# Route direct method requests from IoT Hub to the handler, then keep the
# process alive so the background recognizer and method listener keep running.
device_client.on_method_request_received = handle_method_request
while True:
    time.sleep(1)

@ -15,11 +15,11 @@ print('Connecting')
device_client.connect() device_client.connect()
print('Connected') print('Connected')
speech_config = SpeechConfig(subscription=api_key, recognizer_config = SpeechConfig(subscription=api_key,
region=location, region=location,
speech_recognition_language=language) speech_recognition_language=language)
recognizer = SpeechRecognizer(speech_config=speech_config) recognizer = SpeechRecognizer(speech_config=recognizer_config)
def recognized(args): def recognized(args):
if len(args.result.text) > 0: if len(args.result.text) > 0:

@ -34,7 +34,11 @@ Each language supports a range of different voices, and you can make a REST requ
This function is then called to store the first voice, and the voice name is printed to the console. This voice can be requested once and the value used for every call to convert text to speech. This function is then called to store the first voice, and the voice name is printed to the console. This voice can be requested once and the value used for every call to convert text to speech.
> 💁 You can get the full list of supported voices from the [Language and voice support documentation on Microsoft Docs](https://docs.microsoft.com/azure/cognitive-services/speech-service/language-support?WT.mc_id=academic-17441-jabenn#text-to-speech). If you want to use a specific voice, then you can remove this function and hard code the voice to the voice name from this documentation. > 💁 You can get the full list of supported voices from the [Language and voice support documentation on Microsoft Docs](https://docs.microsoft.com/azure/cognitive-services/speech-service/language-support?WT.mc_id=academic-17441-jabenn#text-to-speech). If you want to use a specific voice, then you can remove this function and hard code the voice to the voice name from this documentation. For example:
>
> ```python
> voice = 'hi-IN-SwaraNeural'
> ```
### Task - convert text to speech ### Task - convert text to speech
@ -131,6 +135,6 @@ Each language supports a range of different voices, and you can make a REST requ
If you get `Invalid sample rate` errors, change the `playback_format` as described above. If you get `Invalid sample rate` errors, change the `playback_format` as described above.
> 💁 You can find this code in the [code-spoken-response/pi](code-timer/spoken-response) folder. > 💁 You can find this code in the [code-spoken-response/pi](code-spoken-response/pi) folder.
😀 Your timer program was a success! 😀 Your timer program was a success!

@ -1,4 +1,56 @@
# Text to speech - Virtual IoT device # Text to speech - Virtual IoT device
In this part of the lesson, you will write code to convert text to speech using the speech service.
https://docs.microsoft.com/en-us/dotnet/api/microsoft.cognitiveservices.speech.speechsynthesisoutputformat?view=azure-dotnet ## Convert text to speech
The speech services SDK that you used in the last lesson to convert speech to text can be used to convert text back to speech. When requesting speech, you need to provide the voice to use as speech can be generated using a variety of different voices.
Each language supports a range of different voices, and you can get the list of supported voices for each language from the speech services SDK.
### Task - convert text to speech
1. Import the `SpeechSynthesizer` from the `azure.cognitiveservices.speech` package by adding it to the existing imports:
```python
from azure.cognitiveservices.speech import SpeechConfig, SpeechRecognizer, SpeechSynthesizer
```
1. Above the `say` function, create a speech configuration to use with the speech synthesizer:
```python
speech_config = SpeechConfig(subscription=api_key,
region=location)
speech_config.speech_synthesis_language = language
speech_synthesizer = SpeechSynthesizer(speech_config=speech_config)
```
This uses the same API key, location and language that was used by the recognizer.
1. Below this, add the following code to get a voice and set it on the speech config:
```python
voices = speech_synthesizer.get_voices_async().get().voices
first_voice = next(x for x in voices if x.locale.lower() == language.lower())
speech_config.speech_synthesis_voice_name = first_voice.short_name
```
This retrieves a list of all the available voices, then finds the first voice that matches the language that is being used.
> 💁 You can get the full list of supported voices from the [Language and voice support documentation on Microsoft Docs](https://docs.microsoft.com/azure/cognitive-services/speech-service/language-support?WT.mc_id=academic-17441-jabenn#text-to-speech). If you want to use a specific voice, then you can remove this function and hard code the voice to the voice name from this documentation. For example:
>
> ```python
> speech_config.speech_synthesis_voice_name = 'hi-IN-SwaraNeural'
> ```
1. Finally update the contents of the `say` function to use the speech synthesizer to speak the response:
```python
speech_synthesizer.speak_text(text)
```
1. Run the app, and ensure the function app is also running. Set some timers, and you will hear a spoken response saying that your timer has been set, then another spoken response when the timer is complete.
> 💁 You can find this code in the [code-spoken-response/virtual-iot-device](code-spoken-response/virtual-iot-device) folder.
😀 Your timer program was a success!

Loading…
Cancel
Save