parent
9f3d1af3b2
commit
47fdbc47f7
@ -0,0 +1,78 @@
|
||||
import json
|
||||
import threading
|
||||
import time
|
||||
from azure.cognitiveservices.speech import SpeechConfig, SpeechRecognizer, SpeechSynthesizer
|
||||
from azure.iot.device import IoTHubDeviceClient, Message, MethodResponse
|
||||
|
||||
# Azure service configuration -- replace the placeholders with real values
# before running: speech subscription key, service region, recognition
# language, and the IoT Hub device connection string.
api_key = '<key>'
location = '<location>'
language = '<language>'
connection_string = '<connection_string>'

# Connect this virtual device to Azure IoT Hub so it can send recognized
# speech as telemetry and receive direct method requests.
device_client = IoTHubDeviceClient.create_from_connection_string(connection_string)

print('Connecting')
device_client.connect()
print('Connected')

# Speech-to-text configuration: same key/region as the rest of the speech
# services, with the recognition language set up front.
recognizer_config = SpeechConfig(subscription=api_key,
                                 region=location,
                                 speech_recognition_language=language)

recognizer = SpeechRecognizer(speech_config=recognizer_config)
|
||||
|
||||
def recognized(args):
    """Recognition callback: forward any non-empty recognized text to
    IoT Hub as a JSON telemetry message of the form {"speech": <text>}."""
    text = args.result.text
    if text:
        device_client.send_message(Message(json.dumps({'speech': text})))
|
||||
|
||||
# Forward every recognition result to IoT Hub and listen continuously in
# the background.
recognizer.recognized.connect(recognized)

recognizer.start_continuous_recognition()

# Separate speech config for text-to-speech; the synthesis language is set
# via a property rather than a constructor argument.
speech_config = SpeechConfig(subscription=api_key,
                             region=location)
speech_config.speech_synthesis_language = language
speech_synthesizer = SpeechSynthesizer(speech_config=speech_config)

# Pick the first available voice whose locale matches the configured
# language and use it for all spoken responses.
# NOTE(review): next() raises StopIteration if no voice matches -- assumes
# `language` is a locale the service supports; confirm before deploying.
voices = speech_synthesizer.get_voices_async().get().voices
first_voice = next(x for x in voices if x.locale.lower() == language.lower())
speech_config.speech_synthesis_voice_name = first_voice.short_name
|
||||
|
||||
def say(text):
    """Speak the given text aloud using the module-level speech synthesizer."""
    speech_synthesizer.speak_text(text)
|
||||
|
||||
def announce_timer(minutes, seconds):
    """Speak an announcement that a timer has finished.

    minutes/seconds describe the original timer length (e.g. 2 and 30 for
    a 2 minute 30 second timer); zero-valued parts are omitted.
    """
    parts = []
    if minutes > 0:
        parts.append(f'{minutes} minute')
    if seconds > 0:
        parts.append(f'{seconds} second')
    # Join the parts with a space: the previous concatenation produced
    # run-together text such as "1 minute30 second timer." Also fixes the
    # spoken apostrophe ("Time's up", not "Times up").
    announcement = "Time's up on your " + ' '.join(parts) + ' timer.'
    say(announcement)
|
||||
|
||||
def create_timer(total_seconds):
    """Start a timer for total_seconds seconds and announce it aloud.

    Schedules announce_timer to run on a background threading.Timer when
    the time is up, then immediately speaks a confirmation that the timer
    has started.
    """
    minutes, seconds = divmod(total_seconds, 60)
    # Fire announce_timer once, total_seconds from now, on its own thread.
    threading.Timer(total_seconds, announce_timer, args=[minutes, seconds]).start()
    parts = []
    if minutes > 0:
        parts.append(f'{minutes} minute')
    if seconds > 0:
        parts.append(f'{seconds} second')
    # Join with a space: the previous concatenation produced run-together
    # text such as "1 minute30 second timer started."
    announcement = ' '.join(parts) + ' timer started.'
    say(announcement)
|
||||
|
||||
def handle_method_request(request):
    """Handle a direct method request from IoT Hub.

    Supports the 'set-timer' method, whose payload carries the timer
    length as {"seconds": <int>}. Every request is acknowledged with a
    200 response so the caller does not time out.
    """
    if request.name == 'set-timer':
        # NOTE(review): assumes the payload arrives as a JSON string --
        # some SDK versions deliver request.payload already deserialized
        # as a dict; confirm against the caller.
        payload = json.loads(request.payload)
        # Tolerate a missing key and ignore non-positive durations
        # instead of raising KeyError / starting a bogus timer.
        seconds = payload.get('seconds', 0)
        if seconds > 0:
            create_timer(seconds)

    method_response = MethodResponse.create_from_method_request(request, 200)
    device_client.send_method_response(method_response)
|
||||
|
||||
# Register the direct-method handler, then keep the process alive so the
# background recognizer and the IoT Hub client can keep running.
device_client.on_method_request_received = handle_method_request

while True:
    time.sleep(1)
|
@ -1,4 +1,56 @@
|
||||
# Text to speech - Virtual IoT device
|
||||
|
||||
In this part of the lesson, you will write code to convert text to speech using the speech service.
|
||||
|
||||
> 💁 For details of the available speech synthesis output formats, see the [SpeechSynthesisOutputFormat documentation](https://docs.microsoft.com/en-us/dotnet/api/microsoft.cognitiveservices.speech.speechsynthesisoutputformat?view=azure-dotnet).
|
||||
## Convert text to speech
|
||||
|
||||
The speech services SDK that you used in the last lesson to convert speech to text can be used to convert text back to speech. When requesting speech, you need to provide the voice to use as speech can be generated using a variety of different voices.
|
||||
|
||||
Each language supports a range of different voices, and you can get the list of supported voices for each language from the speech services SDK.
|
||||
|
||||
### Task - convert text to speech
|
||||
|
||||
1. Import the `SpeechSynthesizer` from the `azure.cognitiveservices.speech` package by adding it to the existing imports:
|
||||
|
||||
```python
|
||||
from azure.cognitiveservices.speech import SpeechConfig, SpeechRecognizer, SpeechSynthesizer
|
||||
```
|
||||
|
||||
1. Above the `say` function, create a speech configuration to use with the speech synthesizer:
|
||||
|
||||
```python
|
||||
speech_config = SpeechConfig(subscription=api_key,
|
||||
region=location)
|
||||
speech_config.speech_synthesis_language = language
|
||||
speech_synthesizer = SpeechSynthesizer(speech_config=speech_config)
|
||||
```
|
||||
|
||||
    This uses the same API key, location, and language that were used by the recognizer.
|
||||
|
||||
1. Below this, add the following code to get a voice and set it on the speech config:
|
||||
|
||||
```python
|
||||
voices = speech_synthesizer.get_voices_async().get().voices
|
||||
first_voice = next(x for x in voices if x.locale.lower() == language.lower())
|
||||
speech_config.speech_synthesis_voice_name = first_voice.short_name
|
||||
```
|
||||
|
||||
This retrieves a list of all the available voices, then finds the first voice that matches the language that is being used.
|
||||
|
||||
> 💁 You can get the full list of supported voices from the [Language and voice support documentation on Microsoft Docs](https://docs.microsoft.com/azure/cognitive-services/speech-service/language-support?WT.mc_id=academic-17441-jabenn#text-to-speech). If you want to use a specific voice, then you can remove this function and hard code the voice to the voice name from this documentation. For example:
|
||||
>
|
||||
> ```python
|
||||
> speech_config.speech_synthesis_voice_name = 'hi-IN-SwaraNeural'
|
||||
> ```
|
||||
|
||||
1. Finally update the contents of the `say` function to use the speech synthesizer to speak the response:
|
||||
|
||||
```python
|
||||
speech_synthesizer.speak_text(text)
|
||||
```
|
||||
|
||||
1. Run the app, and ensure the function app is also running. Set some timers, and you will hear a spoken response saying that your timer has been set, then another spoken response when the timer is complete.
|
||||
|
||||
> 💁 You can find this code in the [code-spoken-response/virtual-iot-device](code-spoken-response/virtual-iot-device) folder.
|
||||
|
||||
😀 Your timer program was a success!
|
||||
|
Loading…
Reference in new issue