diff --git a/1-getting-started/lessons/4-connect-internet/code-commands/wio-terminal/nightlight/platformio.ini b/1-getting-started/lessons/4-connect-internet/code-commands/wio-terminal/nightlight/platformio.ini index 0e141c71..1f170f4b 100644 --- a/1-getting-started/lessons/4-connect-internet/code-commands/wio-terminal/nightlight/platformio.ini +++ b/1-getting-started/lessons/4-connect-internet/code-commands/wio-terminal/nightlight/platformio.ini @@ -16,7 +16,7 @@ lib_deps = knolleary/PubSubClient @ 2.8 bblanchon/ArduinoJson @ 6.17.3 seeed-studio/Seeed Arduino rpcWiFi @ 1.0.5 - seeed-studio/Seeed Arduino FS @ 2.0.3 - seeed-studio/Seeed Arduino SFUD @ 2.0.1 + seeed-studio/Seeed Arduino FS @ 2.1.1 + seeed-studio/Seeed Arduino SFUD @ 2.0.2 seeed-studio/Seeed Arduino rpcUnified @ 2.1.3 seeed-studio/Seeed_Arduino_mbedtls @ 3.0.1 \ No newline at end of file diff --git a/1-getting-started/lessons/4-connect-internet/code-commands/wio-terminal/nightlight/src/main.cpp b/1-getting-started/lessons/4-connect-internet/code-commands/wio-terminal/nightlight/src/main.cpp index e8c1ac21..8f0fd6b5 100644 --- a/1-getting-started/lessons/4-connect-internet/code-commands/wio-terminal/nightlight/src/main.cpp +++ b/1-getting-started/lessons/4-connect-internet/code-commands/wio-terminal/nightlight/src/main.cpp @@ -100,8 +100,7 @@ void loop() doc["light"] = light; string telemetry; - JsonObject obj = doc.as(); - serializeJson(obj, telemetry); + serializeJson(doc, telemetry); Serial.print("Sending telemetry "); Serial.println(telemetry.c_str()); diff --git a/1-getting-started/lessons/4-connect-internet/code-mqtt/wio-terminal/nightlight/platformio.ini b/1-getting-started/lessons/4-connect-internet/code-mqtt/wio-terminal/nightlight/platformio.ini index f11c7019..3b572246 100644 --- a/1-getting-started/lessons/4-connect-internet/code-mqtt/wio-terminal/nightlight/platformio.ini +++ b/1-getting-started/lessons/4-connect-internet/code-mqtt/wio-terminal/nightlight/platformio.ini @@ -15,7 +15,7 @@ 
framework = arduino lib_deps = knolleary/PubSubClient @ 2.8 seeed-studio/Seeed Arduino rpcWiFi @ 1.0.5 - seeed-studio/Seeed Arduino FS @ 2.0.3 - seeed-studio/Seeed Arduino SFUD @ 2.0.1 + seeed-studio/Seeed Arduino FS @ 2.1.1 + seeed-studio/Seeed Arduino SFUD @ 2.0.2 seeed-studio/Seeed Arduino rpcUnified @ 2.1.3 seeed-studio/Seeed_Arduino_mbedtls @ 3.0.1 diff --git a/1-getting-started/lessons/4-connect-internet/code-telemetry/wio-terminal/nightlight/platformio.ini b/1-getting-started/lessons/4-connect-internet/code-telemetry/wio-terminal/nightlight/platformio.ini index db2bae01..f6f4f1c0 100644 --- a/1-getting-started/lessons/4-connect-internet/code-telemetry/wio-terminal/nightlight/platformio.ini +++ b/1-getting-started/lessons/4-connect-internet/code-telemetry/wio-terminal/nightlight/platformio.ini @@ -15,8 +15,8 @@ framework = arduino lib_deps = knolleary/PubSubClient @ 2.8 seeed-studio/Seeed Arduino rpcWiFi @ 1.0.5 - seeed-studio/Seeed Arduino FS @ 2.0.3 - seeed-studio/Seeed Arduino SFUD @ 2.0.1 + seeed-studio/Seeed Arduino FS @ 2.1.1 + seeed-studio/Seeed Arduino SFUD @ 2.0.2 seeed-studio/Seeed Arduino rpcUnified @ 2.1.3 seeed-studio/Seeed_Arduino_mbedtls @ 3.0.1 bblanchon/ArduinoJson @ 6.17.3 diff --git a/1-getting-started/lessons/4-connect-internet/code-telemetry/wio-terminal/nightlight/src/main.cpp b/1-getting-started/lessons/4-connect-internet/code-telemetry/wio-terminal/nightlight/src/main.cpp index 5bf64b3e..81bcfff4 100644 --- a/1-getting-started/lessons/4-connect-internet/code-telemetry/wio-terminal/nightlight/src/main.cpp +++ b/1-getting-started/lessons/4-connect-internet/code-telemetry/wio-terminal/nightlight/src/main.cpp @@ -74,8 +74,7 @@ void loop() doc["light"] = light; string telemetry; - JsonObject obj = doc.as(); - serializeJson(obj, telemetry); + serializeJson(doc, telemetry); Serial.print("Sending telemetry "); Serial.println(telemetry.c_str()); diff --git a/1-getting-started/lessons/4-connect-internet/wio-terminal-mqtt.md 
b/1-getting-started/lessons/4-connect-internet/wio-terminal-mqtt.md index f1dd6650..ee114562 100644 --- a/1-getting-started/lessons/4-connect-internet/wio-terminal-mqtt.md +++ b/1-getting-started/lessons/4-connect-internet/wio-terminal-mqtt.md @@ -25,8 +25,8 @@ Install the Arduino libraries. ```ini lib_deps = seeed-studio/Seeed Arduino rpcWiFi @ 1.0.5 - seeed-studio/Seeed Arduino FS @ 2.0.3 - seeed-studio/Seeed Arduino SFUD @ 2.0.1 + seeed-studio/Seeed Arduino FS @ 2.1.1 + seeed-studio/Seeed Arduino SFUD @ 2.0.2 seeed-studio/Seeed Arduino rpcUnified @ 2.1.3 seeed-studio/Seeed_Arduino_mbedtls @ 3.0.1 ``` diff --git a/1-getting-started/lessons/4-connect-internet/wio-terminal-telemetry.md b/1-getting-started/lessons/4-connect-internet/wio-terminal-telemetry.md index 12897cf1..86dbbe62 100644 --- a/1-getting-started/lessons/4-connect-internet/wio-terminal-telemetry.md +++ b/1-getting-started/lessons/4-connect-internet/wio-terminal-telemetry.md @@ -53,8 +53,7 @@ Publish telemetry to the MQTT broker. 
doc["light"] = light; string telemetry; - JsonObject obj = doc.as(); - serializeJson(obj, telemetry); + serializeJson(doc, telemetry); Serial.print("Sending telemetry "); Serial.println(telemetry.c_str()); diff --git a/2-farm/lessons/1-predict-plant-growth/code-publish-temperature/wio-terminal/temperature-sensor/platformio.ini b/2-farm/lessons/1-predict-plant-growth/code-publish-temperature/wio-terminal/temperature-sensor/platformio.ini index 916fd4b6..f12154eb 100644 --- a/2-farm/lessons/1-predict-plant-growth/code-publish-temperature/wio-terminal/temperature-sensor/platformio.ini +++ b/2-farm/lessons/1-predict-plant-growth/code-publish-temperature/wio-terminal/temperature-sensor/platformio.ini @@ -17,7 +17,7 @@ lib_deps = knolleary/PubSubClient @ 2.8 bblanchon/ArduinoJson @ 6.17.3 seeed-studio/Seeed Arduino rpcWiFi @ 1.0.5 - seeed-studio/Seeed Arduino FS @ 2.0.3 - seeed-studio/Seeed Arduino SFUD @ 2.0.1 + seeed-studio/Seeed Arduino FS @ 2.1.1 + seeed-studio/Seeed Arduino SFUD @ 2.0.2 seeed-studio/Seeed Arduino rpcUnified @ 2.1.3 seeed-studio/Seeed_Arduino_mbedtls @ 3.0.1 diff --git a/2-farm/lessons/1-predict-plant-growth/code-publish-temperature/wio-terminal/temperature-sensor/src/main.cpp b/2-farm/lessons/1-predict-plant-growth/code-publish-temperature/wio-terminal/temperature-sensor/src/main.cpp index c76c516b..21192256 100644 --- a/2-farm/lessons/1-predict-plant-growth/code-publish-temperature/wio-terminal/temperature-sensor/src/main.cpp +++ b/2-farm/lessons/1-predict-plant-growth/code-publish-temperature/wio-terminal/temperature-sensor/src/main.cpp @@ -77,8 +77,7 @@ void loop() doc["temperature"] = temp_hum_val[1]; string telemetry; - JsonObject obj = doc.as(); - serializeJson(obj, telemetry); + serializeJson(doc, telemetry); Serial.print("Sending telemetry "); Serial.println(telemetry.c_str()); diff --git a/2-farm/lessons/3-automated-plant-watering/code-mqtt/wio-terminal/soil-moisture-sensor/platformio.ini 
b/2-farm/lessons/3-automated-plant-watering/code-mqtt/wio-terminal/soil-moisture-sensor/platformio.ini index 2f148840..3888824e 100644 --- a/2-farm/lessons/3-automated-plant-watering/code-mqtt/wio-terminal/soil-moisture-sensor/platformio.ini +++ b/2-farm/lessons/3-automated-plant-watering/code-mqtt/wio-terminal/soil-moisture-sensor/platformio.ini @@ -16,7 +16,7 @@ lib_deps = knolleary/PubSubClient @ 2.8 bblanchon/ArduinoJson @ 6.17.3 seeed-studio/Seeed Arduino rpcWiFi @ 1.0.5 - seeed-studio/Seeed Arduino FS @ 2.0.3 - seeed-studio/Seeed Arduino SFUD @ 2.0.1 + seeed-studio/Seeed Arduino FS @ 2.1.1 + seeed-studio/Seeed Arduino SFUD @ 2.0.2 seeed-studio/Seeed Arduino rpcUnified @ 2.1.3 seeed-studio/Seeed_Arduino_mbedtls @ 3.0.1 diff --git a/2-farm/lessons/3-automated-plant-watering/code-mqtt/wio-terminal/soil-moisture-sensor/src/main.cpp b/2-farm/lessons/3-automated-plant-watering/code-mqtt/wio-terminal/soil-moisture-sensor/src/main.cpp index fd4bac4a..486d7db8 100644 --- a/2-farm/lessons/3-automated-plant-watering/code-mqtt/wio-terminal/soil-moisture-sensor/src/main.cpp +++ b/2-farm/lessons/3-automated-plant-watering/code-mqtt/wio-terminal/soil-moisture-sensor/src/main.cpp @@ -100,8 +100,7 @@ void loop() doc["soil_moisture"] = soil_moisture; string telemetry; - JsonObject obj = doc.as(); - serializeJson(obj, telemetry); + serializeJson(doc, telemetry); Serial.print("Sending telemetry "); Serial.println(telemetry.c_str()); diff --git a/2-farm/lessons/4-migrate-your-plant-to-the-cloud/code/wio-terminal/soil-moisture-sensor/platformio.ini b/2-farm/lessons/4-migrate-your-plant-to-the-cloud/code/wio-terminal/soil-moisture-sensor/platformio.ini index 3daba989..32966cc9 100644 --- a/2-farm/lessons/4-migrate-your-plant-to-the-cloud/code/wio-terminal/soil-moisture-sensor/platformio.ini +++ b/2-farm/lessons/4-migrate-your-plant-to-the-cloud/code/wio-terminal/soil-moisture-sensor/platformio.ini @@ -15,8 +15,8 @@ framework = arduino lib_deps = bblanchon/ArduinoJson @ 6.17.3 
seeed-studio/Seeed Arduino rpcWiFi @ 1.0.5 - seeed-studio/Seeed Arduino FS @ 2.0.3 - seeed-studio/Seeed Arduino SFUD @ 2.0.1 + seeed-studio/Seeed Arduino FS @ 2.1.1 + seeed-studio/Seeed Arduino SFUD @ 2.0.2 seeed-studio/Seeed Arduino rpcUnified @ 2.1.3 seeed-studio/Seeed_Arduino_mbedtls @ 3.0.1 seeed-studio/Seeed Arduino RTC @ 2.0.0 diff --git a/2-farm/lessons/4-migrate-your-plant-to-the-cloud/code/wio-terminal/soil-moisture-sensor/src/main.cpp b/2-farm/lessons/4-migrate-your-plant-to-the-cloud/code/wio-terminal/soil-moisture-sensor/src/main.cpp index 23ef9191..7d18409f 100644 --- a/2-farm/lessons/4-migrate-your-plant-to-the-cloud/code/wio-terminal/soil-moisture-sensor/src/main.cpp +++ b/2-farm/lessons/4-migrate-your-plant-to-the-cloud/code/wio-terminal/soil-moisture-sensor/src/main.cpp @@ -120,8 +120,7 @@ void loop() doc["soil_moisture"] = soil_moisture; string telemetry; - JsonObject obj = doc.as(); - serializeJson(obj, telemetry); + serializeJson(doc, telemetry); Serial.print("Sending telemetry "); Serial.println(telemetry.c_str()); diff --git a/3-transport/lessons/2-store-location-data/code/wio-terminal/gps-sensor/platformio.ini b/3-transport/lessons/2-store-location-data/code/wio-terminal/gps-sensor/platformio.ini index 6aee0066..1f6ca207 100644 --- a/3-transport/lessons/2-store-location-data/code/wio-terminal/gps-sensor/platformio.ini +++ b/3-transport/lessons/2-store-location-data/code/wio-terminal/gps-sensor/platformio.ini @@ -15,8 +15,8 @@ framework = arduino lib_deps = bblanchon/ArduinoJson @ 6.17.3 seeed-studio/Seeed Arduino rpcWiFi @ 1.0.5 - seeed-studio/Seeed Arduino FS @ 2.0.3 - seeed-studio/Seeed Arduino SFUD @ 2.0.1 + seeed-studio/Seeed Arduino FS @ 2.1.1 + seeed-studio/Seeed Arduino SFUD @ 2.0.2 seeed-studio/Seeed Arduino rpcUnified @ 2.1.3 seeed-studio/Seeed_Arduino_mbedtls @ 3.0.1 seeed-studio/Seeed Arduino RTC @ 2.0.0 diff --git a/4-manufacturing/lessons/2-check-fruit-from-device/code-camera/wio-terminal/fruit-quality-detector/platformio.ini 
b/4-manufacturing/lessons/2-check-fruit-from-device/code-camera/wio-terminal/fruit-quality-detector/platformio.ini index d2d6f51d..ebe92e30 100644 --- a/4-manufacturing/lessons/2-check-fruit-from-device/code-camera/wio-terminal/fruit-quality-detector/platformio.ini +++ b/4-manufacturing/lessons/2-check-fruit-from-device/code-camera/wio-terminal/fruit-quality-detector/platformio.ini @@ -14,8 +14,8 @@ board = seeed_wio_terminal framework = arduino lib_deps = seeed-studio/Seeed Arduino rpcWiFi @ 1.0.5 - seeed-studio/Seeed Arduino FS @ 2.0.3 - seeed-studio/Seeed Arduino SFUD @ 2.0.1 + seeed-studio/Seeed Arduino FS @ 2.1.1 + seeed-studio/Seeed Arduino SFUD @ 2.0.2 seeed-studio/Seeed Arduino rpcUnified @ 2.1.3 seeed-studio/Seeed_Arduino_mbedtls @ 3.0.1 seeed-studio/Seeed Arduino RTC @ 2.0.0 diff --git a/4-manufacturing/lessons/2-check-fruit-from-device/code-classify/wio-terminal/fruit-quality-detector/platformio.ini b/4-manufacturing/lessons/2-check-fruit-from-device/code-classify/wio-terminal/fruit-quality-detector/platformio.ini index 5f3eb8a7..826cb386 100644 --- a/4-manufacturing/lessons/2-check-fruit-from-device/code-classify/wio-terminal/fruit-quality-detector/platformio.ini +++ b/4-manufacturing/lessons/2-check-fruit-from-device/code-classify/wio-terminal/fruit-quality-detector/platformio.ini @@ -14,8 +14,8 @@ board = seeed_wio_terminal framework = arduino lib_deps = seeed-studio/Seeed Arduino rpcWiFi @ 1.0.5 - seeed-studio/Seeed Arduino FS @ 2.0.3 - seeed-studio/Seeed Arduino SFUD @ 2.0.1 + seeed-studio/Seeed Arduino FS @ 2.1.1 + seeed-studio/Seeed Arduino SFUD @ 2.0.2 seeed-studio/Seeed Arduino rpcUnified @ 2.1.3 seeed-studio/Seeed_Arduino_mbedtls @ 3.0.1 seeed-studio/Seeed Arduino RTC @ 2.0.0 diff --git a/4-manufacturing/lessons/3-run-fruit-detector-edge/code-classify/wio-terminal/fruit-quality-detector/platformio.ini b/4-manufacturing/lessons/3-run-fruit-detector-edge/code-classify/wio-terminal/fruit-quality-detector/platformio.ini index 5f3eb8a7..826cb386 
100644 --- a/4-manufacturing/lessons/3-run-fruit-detector-edge/code-classify/wio-terminal/fruit-quality-detector/platformio.ini +++ b/4-manufacturing/lessons/3-run-fruit-detector-edge/code-classify/wio-terminal/fruit-quality-detector/platformio.ini @@ -14,8 +14,8 @@ board = seeed_wio_terminal framework = arduino lib_deps = seeed-studio/Seeed Arduino rpcWiFi @ 1.0.5 - seeed-studio/Seeed Arduino FS @ 2.0.3 - seeed-studio/Seeed Arduino SFUD @ 2.0.1 + seeed-studio/Seeed Arduino FS @ 2.1.1 + seeed-studio/Seeed Arduino SFUD @ 2.0.2 seeed-studio/Seeed Arduino rpcUnified @ 2.1.3 seeed-studio/Seeed_Arduino_mbedtls @ 3.0.1 seeed-studio/Seeed Arduino RTC @ 2.0.0 diff --git a/5-retail/lessons/2-check-stock-device/code-count/wio-terminal/stock-counter/platformio.ini b/5-retail/lessons/2-check-stock-device/code-count/wio-terminal/stock-counter/platformio.ini index 5f3eb8a7..826cb386 100644 --- a/5-retail/lessons/2-check-stock-device/code-count/wio-terminal/stock-counter/platformio.ini +++ b/5-retail/lessons/2-check-stock-device/code-count/wio-terminal/stock-counter/platformio.ini @@ -14,8 +14,8 @@ board = seeed_wio_terminal framework = arduino lib_deps = seeed-studio/Seeed Arduino rpcWiFi @ 1.0.5 - seeed-studio/Seeed Arduino FS @ 2.0.3 - seeed-studio/Seeed Arduino SFUD @ 2.0.1 + seeed-studio/Seeed Arduino FS @ 2.1.1 + seeed-studio/Seeed Arduino SFUD @ 2.0.2 seeed-studio/Seeed Arduino rpcUnified @ 2.1.3 seeed-studio/Seeed_Arduino_mbedtls @ 3.0.1 seeed-studio/Seeed Arduino RTC @ 2.0.0 diff --git a/5-retail/lessons/2-check-stock-device/code-detect/wio-terminal/stock-counter/platformio.ini b/5-retail/lessons/2-check-stock-device/code-detect/wio-terminal/stock-counter/platformio.ini index 5f3eb8a7..826cb386 100644 --- a/5-retail/lessons/2-check-stock-device/code-detect/wio-terminal/stock-counter/platformio.ini +++ b/5-retail/lessons/2-check-stock-device/code-detect/wio-terminal/stock-counter/platformio.ini @@ -14,8 +14,8 @@ board = seeed_wio_terminal framework = arduino lib_deps = 
seeed-studio/Seeed Arduino rpcWiFi @ 1.0.5 - seeed-studio/Seeed Arduino FS @ 2.0.3 - seeed-studio/Seeed Arduino SFUD @ 2.0.1 + seeed-studio/Seeed Arduino FS @ 2.1.1 + seeed-studio/Seeed Arduino SFUD @ 2.0.2 seeed-studio/Seeed Arduino rpcUnified @ 2.1.3 seeed-studio/Seeed_Arduino_mbedtls @ 3.0.1 seeed-studio/Seeed Arduino RTC @ 2.0.0 diff --git a/6-consumer/README.md b/6-consumer/README.md index 4b986ad9..d3c8c9d8 100644 --- a/6-consumer/README.md +++ b/6-consumer/README.md @@ -6,6 +6,8 @@ The latest iterations are now part of our smart devices. In kitchens in homes al In these 4 lessons you'll learn how to build a smart timer, using AI to recognize your voice, understand what you are asking for, and reply with information about your timer. You'll also add support for multiple languages. +> ⚠️ Working with speech and microphone data uses a lot of memory, meaning it is easy to hit limits on microcontrollers. The project here works around these issues, but be aware the Wio Terminal labs are complex and may take more time than other labs in this curriculum. + > 💁 These lessons will use some cloud resources. If you don't complete all the lessons in this project, make sure you [clean up your project](../clean-up.md). 
## Topics diff --git a/6-consumer/lessons/1-speech-recognition/code-record/wio-terminal/smart-timer/platformio.ini b/6-consumer/lessons/1-speech-recognition/code-record/wio-terminal/smart-timer/platformio.ini index c5999f17..c8443c8d 100644 --- a/6-consumer/lessons/1-speech-recognition/code-record/wio-terminal/smart-timer/platformio.ini +++ b/6-consumer/lessons/1-speech-recognition/code-record/wio-terminal/smart-timer/platformio.ini @@ -13,7 +13,7 @@ platform = atmelsam board = seeed_wio_terminal framework = arduino lib_deps = - seeed-studio/Seeed Arduino FS @ 2.0.3 - seeed-studio/Seeed Arduino SFUD @ 2.0.1 + seeed-studio/Seeed Arduino FS @ 2.1.1 + seeed-studio/Seeed Arduino SFUD @ 2.0.2 build_flags = -DSFUD_USING_QSPI diff --git a/6-consumer/lessons/1-speech-recognition/code-speech-to-text/wio-terminal/smart-timer/platformio.ini b/6-consumer/lessons/1-speech-recognition/code-speech-to-text/wio-terminal/smart-timer/platformio.ini index 5adbe733..9d79654b 100644 --- a/6-consumer/lessons/1-speech-recognition/code-speech-to-text/wio-terminal/smart-timer/platformio.ini +++ b/6-consumer/lessons/1-speech-recognition/code-speech-to-text/wio-terminal/smart-timer/platformio.ini @@ -13,8 +13,8 @@ platform = atmelsam board = seeed_wio_terminal framework = arduino lib_deps = - seeed-studio/Seeed Arduino FS @ 2.0.3 - seeed-studio/Seeed Arduino SFUD @ 2.0.1 + seeed-studio/Seeed Arduino FS @ 2.1.1 + seeed-studio/Seeed Arduino SFUD @ 2.0.2 seeed-studio/Seeed Arduino rpcWiFi @ 1.0.5 seeed-studio/Seeed Arduino rpcUnified @ 2.1.3 seeed-studio/Seeed_Arduino_mbedtls @ 3.0.1 diff --git a/6-consumer/lessons/1-speech-recognition/code-speech-to-text/wio-terminal/smart-timer/src/speech_to_text.h b/6-consumer/lessons/1-speech-recognition/code-speech-to-text/wio-terminal/smart-timer/src/speech_to_text.h index a7ce075f..74b4b4fd 100644 --- a/6-consumer/lessons/1-speech-recognition/code-speech-to-text/wio-terminal/smart-timer/src/speech_to_text.h +++ 
b/6-consumer/lessons/1-speech-recognition/code-speech-to-text/wio-terminal/smart-timer/src/speech_to_text.h @@ -67,6 +67,11 @@ public: return text; } + String AccessToken() + { + return _access_token; + } + private: String getAccessToken() { diff --git a/6-consumer/lessons/1-speech-recognition/wio-terminal-audio.md b/6-consumer/lessons/1-speech-recognition/wio-terminal-audio.md index 302d67d9..8b3a45bb 100644 --- a/6-consumer/lessons/1-speech-recognition/wio-terminal-audio.md +++ b/6-consumer/lessons/1-speech-recognition/wio-terminal-audio.md @@ -24,8 +24,8 @@ Once each buffer has been captured, it can be written to the flash memory. Flash ```ini lib_deps = - seeed-studio/Seeed Arduino FS @ 2.0.3 - seeed-studio/Seeed Arduino SFUD @ 2.0.1 + seeed-studio/Seeed Arduino FS @ 2.1.1 + seeed-studio/Seeed Arduino SFUD @ 2.0.2 ``` 1. Open the `main.cpp` file and add the following include directive for the flash memory library to the top of the file: diff --git a/6-consumer/lessons/1-speech-recognition/wio-terminal-microphone.md b/6-consumer/lessons/1-speech-recognition/wio-terminal-microphone.md index 66e65dc8..8cd8c32d 100644 --- a/6-consumer/lessons/1-speech-recognition/wio-terminal-microphone.md +++ b/6-consumer/lessons/1-speech-recognition/wio-terminal-microphone.md @@ -18,6 +18,8 @@ To connect the ReSpeaker 2-Mics Pi Hat you will need 40 pin-to-pin (also referre > 💁 If you are comfortable soldering, then you can use the [40 Pin Raspberry Pi Hat Adapter Board For Wio Terminal](https://www.seeedstudio.com/40-Pin-Raspberry-Pi-Hat-Adapter-Board-For-Wio-Terminal-p-4730.html) to connect the ReSpeaker. +You will also need an SD card to use to download and playback audio. The Wio Terminal only supports SD Cards up to 16GB in size, and these need to be formatted as FAT32 or exFAT. + ### Task - connect the ReSpeaker Pi Hat 1. 
With the Wio Terminal powered off, connect the ReSpeaker 2-Mics Pi Hat to the Wio Terminal using the jumper leads and the GPIO sockets on the back of the Wio Terminal: @@ -59,3 +61,15 @@ To connect the ReSpeaker 2-Mics Pi Hat you will need 40 pin-to-pin (also referre * If you are using a speaker with a 3.5mm jack, or headphones, insert them into the 3.5mm jack socket. ![A speaker connected to the ReSpeaker via the 3.5mm jack socket](../../../images/respeaker-35mm-speaker.png) + +### Task - set up the SD card + +1. Connect the SD Card to your computer, using an external reader if you don't have an SD Card slot. + +1. Format the SD Card using the appropriate tool on your computer, making sure to use a FAT32 or exFAT file system + +1. Insert the SD card into the SD Card slot on the left-hand side of the Wio Terminal, just below the power button. Make sure the card is all the way in and clicks in - you may need a thin tool or another SD Card to help push it all the way in. + + ![Inserting the SD card into the SD card slot below the power switch](../../../images/wio-sd-card.png) + + > 💁 To eject the SD Card, you need to push it in slightly and it will eject. You will need a thin tool to do this such as a flat-head screwdriver or another SD Card. diff --git a/6-consumer/lessons/1-speech-recognition/wio-terminal-speech-to-text.md b/6-consumer/lessons/1-speech-recognition/wio-terminal-speech-to-text.md index a1e2cddb..9010417e 100644 --- a/6-consumer/lessons/1-speech-recognition/wio-terminal-speech-to-text.md +++ b/6-consumer/lessons/1-speech-recognition/wio-terminal-speech-to-text.md @@ -180,6 +180,15 @@ The audio can be sent to the speech service using the REST API. To use the speec This code builds the URL for the token issuer API using the location of the speech resource. It then creates an `HTTPClient` to make the web request, setting it up to use the WiFi client configured with the token endpoints certificate. It sets the API key as a header for the call. 
It then makes a POST request to get the certificate, retrying if it gets any errors. Finally the access token is returned. +1. To the `public` section, add a method to get the access token. This will be needed in later lessons to convert text to speech. + + ```cpp + String AccessToken() + { + return _access_token; + } + ``` + 1. To the `public` section, add an `init` method that sets up the token client: ```cpp @@ -497,7 +506,7 @@ The audio can be sent to the speech service using the REST API. To use the speec Serial.println(text); ``` -1. Build this code, upload it to your Wio Terminal and test it out through the serial monitor. Press the C button (the one on the left-hand side, closest to the power switch), and speak. 4 seconds of audio will be captured, then converted to text. +1. Build this code, upload it to your Wio Terminal and test it out through the serial monitor. Once you see `Ready` in the serial monitor, press the C button (the one on the left-hand side, closest to the power switch), and speak. 4 seconds of audio will be captured, then converted to text. ```output --- Available filters and text transformations: colorize, debug, default, direct, hexlify, log2file, nocontrol, printable, send_on_enter, time diff --git a/6-consumer/lessons/2-language-understanding/README.md b/6-consumer/lessons/2-language-understanding/README.md index b2a7cc9f..0c3a5082 100644 --- a/6-consumer/lessons/2-language-understanding/README.md +++ b/6-consumer/lessons/2-language-understanding/README.md @@ -274,7 +274,7 @@ Rather than calling LUIS from the IoT device, you can use serverless code with a func new --name text-to-timer --template "HTTP trigger" ``` - This will crate an HTTP trigger called `text-to-timer`. + This will create an HTTP trigger called `text-to-timer`. 1. Test the HTTP trigger by running the functions app. 
When it runs you will see the endpoint listed in the output: @@ -493,9 +493,35 @@ Rather than calling LUIS from the IoT device, you can use serverless code with a ### Task - make your function available to your IoT device -1. For your IoT device to call your REST endpoint, it will need to know the URL. When you accessed it earlier, you used `localhost`, which is a shortcut to access REST endpoints on your local machine. To allow you IoT device to get access, you need to either: +1. For your IoT device to call your REST endpoint, it will need to know the URL. When you accessed it earlier, you used `localhost`, which is a shortcut to access REST endpoints on your local machine. To allow your IoT device to get access, you need to either publish to the cloud, or get your IP address to access it locally. + + > ⚠️ If you are using a Wio Terminal, it is easier to run the function app locally, as there will be a dependency on libraries that mean you cannot deploy the function app in the same way as you have done before. Run the function app locally and access it via your computer's IP address. If you do want to deploy to the cloud, information will be provided in a later lesson on the way to do this. + + * Publish the Functions app - follow the instructions in earlier lessons to publish your functions app to the cloud. Once published, the URL will be `https://<APP_NAME>.azurewebsites.net/api/text-to-timer`, where `<APP_NAME>` will be the name of your functions app. Make sure to also publish your local settings. + + When working with HTTP triggers, they are secured by default with a function app key. To get this key, run the following command: + + ```sh + az functionapp keys list --resource-group smart-timer \ + --name <APP_NAME> + ``` + + Copy the value of the `default` entry from the `functionKeys` section. 
+ + ```output + { + "functionKeys": { + "default": "sQO1LQaeK9N1qYD6SXeb/TctCmwQEkToLJU6Dw8TthNeUH8VA45hlA==" + }, + "masterKey": "RSKOAIlyvvQEQt9dfpabJT018scaLpQu9p1poHIMCxx5LYrIQZyQ/g==", + "systemKeys": {} + } + ``` + + This key will need to be added as a query parameter to the URL, so the final URL will be `https://.azurewebsites.net/api/text-to-timer?code=`, where `` will be the name of your functions app, and `` will be your default function key. + + > 💁 You can change the type of authorization of the HTTP trigger using `authlevel` setting in the `function.json` file. You can read more about this in the [configuration section of the Azure Functions HTTP trigger documentation on Microsoft docs](https://docs.microsoft.com/azure/azure-functions/functions-bindings-http-webhook-trigger?tabs=python&WT.mc_id=academic-17441-jabenn#configuration). - * Publish the Functions app - follow the instructions in earlier lessons to publish your functions app to the cloud. Once published, the URL will be `http://.azurewebsites.net/api/text-to-timer`, where `` will be the name of your functions app. * Run the functions app locally, and access using the IP address - you can get the IP address of your computer on your local network, and use that to build the URL. Find your IP address: @@ -504,11 +530,13 @@ Rather than calling LUIS from the IoT device, you can use serverless code with a * On macOS, follow the [how to find you IP address on a Mac guide](https://www.hellotech.com/guide/for/how-to-find-ip-address-on-mac) * On linux, follow the section on finding your private IP address in the [how to find your IP address in Linux guide](https://opensource.com/article/18/5/how-find-ip-address-linux) - Once you have your IP address, you will able to access the function at `http://:7071/api/text-to-timer`, where `` will be your IP address, for example `http://192.168.1.10:7071/api/text-to-timer`. 
+ Once you have your IP address, you will be able to access the function at `http://<IP ADDRESS>:7071/api/text-to-timer`, where `<IP ADDRESS>` will be your IP address, for example `http://192.168.1.10:7071/api/text-to-timer`. + + > 💁 Note that this uses port 7071, so after the IP address you will need to have `:7071`. - > 💁 This will only work if your IoT device is on the same network as your computer. + > 💁 This will only work if your IoT device is on the same network as your computer. -1. Test the endpoint by accessing it using your browser. +1. Test the endpoint by accessing it using curl. --- diff --git a/6-consumer/lessons/3-spoken-feedback/code-spoken-response/functions/smart-timer-trigger/get-voices/__init__.py b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/functions/smart-timer-trigger/get-voices/__init__.py new file mode 100644 index 00000000..44c81900 --- /dev/null +++ b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/functions/smart-timer-trigger/get-voices/__init__.py @@ -0,0 +1,26 @@ +import json +import os +import requests + +import azure.functions as func + +def main(req: func.HttpRequest) -> func.HttpResponse: + location = os.environ['SPEECH_LOCATION'] + speech_key = os.environ['SPEECH_KEY'] + + req_body = req.get_json() + language = req_body['language'] + + url = f'https://{location}.tts.speech.microsoft.com/cognitiveservices/voices/list' + + headers = { + 'Ocp-Apim-Subscription-Key': speech_key + } + + response = requests.get(url, headers=headers) + voices_json = json.loads(response.text) + + voices = filter(lambda x: x['Locale'].lower() == language.lower(), voices_json) + voices = map(lambda x: x['ShortName'], voices) + + return func.HttpResponse(json.dumps(list(voices)), status_code=200) diff --git a/6-consumer/lessons/3-spoken-feedback/code-spoken-response/functions/smart-timer-trigger/get-voices/function.json b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/functions/smart-timer-trigger/get-voices/function.json new file mode 100644 index 
00000000..d9019652 --- /dev/null +++ b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/functions/smart-timer-trigger/get-voices/function.json @@ -0,0 +1,20 @@ +{ + "scriptFile": "__init__.py", + "bindings": [ + { + "authLevel": "function", + "type": "httpTrigger", + "direction": "in", + "name": "req", + "methods": [ + "get", + "post" + ] + }, + { + "type": "http", + "direction": "out", + "name": "$return" + } + ] +} \ No newline at end of file diff --git a/6-consumer/lessons/3-spoken-feedback/code-spoken-response/functions/smart-timer-trigger/host.json b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/functions/smart-timer-trigger/host.json new file mode 100644 index 00000000..291065f8 --- /dev/null +++ b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/functions/smart-timer-trigger/host.json @@ -0,0 +1,15 @@ +{ + "version": "2.0", + "logging": { + "applicationInsights": { + "samplingSettings": { + "isEnabled": true, + "excludedTypes": "Request" + } + } + }, + "extensionBundle": { + "id": "Microsoft.Azure.Functions.ExtensionBundle", + "version": "[2.*, 3.0.0)" + } +} \ No newline at end of file diff --git a/6-consumer/lessons/3-spoken-feedback/code-spoken-response/functions/smart-timer-trigger/local.settings.json b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/functions/smart-timer-trigger/local.settings.json new file mode 100644 index 00000000..afc05864 --- /dev/null +++ b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/functions/smart-timer-trigger/local.settings.json @@ -0,0 +1,12 @@ +{ + "IsEncrypted": false, + "Values": { + "FUNCTIONS_WORKER_RUNTIME": "python", + "AzureWebJobsStorage": "", + "LUIS_KEY": "", + "LUIS_ENDPOINT_URL": "", + "LUIS_APP_ID": "", + "SPEECH_KEY": "", + "SPEECH_LOCATION": "" + } +} \ No newline at end of file diff --git a/6-consumer/lessons/3-spoken-feedback/code-spoken-response/functions/smart-timer-trigger/requirements.txt 
b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/functions/smart-timer-trigger/requirements.txt new file mode 100644 index 00000000..a2596be3 --- /dev/null +++ b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/functions/smart-timer-trigger/requirements.txt @@ -0,0 +1,5 @@ +# Do not include azure-functions-worker as it may conflict with the Azure Functions platform + +azure-functions +azure-cognitiveservices-language-luis +librosa \ No newline at end of file diff --git a/6-consumer/lessons/3-spoken-feedback/code-spoken-response/functions/smart-timer-trigger/text-to-speech/__init__.py b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/functions/smart-timer-trigger/text-to-speech/__init__.py new file mode 100644 index 00000000..f09404f3 --- /dev/null +++ b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/functions/smart-timer-trigger/text-to-speech/__init__.py @@ -0,0 +1,52 @@ +import io +import os +import requests + +import librosa +import soundfile as sf +import azure.functions as func + +location = os.environ['SPEECH_LOCATION'] +speech_key = os.environ['SPEECH_KEY'] + +def get_access_token(): + headers = { + 'Ocp-Apim-Subscription-Key': speech_key + } + + token_endpoint = f'https://{location}.api.cognitive.microsoft.com/sts/v1.0/issuetoken' + response = requests.post(token_endpoint, headers=headers) + return str(response.text) + +playback_format = 'riff-48khz-16bit-mono-pcm' + +def main(req: func.HttpRequest) -> func.HttpResponse: + req_body = req.get_json() + language = req_body['language'] + voice = req_body['voice'] + text = req_body['text'] + + url = f'https://{location}.tts.speech.microsoft.com/cognitiveservices/v1' + + headers = { + 'Authorization': 'Bearer ' + get_access_token(), + 'Content-Type': 'application/ssml+xml', + 'X-Microsoft-OutputFormat': playback_format + } + + ssml = f'<speak version=\'1.0\' xml:lang=\'{language}\'>' + ssml += f'<voice xml:lang=\'{language}\' name=\'{voice}\'>' + ssml += text + ssml += '</voice>' + ssml += '</speak>' + + response = requests.post(url, headers=headers, data=ssml.encode('utf-8')) + + 
raw_audio, sample_rate = librosa.load(io.BytesIO(response.content), sr=48000) + resampled = librosa.resample(raw_audio, sample_rate, 44100) + + output_buffer = io.BytesIO() + sf.write(output_buffer, resampled, 44100, 'PCM_16', format='wav') + output_buffer.seek(0) + + return func.HttpResponse(output_buffer.read(), status_code=200) diff --git a/6-consumer/lessons/3-spoken-feedback/code-spoken-response/functions/smart-timer-trigger/text-to-speech/function.json b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/functions/smart-timer-trigger/text-to-speech/function.json new file mode 100644 index 00000000..d9019652 --- /dev/null +++ b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/functions/smart-timer-trigger/text-to-speech/function.json @@ -0,0 +1,20 @@ +{ + "scriptFile": "__init__.py", + "bindings": [ + { + "authLevel": "function", + "type": "httpTrigger", + "direction": "in", + "name": "req", + "methods": [ + "get", + "post" + ] + }, + { + "type": "http", + "direction": "out", + "name": "$return" + } + ] +} \ No newline at end of file diff --git a/6-consumer/lessons/3-spoken-feedback/code-spoken-response/functions/smart-timer-trigger/text-to-timer/__init__.py b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/functions/smart-timer-trigger/text-to-timer/__init__.py new file mode 100644 index 00000000..d15d6e68 --- /dev/null +++ b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/functions/smart-timer-trigger/text-to-timer/__init__.py @@ -0,0 +1,46 @@ +import logging + +import azure.functions as func +import json +import os +from azure.cognitiveservices.language.luis.runtime import LUISRuntimeClient +from msrest.authentication import CognitiveServicesCredentials + + +def main(req: func.HttpRequest) -> func.HttpResponse: + luis_key = os.environ['LUIS_KEY'] + endpoint_url = os.environ['LUIS_ENDPOINT_URL'] + app_id = os.environ['LUIS_APP_ID'] + + credentials = CognitiveServicesCredentials(luis_key) + client = 
LUISRuntimeClient(endpoint=endpoint_url, credentials=credentials) + + req_body = req.get_json() + text = req_body['text'] + logging.info(f'Request - {text}') + prediction_request = { 'query' : text } + + prediction_response = client.prediction.get_slot_prediction(app_id, 'Staging', prediction_request) + + if prediction_response.prediction.top_intent == 'set timer': + numbers = prediction_response.prediction.entities['number'] + time_units = prediction_response.prediction.entities['time unit'] + total_seconds = 0 + + for i in range(0, len(numbers)): + number = numbers[i] + time_unit = time_units[i][0] + + if time_unit == 'minute': + total_seconds += number * 60 + else: + total_seconds += number + + logging.info(f'Timer required for {total_seconds} seconds') + + payload = { + 'seconds': total_seconds + } + return func.HttpResponse(json.dumps(payload), status_code=200) + + return func.HttpResponse(status_code=404) \ No newline at end of file diff --git a/6-consumer/lessons/3-spoken-feedback/code-spoken-response/functions/smart-timer-trigger/text-to-timer/function.json b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/functions/smart-timer-trigger/text-to-timer/function.json new file mode 100644 index 00000000..d9019652 --- /dev/null +++ b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/functions/smart-timer-trigger/text-to-timer/function.json @@ -0,0 +1,20 @@ +{ + "scriptFile": "__init__.py", + "bindings": [ + { + "authLevel": "function", + "type": "httpTrigger", + "direction": "in", + "name": "req", + "methods": [ + "get", + "post" + ] + }, + { + "type": "http", + "direction": "out", + "name": "$return" + } + ] +} \ No newline at end of file diff --git a/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/include/README b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/include/README new file mode 100644 index 00000000..194dcd43 --- /dev/null +++ 
b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/include/README @@ -0,0 +1,39 @@ + +This directory is intended for project header files. + +A header file is a file containing C declarations and macro definitions +to be shared between several project source files. You request the use of a +header file in your project source file (C, C++, etc) located in `src` folder +by including it, with the C preprocessing directive `#include'. + +```src/main.c + +#include "header.h" + +int main (void) +{ + ... +} +``` + +Including a header file produces the same results as copying the header file +into each source file that needs it. Such copying would be time-consuming +and error-prone. With a header file, the related declarations appear +in only one place. If they need to be changed, they can be changed in one +place, and programs that include the header file will automatically use the +new version when next recompiled. The header file eliminates the labor of +finding and changing all the copies as well as the risk that a failure to +find one copy will result in inconsistencies within a program. + +In C, the usual convention is to give header files names that end with `.h'. +It is most portable to use only letters, digits, dashes, and underscores in +header file names, and at most one dot. + +Read more about using header files in official GCC documentation: + +* Include Syntax +* Include Operation +* Once-Only Headers +* Computed Includes + +https://gcc.gnu.org/onlinedocs/cpp/Header-Files.html diff --git a/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/lib/README b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/lib/README new file mode 100644 index 00000000..6debab1e --- /dev/null +++ b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/lib/README @@ -0,0 +1,46 @@ + +This directory is intended for project specific (private) libraries. 
+PlatformIO will compile them to static libraries and link into executable file. + +The source code of each library should be placed in a an own separate directory +("lib/your_library_name/[here are source files]"). + +For example, see a structure of the following two libraries `Foo` and `Bar`: + +|--lib +| | +| |--Bar +| | |--docs +| | |--examples +| | |--src +| | |- Bar.c +| | |- Bar.h +| | |- library.json (optional, custom build options, etc) https://docs.platformio.org/page/librarymanager/config.html +| | +| |--Foo +| | |- Foo.c +| | |- Foo.h +| | +| |- README --> THIS FILE +| +|- platformio.ini +|--src + |- main.c + +and a contents of `src/main.c`: +``` +#include +#include + +int main (void) +{ + ... +} + +``` + +PlatformIO Library Dependency Finder will find automatically dependent +libraries scanning project source files. + +More information about PlatformIO Library Dependency Finder +- https://docs.platformio.org/page/librarymanager/ldf.html diff --git a/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/platformio.ini b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/platformio.ini new file mode 100644 index 00000000..8836ab42 --- /dev/null +++ b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/platformio.ini @@ -0,0 +1,23 @@ +; PlatformIO Project Configuration File +; +; Build options: build flags, source filter +; Upload options: custom upload port, speed and extra flags +; Library options: dependencies, extra library storages +; Advanced options: extra scripting +; +; Please visit documentation for the other options and examples +; https://docs.platformio.org/page/projectconf.html + +[env:seeed_wio_terminal] +platform = atmelsam +board = seeed_wio_terminal +framework = arduino +lib_deps = + seeed-studio/Seeed Arduino FS @ 2.1.1 + seeed-studio/Seeed Arduino SFUD @ 2.0.2 + seeed-studio/Seeed Arduino rpcWiFi @ 1.0.5 + seeed-studio/Seeed Arduino rpcUnified @ 
2.1.3 + seeed-studio/Seeed_Arduino_mbedtls @ 3.0.1 + seeed-studio/Seeed Arduino RTC @ 2.0.0 + bblanchon/ArduinoJson @ 6.17.3 + contrem/arduino-timer @ 2.3.0 diff --git a/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/src/config.h b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/src/config.h new file mode 100644 index 00000000..f2e912c4 --- /dev/null +++ b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/src/config.h @@ -0,0 +1,93 @@ +#pragma once + +#define RATE 16000 +#define SAMPLE_LENGTH_SECONDS 4 +#define SAMPLES RATE * SAMPLE_LENGTH_SECONDS +#define BUFFER_SIZE (SAMPLES * 2) + 44 +#define ADC_BUF_LEN 1600 + +const char *SSID = ""; +const char *PASSWORD = ""; + +const char *SPEECH_API_KEY = ""; +const char *SPEECH_LOCATION = ""; +const char *LANGUAGE = ""; + +const char *TOKEN_URL = "https://%s.api.cognitive.microsoft.com/sts/v1.0/issuetoken"; +const char *SPEECH_URL = "https://%s.stt.speech.microsoft.com/speech/recognition/conversation/cognitiveservices/v1?language=%s"; + +const char *TEXT_TO_TIMER_FUNCTION_URL = "http://:7071/api/text-to-timer"; +const char *GET_VOICES_FUNCTION_URL = "http://:7071/api/get-voices"; +const char *TEXT_TO_SPEECH_FUNCTION_URL = "http://:7071/api/text-to-speech"; + +const char *TOKEN_CERTIFICATE = + "-----BEGIN CERTIFICATE-----\r\n" + "MIIF8zCCBNugAwIBAgIQAueRcfuAIek/4tmDg0xQwDANBgkqhkiG9w0BAQwFADBh\r\n" + "MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\r\n" + "d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH\r\n" + "MjAeFw0yMDA3MjkxMjMwMDBaFw0yNDA2MjcyMzU5NTlaMFkxCzAJBgNVBAYTAlVT\r\n" + "MR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xKjAoBgNVBAMTIU1pY3Jv\r\n" + "c29mdCBBenVyZSBUTFMgSXNzdWluZyBDQSAwNjCCAiIwDQYJKoZIhvcNAQEBBQAD\r\n" + "ggIPADCCAgoCggIBALVGARl56bx3KBUSGuPc4H5uoNFkFH4e7pvTCxRi4j/+z+Xb\r\n" + "wjEz+5CipDOqjx9/jWjskL5dk7PaQkzItidsAAnDCW1leZBOIi68Lff1bjTeZgMY\r\n" + 
"iwdRd3Y39b/lcGpiuP2d23W95YHkMMT8IlWosYIX0f4kYb62rphyfnAjYb/4Od99\r\n" + "ThnhlAxGtfvSbXcBVIKCYfZgqRvV+5lReUnd1aNjRYVzPOoifgSx2fRyy1+pO1Uz\r\n" + "aMMNnIOE71bVYW0A1hr19w7kOb0KkJXoALTDDj1ukUEDqQuBfBxReL5mXiu1O7WG\r\n" + "0vltg0VZ/SZzctBsdBlx1BkmWYBW261KZgBivrql5ELTKKd8qgtHcLQA5fl6JB0Q\r\n" + "gs5XDaWehN86Gps5JW8ArjGtjcWAIP+X8CQaWfaCnuRm6Bk/03PQWhgdi84qwA0s\r\n" + "sRfFJwHUPTNSnE8EiGVk2frt0u8PG1pwSQsFuNJfcYIHEv1vOzP7uEOuDydsmCjh\r\n" + "lxuoK2n5/2aVR3BMTu+p4+gl8alXoBycyLmj3J/PUgqD8SL5fTCUegGsdia/Sa60\r\n" + "N2oV7vQ17wjMN+LXa2rjj/b4ZlZgXVojDmAjDwIRdDUujQu0RVsJqFLMzSIHpp2C\r\n" + "Zp7mIoLrySay2YYBu7SiNwL95X6He2kS8eefBBHjzwW/9FxGqry57i71c2cDAgMB\r\n" + "AAGjggGtMIIBqTAdBgNVHQ4EFgQU1cFnOsKjnfR3UltZEjgp5lVou6UwHwYDVR0j\r\n" + "BBgwFoAUTiJUIBiV5uNu5g/6+rkS7QYXjzkwDgYDVR0PAQH/BAQDAgGGMB0GA1Ud\r\n" + "JQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/AgEAMHYG\r\n" + "CCsGAQUFBwEBBGowaDAkBggrBgEFBQcwAYYYaHR0cDovL29jc3AuZGlnaWNlcnQu\r\n" + "Y29tMEAGCCsGAQUFBzAChjRodHRwOi8vY2FjZXJ0cy5kaWdpY2VydC5jb20vRGln\r\n" + "aUNlcnRHbG9iYWxSb290RzIuY3J0MHsGA1UdHwR0MHIwN6A1oDOGMWh0dHA6Ly9j\r\n" + "cmwzLmRpZ2ljZXJ0LmNvbS9EaWdpQ2VydEdsb2JhbFJvb3RHMi5jcmwwN6A1oDOG\r\n" + "MWh0dHA6Ly9jcmw0LmRpZ2ljZXJ0LmNvbS9EaWdpQ2VydEdsb2JhbFJvb3RHMi5j\r\n" + "cmwwHQYDVR0gBBYwFDAIBgZngQwBAgEwCAYGZ4EMAQICMBAGCSsGAQQBgjcVAQQD\r\n" + "AgEAMA0GCSqGSIb3DQEBDAUAA4IBAQB2oWc93fB8esci/8esixj++N22meiGDjgF\r\n" + "+rA2LUK5IOQOgcUSTGKSqF9lYfAxPjrqPjDCUPHCURv+26ad5P/BYtXtbmtxJWu+\r\n" + "cS5BhMDPPeG3oPZwXRHBJFAkY4O4AF7RIAAUW6EzDflUoDHKv83zOiPfYGcpHc9s\r\n" + "kxAInCedk7QSgXvMARjjOqdakor21DTmNIUotxo8kHv5hwRlGhBJwps6fEVi1Bt0\r\n" + "trpM/3wYxlr473WSPUFZPgP1j519kLpWOJ8z09wxay+Br29irPcBYv0GMXlHqThy\r\n" + "8y4m/HyTQeI2IMvMrQnwqPpY+rLIXyviI2vLoI+4xKE4Rn38ZZ8m\r\n" + "-----END CERTIFICATE-----\r\n"; + +const char *SPEECH_CERTIFICATE = + "-----BEGIN CERTIFICATE-----\r\n" + "MIIF8zCCBNugAwIBAgIQCq+mxcpjxFFB6jvh98dTFzANBgkqhkiG9w0BAQwFADBh\r\n" + "MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\r\n" + 
"d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH\r\n" + "MjAeFw0yMDA3MjkxMjMwMDBaFw0yNDA2MjcyMzU5NTlaMFkxCzAJBgNVBAYTAlVT\r\n" + "MR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xKjAoBgNVBAMTIU1pY3Jv\r\n" + "c29mdCBBenVyZSBUTFMgSXNzdWluZyBDQSAwMTCCAiIwDQYJKoZIhvcNAQEBBQAD\r\n" + "ggIPADCCAgoCggIBAMedcDrkXufP7pxVm1FHLDNA9IjwHaMoaY8arqqZ4Gff4xyr\r\n" + "RygnavXL7g12MPAx8Q6Dd9hfBzrfWxkF0Br2wIvlvkzW01naNVSkHp+OS3hL3W6n\r\n" + "l/jYvZnVeJXjtsKYcXIf/6WtspcF5awlQ9LZJcjwaH7KoZuK+THpXCMtzD8XNVdm\r\n" + "GW/JI0C/7U/E7evXn9XDio8SYkGSM63aLO5BtLCv092+1d4GGBSQYolRq+7Pd1kR\r\n" + "EkWBPm0ywZ2Vb8GIS5DLrjelEkBnKCyy3B0yQud9dpVsiUeE7F5sY8Me96WVxQcb\r\n" + "OyYdEY/j/9UpDlOG+vA+YgOvBhkKEjiqygVpP8EZoMMijephzg43b5Qi9r5UrvYo\r\n" + "o19oR/8pf4HJNDPF0/FJwFVMW8PmCBLGstin3NE1+NeWTkGt0TzpHjgKyfaDP2tO\r\n" + "4bCk1G7pP2kDFT7SYfc8xbgCkFQ2UCEXsaH/f5YmpLn4YPiNFCeeIida7xnfTvc4\r\n" + "7IxyVccHHq1FzGygOqemrxEETKh8hvDR6eBdrBwmCHVgZrnAqnn93JtGyPLi6+cj\r\n" + "WGVGtMZHwzVvX1HvSFG771sskcEjJxiQNQDQRWHEh3NxvNb7kFlAXnVdRkkvhjpR\r\n" + "GchFhTAzqmwltdWhWDEyCMKC2x/mSZvZtlZGY+g37Y72qHzidwtyW7rBetZJAgMB\r\n" + "AAGjggGtMIIBqTAdBgNVHQ4EFgQUDyBd16FXlduSzyvQx8J3BM5ygHYwHwYDVR0j\r\n" + "BBgwFoAUTiJUIBiV5uNu5g/6+rkS7QYXjzkwDgYDVR0PAQH/BAQDAgGGMB0GA1Ud\r\n" + "JQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/AgEAMHYG\r\n" + "CCsGAQUFBwEBBGowaDAkBggrBgEFBQcwAYYYaHR0cDovL29jc3AuZGlnaWNlcnQu\r\n" + "Y29tMEAGCCsGAQUFBzAChjRodHRwOi8vY2FjZXJ0cy5kaWdpY2VydC5jb20vRGln\r\n" + "aUNlcnRHbG9iYWxSb290RzIuY3J0MHsGA1UdHwR0MHIwN6A1oDOGMWh0dHA6Ly9j\r\n" + "cmwzLmRpZ2ljZXJ0LmNvbS9EaWdpQ2VydEdsb2JhbFJvb3RHMi5jcmwwN6A1oDOG\r\n" + "MWh0dHA6Ly9jcmw0LmRpZ2ljZXJ0LmNvbS9EaWdpQ2VydEdsb2JhbFJvb3RHMi5j\r\n" + "cmwwHQYDVR0gBBYwFDAIBgZngQwBAgEwCAYGZ4EMAQICMBAGCSsGAQQBgjcVAQQD\r\n" + "AgEAMA0GCSqGSIb3DQEBDAUAA4IBAQAlFvNh7QgXVLAZSsNR2XRmIn9iS8OHFCBA\r\n" + "WxKJoi8YYQafpMTkMqeuzoL3HWb1pYEipsDkhiMnrpfeYZEA7Lz7yqEEtfgHcEBs\r\n" + "K9KcStQGGZRfmWU07hPXHnFz+5gTXqzCE2PBMlRgVUYJiA25mJPXfB00gDvGhtYa\r\n" + 
"+mENwM9Bq1B9YYLyLjRtUz8cyGsdyTIG/bBM/Q9jcV8JGqMU/UjAdh1pFyTnnHEl\r\n" + "Y59Npi7F87ZqYYJEHJM2LGD+le8VsHjgeWX2CJQko7klXvcizuZvUEDTjHaQcs2J\r\n" + "+kPgfyMIOY1DMJ21NxOJ2xPRC/wAh/hzSBRVtoAnyuxtkZ4VjIOh\r\n" + "-----END CERTIFICATE-----\r\n"; diff --git a/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/src/flash_stream.h b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/src/flash_stream.h new file mode 100644 index 00000000..b841f1d0 --- /dev/null +++ b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/src/flash_stream.h @@ -0,0 +1,69 @@ +#pragma once + +#include +#include +#include + +#include "config.h" + +class FlashStream : public Stream +{ +public: + FlashStream() + { + _pos = 0; + _flash_address = 0; + _flash = sfud_get_device_table() + 0; + + populateBuffer(); + } + + virtual size_t write(uint8_t val) + { + return 0; + } + + virtual int available() + { + int remaining = BUFFER_SIZE - ((_flash_address - HTTP_TCP_BUFFER_SIZE) + _pos); + int bytes_available = min(HTTP_TCP_BUFFER_SIZE, remaining); + + if (bytes_available == 0) + { + bytes_available = -1; + } + + return bytes_available; + } + + virtual int read() + { + int retVal = _buffer[_pos++]; + + if (_pos == HTTP_TCP_BUFFER_SIZE) + { + populateBuffer(); + } + + return retVal; + } + + virtual int peek() + { + return _buffer[_pos]; + } + +private: + void populateBuffer() + { + sfud_read(_flash, _flash_address, HTTP_TCP_BUFFER_SIZE, _buffer); + _flash_address += HTTP_TCP_BUFFER_SIZE; + _pos = 0; + } + + size_t _pos; + size_t _flash_address; + const sfud_flash *_flash; + + byte _buffer[HTTP_TCP_BUFFER_SIZE]; +}; diff --git a/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/src/flash_writer.h b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/src/flash_writer.h new file mode 100644 index 00000000..87fdff29 --- /dev/null +++ 
b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/src/flash_writer.h @@ -0,0 +1,60 @@ +#pragma once + +#include +#include + +class FlashWriter +{ +public: + void init() + { + _flash = sfud_get_device_table() + 0; + _sfudBufferSize = _flash->chip.erase_gran; + _sfudBuffer = new byte[_sfudBufferSize]; + _sfudBufferPos = 0; + _sfudBufferWritePos = 0; + } + + void reset() + { + _sfudBufferPos = 0; + _sfudBufferWritePos = 0; + } + + void writeSfudBuffer(byte b) + { + _sfudBuffer[_sfudBufferPos++] = b; + if (_sfudBufferPos == _sfudBufferSize) + { + sfud_erase_write(_flash, _sfudBufferWritePos, _sfudBufferSize, _sfudBuffer); + _sfudBufferWritePos += _sfudBufferSize; + _sfudBufferPos = 0; + } + } + + void flushSfudBuffer() + { + if (_sfudBufferPos > 0) + { + sfud_erase_write(_flash, _sfudBufferWritePos, _sfudBufferSize, _sfudBuffer); + _sfudBufferWritePos += _sfudBufferSize; + _sfudBufferPos = 0; + } + } + + void writeSfudBuffer(byte *b, size_t len) + { + for (size_t i = 0; i < len; ++i) + { + writeSfudBuffer(b[i]); + } + } + +private: + byte *_sfudBuffer; + size_t _sfudBufferSize; + size_t _sfudBufferPos; + size_t _sfudBufferWritePos; + + const sfud_flash *_flash; +}; \ No newline at end of file diff --git a/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/src/language_understanding.h b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/src/language_understanding.h new file mode 100644 index 00000000..1c8d8653 --- /dev/null +++ b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/src/language_understanding.h @@ -0,0 +1,53 @@ +#pragma once + +#include +#include +#include +#include + +#include "config.h" + +class LanguageUnderstanding +{ +public: + int GetTimerDuration(String text) + { + DynamicJsonDocument doc(1024); + doc["text"] = text; + + String body; + serializeJson(doc, body); + + HTTPClient httpClient; + httpClient.begin(_client, 
TEXT_TO_TIMER_FUNCTION_URL); + + int httpResponseCode = httpClient.POST(body); + + int seconds = 0; + if (httpResponseCode == 200) + { + String result = httpClient.getString(); + Serial.println(result); + + DynamicJsonDocument doc(1024); + deserializeJson(doc, result.c_str()); + + JsonObject obj = doc.as<JsonObject>(); + seconds = obj["seconds"].as<int>(); + } + else + { + Serial.print("Failed to understand text - error "); + Serial.println(httpResponseCode); + } + + httpClient.end(); + + return seconds; + } + +private: + WiFiClient _client; +}; + +LanguageUnderstanding languageUnderstanding; \ No newline at end of file diff --git a/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/src/main.cpp b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/src/main.cpp new file mode 100644 index 00000000..a075f6cb --- /dev/null +++ b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/src/main.cpp @@ -0,0 +1,130 @@ +#include <Arduino.h> +#include <arduino-timer.h> +#include <rpcWiFi.h> +#include <sfud.h> +#include <SPI.h> + +#include "config.h" +#include "language_understanding.h" +#include "mic.h" +#include "speech_to_text.h" +#include "text_to_speech.h" + +void connectWiFi() +{ + while (WiFi.status() != WL_CONNECTED) + { + Serial.println("Connecting to WiFi.."); + WiFi.begin(SSID, PASSWORD); + delay(500); + } + + Serial.println("Connected!"); +} + +void setup() +{ + Serial.begin(9600); + + while (!Serial) + ; // Wait for Serial to be ready + + delay(1000); + + connectWiFi(); + + while (!(sfud_init() == SFUD_SUCCESS)) + ; + + sfud_qspi_fast_read_enable(sfud_get_device(SFUD_W25Q32_DEVICE_INDEX), 2); + + pinMode(WIO_KEY_C, INPUT_PULLUP); + + mic.init(); + + speechToText.init(); + textToSpeech.init(); + + Serial.println("Ready."); +} + +auto timer = timer_create_default(); + +void say(String text) +{ + Serial.println(text); + textToSpeech.convertTextToSpeech(text); +} + +bool timerExpired(void *announcement) +{ + say((char *)announcement); + return false; 
+} + +void processAudio() +{ + String text = speechToText.convertSpeechToText(); + Serial.println(text); + + int total_seconds = languageUnderstanding.GetTimerDuration(text); + if (total_seconds == 0) + { + return; + } + + int minutes = total_seconds / 60; + int seconds = total_seconds % 60; + + String begin_message; + if (minutes > 0) + { + begin_message += minutes; + begin_message += " minute "; + } + if (seconds > 0) + { + begin_message += seconds; + begin_message += " second "; + } + + begin_message += "timer started."; + + String end_message("Times up on your "); + if (minutes > 0) + { + end_message += minutes; + end_message += " minute "; + } + if (seconds > 0) + { + end_message += seconds; + end_message += " second "; + } + + end_message += "timer."; + + say(begin_message); + + timer.in(total_seconds * 1000, timerExpired, (void *)(end_message.c_str())); +} + +void loop() +{ + if (digitalRead(WIO_KEY_C) == LOW && !mic.isRecording()) + { + Serial.println("Starting recording..."); + mic.startRecording(); + } + + if (!mic.isRecording() && mic.isRecordingReady()) + { + Serial.println("Finished recording"); + + processAudio(); + + mic.reset(); + } + + timer.tick(); +} diff --git a/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/src/mic.h b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/src/mic.h new file mode 100644 index 00000000..5f0815de --- /dev/null +++ b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/src/mic.h @@ -0,0 +1,242 @@ +#pragma once + +#include + +#include "config.h" +#include "flash_writer.h" + +class Mic +{ +public: + Mic() + { + _isRecording = false; + _isRecordingReady = false; + } + + void startRecording() + { + _isRecording = true; + _isRecordingReady = false; + } + + bool isRecording() + { + return _isRecording; + } + + bool isRecordingReady() + { + return _isRecordingReady; + } + + void init() + { + analogReference(AR_INTERNAL2V23); + + 
_writer.init(); + + initBufferHeader(); + configureDmaAdc(); + } + + void reset() + { + _isRecordingReady = false; + _isRecording = false; + + _writer.reset(); + + initBufferHeader(); + } + + void dmaHandler() + { + static uint8_t count = 0; + + if (DMAC->Channel[1].CHINTFLAG.bit.SUSP) + { + DMAC->Channel[1].CHCTRLB.reg = DMAC_CHCTRLB_CMD_RESUME; + DMAC->Channel[1].CHINTFLAG.bit.SUSP = 1; + + if (count) + { + audioCallback(_adc_buf_0, ADC_BUF_LEN); + } + else + { + audioCallback(_adc_buf_1, ADC_BUF_LEN); + } + + count = (count + 1) % 2; + } + } + +private: + volatile bool _isRecording; + volatile bool _isRecordingReady; + FlashWriter _writer; + +typedef struct + { + uint16_t btctrl; + uint16_t btcnt; + uint32_t srcaddr; + uint32_t dstaddr; + uint32_t descaddr; + } dmacdescriptor; + + // Globals - DMA and ADC + volatile dmacdescriptor _wrb[DMAC_CH_NUM] __attribute__((aligned(16))); + dmacdescriptor _descriptor_section[DMAC_CH_NUM] __attribute__((aligned(16))); + dmacdescriptor _descriptor __attribute__((aligned(16))); + + void configureDmaAdc() + { + // Configure DMA to sample from ADC at a regular interval (triggered by timer/counter) + DMAC->BASEADDR.reg = (uint32_t)_descriptor_section; // Specify the location of the descriptors + DMAC->WRBADDR.reg = (uint32_t)_wrb; // Specify the location of the write back descriptors + DMAC->CTRL.reg = DMAC_CTRL_DMAENABLE | DMAC_CTRL_LVLEN(0xf); // Enable the DMAC peripheral + DMAC->Channel[1].CHCTRLA.reg = DMAC_CHCTRLA_TRIGSRC(TC5_DMAC_ID_OVF) | // Set DMAC to trigger on TC5 timer overflow + DMAC_CHCTRLA_TRIGACT_BURST; // DMAC burst transfer + + _descriptor.descaddr = (uint32_t)&_descriptor_section[1]; // Set up a circular descriptor + _descriptor.srcaddr = (uint32_t)&ADC1->RESULT.reg; // Take the result from the ADC0 RESULT register + _descriptor.dstaddr = (uint32_t)_adc_buf_0 + sizeof(uint16_t) * ADC_BUF_LEN; // Place it in the adc_buf_0 array + _descriptor.btcnt = ADC_BUF_LEN; // Beat count + _descriptor.btctrl = 
DMAC_BTCTRL_BEATSIZE_HWORD | // Beat size is HWORD (16-bits) + DMAC_BTCTRL_DSTINC | // Increment the destination address + DMAC_BTCTRL_VALID | // Descriptor is valid + DMAC_BTCTRL_BLOCKACT_SUSPEND; // Suspend DMAC channel 0 after block transfer + memcpy(&_descriptor_section[0], &_descriptor, sizeof(_descriptor)); // Copy the descriptor to the descriptor section + + _descriptor.descaddr = (uint32_t)&_descriptor_section[0]; // Set up a circular descriptor + _descriptor.srcaddr = (uint32_t)&ADC1->RESULT.reg; // Take the result from the ADC0 RESULT register + _descriptor.dstaddr = (uint32_t)_adc_buf_1 + sizeof(uint16_t) * ADC_BUF_LEN; // Place it in the adc_buf_1 array + _descriptor.btcnt = ADC_BUF_LEN; // Beat count + _descriptor.btctrl = DMAC_BTCTRL_BEATSIZE_HWORD | // Beat size is HWORD (16-bits) + DMAC_BTCTRL_DSTINC | // Increment the destination address + DMAC_BTCTRL_VALID | // Descriptor is valid + DMAC_BTCTRL_BLOCKACT_SUSPEND; // Suspend DMAC channel 0 after block transfer + memcpy(&_descriptor_section[1], &_descriptor, sizeof(_descriptor)); // Copy the descriptor to the descriptor section + + // Configure NVIC + NVIC_SetPriority(DMAC_1_IRQn, 0); // Set the Nested Vector Interrupt Controller (NVIC) priority for DMAC1 to 0 (highest) + NVIC_EnableIRQ(DMAC_1_IRQn); // Connect DMAC1 to Nested Vector Interrupt Controller (NVIC) + + // Activate the suspend (SUSP) interrupt on DMAC channel 1 + DMAC->Channel[1].CHINTENSET.reg = DMAC_CHINTENSET_SUSP; + + // Configure ADC + ADC1->INPUTCTRL.bit.MUXPOS = ADC_INPUTCTRL_MUXPOS_AIN12_Val; // Set the analog input to ADC0/AIN2 (PB08 - A4 on Metro M4) + while (ADC1->SYNCBUSY.bit.INPUTCTRL) + ; // Wait for synchronization + ADC1->SAMPCTRL.bit.SAMPLEN = 0x00; // Set max Sampling Time Length to half divided ADC clock pulse (2.66us) + while (ADC1->SYNCBUSY.bit.SAMPCTRL) + ; // Wait for synchronization + ADC1->CTRLA.reg = ADC_CTRLA_PRESCALER_DIV128; // Divide Clock ADC GCLK by 128 (48MHz/128 = 375kHz) + ADC1->CTRLB.reg = 
ADC_CTRLB_RESSEL_12BIT | // Set ADC resolution to 12 bits + ADC_CTRLB_FREERUN; // Set ADC to free run mode + while (ADC1->SYNCBUSY.bit.CTRLB) + ; // Wait for synchronization + ADC1->CTRLA.bit.ENABLE = 1; // Enable the ADC + while (ADC1->SYNCBUSY.bit.ENABLE) + ; // Wait for synchronization + ADC1->SWTRIG.bit.START = 1; // Initiate a software trigger to start an ADC conversion + while (ADC1->SYNCBUSY.bit.SWTRIG) + ; // Wait for synchronization + + // Enable DMA channel 1 + DMAC->Channel[1].CHCTRLA.bit.ENABLE = 1; + + // Configure Timer/Counter 5 + GCLK->PCHCTRL[TC5_GCLK_ID].reg = GCLK_PCHCTRL_CHEN | // Enable perhipheral channel for TC5 + GCLK_PCHCTRL_GEN_GCLK1; // Connect generic clock 0 at 48MHz + + TC5->COUNT16.WAVE.reg = TC_WAVE_WAVEGEN_MFRQ; // Set TC5 to Match Frequency (MFRQ) mode + TC5->COUNT16.CC[0].reg = 3000 - 1; // Set the trigger to 16 kHz: (4Mhz / 16000) - 1 + while (TC5->COUNT16.SYNCBUSY.bit.CC0) + ; // Wait for synchronization + + // Start Timer/Counter 5 + TC5->COUNT16.CTRLA.bit.ENABLE = 1; // Enable the TC5 timer + while (TC5->COUNT16.SYNCBUSY.bit.ENABLE) + ; // Wait for synchronization + } + + uint16_t _adc_buf_0[ADC_BUF_LEN]; + uint16_t _adc_buf_1[ADC_BUF_LEN]; + + // WAV files have a header. 
This struct defines that header + struct wavFileHeader + { + char riff[4]; /* "RIFF" */ + long flength; /* file length in bytes */ + char wave[4]; /* "WAVE" */ + char fmt[4]; /* "fmt " */ + long chunk_size; /* size of FMT chunk in bytes (usually 16) */ + short format_tag; /* 1=PCM, 257=Mu-Law, 258=A-Law, 259=ADPCM */ + short num_chans; /* 1=mono, 2=stereo */ + long srate; /* Sampling rate in samples per second */ + long bytes_per_sec; /* bytes per second = srate*bytes_per_samp */ + short bytes_per_samp; /* 2=16-bit mono, 4=16-bit stereo */ + short bits_per_samp; /* Number of bits per sample */ + char data[4]; /* "data" */ + long dlength; /* data length in bytes (filelength - 44) */ + }; + + void initBufferHeader() + { + wavFileHeader wavh; + + strncpy(wavh.riff, "RIFF", 4); + strncpy(wavh.wave, "WAVE", 4); + strncpy(wavh.fmt, "fmt ", 4); + strncpy(wavh.data, "data", 4); + + wavh.chunk_size = 16; + wavh.format_tag = 1; // PCM + wavh.num_chans = 1; // mono + wavh.srate = RATE; + wavh.bytes_per_sec = (RATE * 1 * 16 * 1) / 8; + wavh.bytes_per_samp = 2; + wavh.bits_per_samp = 16; + wavh.dlength = RATE * 2 * 1 * 16 / 2; + wavh.flength = wavh.dlength + 44; + + _writer.writeSfudBuffer((byte *)&wavh, 44); + } + + void audioCallback(uint16_t *buf, uint32_t buf_len) + { + static uint32_t idx = 44; + + if (_isRecording) + { + for (uint32_t i = 0; i < buf_len; i++) + { + int16_t audio_value = ((int16_t)buf[i] - 2048) * 16; + + _writer.writeSfudBuffer(audio_value & 0xFF); + _writer.writeSfudBuffer((audio_value >> 8) & 0xFF); + } + + idx += buf_len; + + if (idx >= BUFFER_SIZE) + { + _writer.flushSfudBuffer(); + idx = 44; + _isRecording = false; + _isRecordingReady = true; + } + } + } +}; + +Mic mic; + +void DMAC_1_Handler() +{ + mic.dmaHandler(); +} diff --git a/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/src/speech_to_text.h b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/src/speech_to_text.h new file 
mode 100644 index 00000000..a7ce075f --- /dev/null +++ b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/src/speech_to_text.h @@ -0,0 +1,102 @@ +#pragma once + +#include <Arduino.h> +#include <ArduinoJson.h> +#include <HTTPClient.h> +#include <WiFiClientSecure.h> + +#include "config.h" +#include "flash_stream.h" + +class SpeechToText +{ +public: + void init() + { + _token_client.setCACert(TOKEN_CERTIFICATE); + _speech_client.setCACert(SPEECH_CERTIFICATE); + _access_token = getAccessToken(); + } + + String convertSpeechToText() + { + char url[128]; + sprintf(url, SPEECH_URL, SPEECH_LOCATION, LANGUAGE); + + HTTPClient httpClient; + httpClient.begin(_speech_client, url); + + httpClient.addHeader("Authorization", String("Bearer ") + _access_token); + httpClient.addHeader("Content-Type", String("audio/wav; codecs=audio/pcm; samplerate=") + String(RATE)); + httpClient.addHeader("Accept", "application/json;text/xml"); + + Serial.println("Sending speech..."); + + FlashStream stream; + int httpResponseCode = httpClient.sendRequest("POST", &stream, BUFFER_SIZE); + + Serial.println("Speech sent!"); + + String text = ""; + + if (httpResponseCode == 200) + { + String result = httpClient.getString(); + Serial.println(result); + + DynamicJsonDocument doc(1024); + deserializeJson(doc, result.c_str()); + + JsonObject obj = doc.as<JsonObject>(); + text = obj["DisplayText"].as<String>(); + } + else if (httpResponseCode == 401) + { + Serial.println("Access token expired, trying again with a new token"); + _access_token = getAccessToken(); + return convertSpeechToText(); + } + else + { + Serial.print("Failed to convert text to speech - error "); + Serial.println(httpResponseCode); + } + + httpClient.end(); + + return text; + } + +private: + String getAccessToken() + { + char url[128]; + sprintf(url, TOKEN_URL, SPEECH_LOCATION); + + HTTPClient httpClient; + httpClient.begin(_token_client, url); + + httpClient.addHeader("Ocp-Apim-Subscription-Key", SPEECH_API_KEY); + int httpResultCode = httpClient.POST("{}"); + + if (httpResultCode != 
200) + { + Serial.println("Error getting access token, trying again..."); + delay(10000); + return getAccessToken(); + } + + Serial.println("Got access token."); + String result = httpClient.getString(); + + httpClient.end(); + + return result; + } + + WiFiClientSecure _token_client; + WiFiClientSecure _speech_client; + String _access_token; +}; + +SpeechToText speechToText; diff --git a/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/src/text_to_speech.h b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/src/text_to_speech.h new file mode 100644 index 00000000..d7174fcd --- /dev/null +++ b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/src/text_to_speech.h @@ -0,0 +1,86 @@ +#pragma once + +#include <Arduino.h> +#include <ArduinoJson.h> +#include <HTTPClient.h> +#include <Seeed_FS.h> +#include <SD/Seeed_SD.h> +#include <WiFiClient.h> +#include <WiFiClientSecure.h> + +#include "config.h" + +class TextToSpeech +{ +public: + void init() + { + DynamicJsonDocument doc(1024); + doc["language"] = LANGUAGE; + + String body; + serializeJson(doc, body); + + HTTPClient httpClient; + httpClient.begin(_client, GET_VOICES_FUNCTION_URL); + + int httpResponseCode = httpClient.POST(body); + + if (httpResponseCode == 200) + { + String result = httpClient.getString(); + Serial.println(result); + + DynamicJsonDocument doc(1024); + deserializeJson(doc, result.c_str()); + + JsonArray obj = doc.as<JsonArray>(); + _voice = obj[0].as<String>(); + + Serial.print("Using voice "); + Serial.println(_voice); + } + else + { + Serial.print("Failed to get voices - error "); + Serial.println(httpResponseCode); + } + + httpClient.end(); + } + + void convertTextToSpeech(String text) + { + DynamicJsonDocument doc(1024); + doc["language"] = LANGUAGE; + doc["voice"] = _voice; + doc["text"] = text; + + String body; + serializeJson(doc, body); + + HTTPClient httpClient; + httpClient.begin(_client, TEXT_TO_SPEECH_FUNCTION_URL); + + int httpResponseCode = httpClient.POST(body); + + if (httpResponseCode == 200) + { + File 
wav_file = SD.open("SPEECH.WAV", FILE_WRITE); + httpClient.writeToStream(&wav_file); + wav_file.close(); + } + else + { + Serial.print("Failed to get speech - error "); + Serial.println(httpResponseCode); + } + + httpClient.end(); + } +private: + WiFiClient _client; + String _voice; +}; + +TextToSpeech textToSpeech; \ No newline at end of file diff --git a/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/test/README b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/test/README new file mode 100644 index 00000000..b94d0890 --- /dev/null +++ b/6-consumer/lessons/3-spoken-feedback/code-spoken-response/wio-terminal/smart-timer/test/README @@ -0,0 +1,11 @@ + +This directory is intended for PlatformIO Unit Testing and project tests. + +Unit Testing is a software testing method by which individual units of +source code, sets of one or more MCU program modules together with associated +control data, usage procedures, and operating procedures, are tested to +determine whether they are fit for use. Unit testing finds problems early +in the development cycle. + +More information about PlatformIO Unit Testing: +- https://docs.platformio.org/page/plus/unit-testing.html diff --git a/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/include/README b/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/include/README new file mode 100644 index 00000000..194dcd43 --- /dev/null +++ b/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/include/README @@ -0,0 +1,39 @@ + +This directory is intended for project header files. + +A header file is a file containing C declarations and macro definitions +to be shared between several project source files. You request the use of a +header file in your project source file (C, C++, etc) located in `src` folder +by including it, with the C preprocessing directive `#include'. 
+ +```src/main.c + +#include "header.h" + +int main (void) +{ + ... +} +``` + +Including a header file produces the same results as copying the header file +into each source file that needs it. Such copying would be time-consuming +and error-prone. With a header file, the related declarations appear +in only one place. If they need to be changed, they can be changed in one +place, and programs that include the header file will automatically use the +new version when next recompiled. The header file eliminates the labor of +finding and changing all the copies as well as the risk that a failure to +find one copy will result in inconsistencies within a program. + +In C, the usual convention is to give header files names that end with `.h'. +It is most portable to use only letters, digits, dashes, and underscores in +header file names, and at most one dot. + +Read more about using header files in official GCC documentation: + +* Include Syntax +* Include Operation +* Once-Only Headers +* Computed Includes + +https://gcc.gnu.org/onlinedocs/cpp/Header-Files.html diff --git a/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/lib/README b/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/lib/README new file mode 100644 index 00000000..6debab1e --- /dev/null +++ b/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/lib/README @@ -0,0 +1,46 @@ + +This directory is intended for project specific (private) libraries. +PlatformIO will compile them to static libraries and link into executable file. + +The source code of each library should be placed in a an own separate directory +("lib/your_library_name/[here are source files]"). 
+ +For example, see a structure of the following two libraries `Foo` and `Bar`: + +|--lib +| | +| |--Bar +| | |--docs +| | |--examples +| | |--src +| | |- Bar.c +| | |- Bar.h +| | |- library.json (optional, custom build options, etc) https://docs.platformio.org/page/librarymanager/config.html +| | +| |--Foo +| | |- Foo.c +| | |- Foo.h +| | +| |- README --> THIS FILE +| +|- platformio.ini +|--src + |- main.c + +and a contents of `src/main.c`: +``` +#include +#include + +int main (void) +{ + ... +} + +``` + +PlatformIO Library Dependency Finder will find automatically dependent +libraries scanning project source files. + +More information about PlatformIO Library Dependency Finder +- https://docs.platformio.org/page/librarymanager/ldf.html diff --git a/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/platformio.ini b/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/platformio.ini new file mode 100644 index 00000000..8836ab42 --- /dev/null +++ b/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/platformio.ini @@ -0,0 +1,23 @@ +; PlatformIO Project Configuration File +; +; Build options: build flags, source filter +; Upload options: custom upload port, speed and extra flags +; Library options: dependencies, extra library storages +; Advanced options: extra scripting +; +; Please visit documentation for the other options and examples +; https://docs.platformio.org/page/projectconf.html + +[env:seeed_wio_terminal] +platform = atmelsam +board = seeed_wio_terminal +framework = arduino +lib_deps = + seeed-studio/Seeed Arduino FS @ 2.1.1 + seeed-studio/Seeed Arduino SFUD @ 2.0.2 + seeed-studio/Seeed Arduino rpcWiFi @ 1.0.5 + seeed-studio/Seeed Arduino rpcUnified @ 2.1.3 + seeed-studio/Seeed_Arduino_mbedtls @ 3.0.1 + seeed-studio/Seeed Arduino RTC @ 2.0.0 + bblanchon/ArduinoJson @ 6.17.3 + contrem/arduino-timer @ 2.3.0 diff --git 
a/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/src/config.h b/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/src/config.h new file mode 100644 index 00000000..4c06dc6a --- /dev/null +++ b/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/src/config.h @@ -0,0 +1,91 @@ +#pragma once + +#define RATE 16000 +#define SAMPLE_LENGTH_SECONDS 4 +#define SAMPLES RATE * SAMPLE_LENGTH_SECONDS +#define BUFFER_SIZE (SAMPLES * 2) + 44 +#define ADC_BUF_LEN 1600 + +const char *SSID = ""; +const char *PASSWORD = ""; + +const char *SPEECH_API_KEY = ""; +const char *SPEECH_LOCATION = ""; +const char *LANGUAGE = ""; + +const char *TOKEN_URL = "https://%s.api.cognitive.microsoft.com/sts/v1.0/issuetoken"; +const char *SPEECH_URL = "https://%s.stt.speech.microsoft.com/speech/recognition/conversation/cognitiveservices/v1?language=%s"; + +const char *TEXT_TO_TIMER_FUNCTION_URL = "http://:7071/api/text-to-timer"; + +const char *TOKEN_CERTIFICATE = + "-----BEGIN CERTIFICATE-----\r\n" + "MIIF8zCCBNugAwIBAgIQAueRcfuAIek/4tmDg0xQwDANBgkqhkiG9w0BAQwFADBh\r\n" + "MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\r\n" + "d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH\r\n" + "MjAeFw0yMDA3MjkxMjMwMDBaFw0yNDA2MjcyMzU5NTlaMFkxCzAJBgNVBAYTAlVT\r\n" + "MR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xKjAoBgNVBAMTIU1pY3Jv\r\n" + "c29mdCBBenVyZSBUTFMgSXNzdWluZyBDQSAwNjCCAiIwDQYJKoZIhvcNAQEBBQAD\r\n" + "ggIPADCCAgoCggIBALVGARl56bx3KBUSGuPc4H5uoNFkFH4e7pvTCxRi4j/+z+Xb\r\n" + "wjEz+5CipDOqjx9/jWjskL5dk7PaQkzItidsAAnDCW1leZBOIi68Lff1bjTeZgMY\r\n" + "iwdRd3Y39b/lcGpiuP2d23W95YHkMMT8IlWosYIX0f4kYb62rphyfnAjYb/4Od99\r\n" + "ThnhlAxGtfvSbXcBVIKCYfZgqRvV+5lReUnd1aNjRYVzPOoifgSx2fRyy1+pO1Uz\r\n" + "aMMNnIOE71bVYW0A1hr19w7kOb0KkJXoALTDDj1ukUEDqQuBfBxReL5mXiu1O7WG\r\n" + "0vltg0VZ/SZzctBsdBlx1BkmWYBW261KZgBivrql5ELTKKd8qgtHcLQA5fl6JB0Q\r\n" + "gs5XDaWehN86Gps5JW8ArjGtjcWAIP+X8CQaWfaCnuRm6Bk/03PQWhgdi84qwA0s\r\n" + 
"sRfFJwHUPTNSnE8EiGVk2frt0u8PG1pwSQsFuNJfcYIHEv1vOzP7uEOuDydsmCjh\r\n" + "lxuoK2n5/2aVR3BMTu+p4+gl8alXoBycyLmj3J/PUgqD8SL5fTCUegGsdia/Sa60\r\n" + "N2oV7vQ17wjMN+LXa2rjj/b4ZlZgXVojDmAjDwIRdDUujQu0RVsJqFLMzSIHpp2C\r\n" + "Zp7mIoLrySay2YYBu7SiNwL95X6He2kS8eefBBHjzwW/9FxGqry57i71c2cDAgMB\r\n" + "AAGjggGtMIIBqTAdBgNVHQ4EFgQU1cFnOsKjnfR3UltZEjgp5lVou6UwHwYDVR0j\r\n" + "BBgwFoAUTiJUIBiV5uNu5g/6+rkS7QYXjzkwDgYDVR0PAQH/BAQDAgGGMB0GA1Ud\r\n" + "JQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/AgEAMHYG\r\n" + "CCsGAQUFBwEBBGowaDAkBggrBgEFBQcwAYYYaHR0cDovL29jc3AuZGlnaWNlcnQu\r\n" + "Y29tMEAGCCsGAQUFBzAChjRodHRwOi8vY2FjZXJ0cy5kaWdpY2VydC5jb20vRGln\r\n" + "aUNlcnRHbG9iYWxSb290RzIuY3J0MHsGA1UdHwR0MHIwN6A1oDOGMWh0dHA6Ly9j\r\n" + "cmwzLmRpZ2ljZXJ0LmNvbS9EaWdpQ2VydEdsb2JhbFJvb3RHMi5jcmwwN6A1oDOG\r\n" + "MWh0dHA6Ly9jcmw0LmRpZ2ljZXJ0LmNvbS9EaWdpQ2VydEdsb2JhbFJvb3RHMi5j\r\n" + "cmwwHQYDVR0gBBYwFDAIBgZngQwBAgEwCAYGZ4EMAQICMBAGCSsGAQQBgjcVAQQD\r\n" + "AgEAMA0GCSqGSIb3DQEBDAUAA4IBAQB2oWc93fB8esci/8esixj++N22meiGDjgF\r\n" + "+rA2LUK5IOQOgcUSTGKSqF9lYfAxPjrqPjDCUPHCURv+26ad5P/BYtXtbmtxJWu+\r\n" + "cS5BhMDPPeG3oPZwXRHBJFAkY4O4AF7RIAAUW6EzDflUoDHKv83zOiPfYGcpHc9s\r\n" + "kxAInCedk7QSgXvMARjjOqdakor21DTmNIUotxo8kHv5hwRlGhBJwps6fEVi1Bt0\r\n" + "trpM/3wYxlr473WSPUFZPgP1j519kLpWOJ8z09wxay+Br29irPcBYv0GMXlHqThy\r\n" + "8y4m/HyTQeI2IMvMrQnwqPpY+rLIXyviI2vLoI+4xKE4Rn38ZZ8m\r\n" + "-----END CERTIFICATE-----\r\n"; + +const char *SPEECH_CERTIFICATE = + "-----BEGIN CERTIFICATE-----\r\n" + "MIIF8zCCBNugAwIBAgIQCq+mxcpjxFFB6jvh98dTFzANBgkqhkiG9w0BAQwFADBh\r\n" + "MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\r\n" + "d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH\r\n" + "MjAeFw0yMDA3MjkxMjMwMDBaFw0yNDA2MjcyMzU5NTlaMFkxCzAJBgNVBAYTAlVT\r\n" + "MR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xKjAoBgNVBAMTIU1pY3Jv\r\n" + "c29mdCBBenVyZSBUTFMgSXNzdWluZyBDQSAwMTCCAiIwDQYJKoZIhvcNAQEBBQAD\r\n" + "ggIPADCCAgoCggIBAMedcDrkXufP7pxVm1FHLDNA9IjwHaMoaY8arqqZ4Gff4xyr\r\n" + 
"RygnavXL7g12MPAx8Q6Dd9hfBzrfWxkF0Br2wIvlvkzW01naNVSkHp+OS3hL3W6n\r\n" + "l/jYvZnVeJXjtsKYcXIf/6WtspcF5awlQ9LZJcjwaH7KoZuK+THpXCMtzD8XNVdm\r\n" + "GW/JI0C/7U/E7evXn9XDio8SYkGSM63aLO5BtLCv092+1d4GGBSQYolRq+7Pd1kR\r\n" + "EkWBPm0ywZ2Vb8GIS5DLrjelEkBnKCyy3B0yQud9dpVsiUeE7F5sY8Me96WVxQcb\r\n" + "OyYdEY/j/9UpDlOG+vA+YgOvBhkKEjiqygVpP8EZoMMijephzg43b5Qi9r5UrvYo\r\n" + "o19oR/8pf4HJNDPF0/FJwFVMW8PmCBLGstin3NE1+NeWTkGt0TzpHjgKyfaDP2tO\r\n" + "4bCk1G7pP2kDFT7SYfc8xbgCkFQ2UCEXsaH/f5YmpLn4YPiNFCeeIida7xnfTvc4\r\n" + "7IxyVccHHq1FzGygOqemrxEETKh8hvDR6eBdrBwmCHVgZrnAqnn93JtGyPLi6+cj\r\n" + "WGVGtMZHwzVvX1HvSFG771sskcEjJxiQNQDQRWHEh3NxvNb7kFlAXnVdRkkvhjpR\r\n" + "GchFhTAzqmwltdWhWDEyCMKC2x/mSZvZtlZGY+g37Y72qHzidwtyW7rBetZJAgMB\r\n" + "AAGjggGtMIIBqTAdBgNVHQ4EFgQUDyBd16FXlduSzyvQx8J3BM5ygHYwHwYDVR0j\r\n" + "BBgwFoAUTiJUIBiV5uNu5g/6+rkS7QYXjzkwDgYDVR0PAQH/BAQDAgGGMB0GA1Ud\r\n" + "JQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/AgEAMHYG\r\n" + "CCsGAQUFBwEBBGowaDAkBggrBgEFBQcwAYYYaHR0cDovL29jc3AuZGlnaWNlcnQu\r\n" + "Y29tMEAGCCsGAQUFBzAChjRodHRwOi8vY2FjZXJ0cy5kaWdpY2VydC5jb20vRGln\r\n" + "aUNlcnRHbG9iYWxSb290RzIuY3J0MHsGA1UdHwR0MHIwN6A1oDOGMWh0dHA6Ly9j\r\n" + "cmwzLmRpZ2ljZXJ0LmNvbS9EaWdpQ2VydEdsb2JhbFJvb3RHMi5jcmwwN6A1oDOG\r\n" + "MWh0dHA6Ly9jcmw0LmRpZ2ljZXJ0LmNvbS9EaWdpQ2VydEdsb2JhbFJvb3RHMi5j\r\n" + "cmwwHQYDVR0gBBYwFDAIBgZngQwBAgEwCAYGZ4EMAQICMBAGCSsGAQQBgjcVAQQD\r\n" + "AgEAMA0GCSqGSIb3DQEBDAUAA4IBAQAlFvNh7QgXVLAZSsNR2XRmIn9iS8OHFCBA\r\n" + "WxKJoi8YYQafpMTkMqeuzoL3HWb1pYEipsDkhiMnrpfeYZEA7Lz7yqEEtfgHcEBs\r\n" + "K9KcStQGGZRfmWU07hPXHnFz+5gTXqzCE2PBMlRgVUYJiA25mJPXfB00gDvGhtYa\r\n" + "+mENwM9Bq1B9YYLyLjRtUz8cyGsdyTIG/bBM/Q9jcV8JGqMU/UjAdh1pFyTnnHEl\r\n" + "Y59Npi7F87ZqYYJEHJM2LGD+le8VsHjgeWX2CJQko7klXvcizuZvUEDTjHaQcs2J\r\n" + "+kPgfyMIOY1DMJ21NxOJ2xPRC/wAh/hzSBRVtoAnyuxtkZ4VjIOh\r\n" + "-----END CERTIFICATE-----\r\n"; diff --git a/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/src/flash_stream.h 
b/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/src/flash_stream.h new file mode 100644 index 00000000..b841f1d0 --- /dev/null +++ b/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/src/flash_stream.h @@ -0,0 +1,69 @@ +#pragma once + +#include +#include +#include + +#include "config.h" + +class FlashStream : public Stream +{ +public: + FlashStream() + { + _pos = 0; + _flash_address = 0; + _flash = sfud_get_device_table() + 0; + + populateBuffer(); + } + + virtual size_t write(uint8_t val) + { + return 0; + } + + virtual int available() + { + int remaining = BUFFER_SIZE - ((_flash_address - HTTP_TCP_BUFFER_SIZE) + _pos); + int bytes_available = min(HTTP_TCP_BUFFER_SIZE, remaining); + + if (bytes_available == 0) + { + bytes_available = -1; + } + + return bytes_available; + } + + virtual int read() + { + int retVal = _buffer[_pos++]; + + if (_pos == HTTP_TCP_BUFFER_SIZE) + { + populateBuffer(); + } + + return retVal; + } + + virtual int peek() + { + return _buffer[_pos]; + } + +private: + void populateBuffer() + { + sfud_read(_flash, _flash_address, HTTP_TCP_BUFFER_SIZE, _buffer); + _flash_address += HTTP_TCP_BUFFER_SIZE; + _pos = 0; + } + + size_t _pos; + size_t _flash_address; + const sfud_flash *_flash; + + byte _buffer[HTTP_TCP_BUFFER_SIZE]; +}; diff --git a/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/src/flash_writer.h b/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/src/flash_writer.h new file mode 100644 index 00000000..87fdff29 --- /dev/null +++ b/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/src/flash_writer.h @@ -0,0 +1,60 @@ +#pragma once + +#include +#include + +class FlashWriter +{ +public: + void init() + { + _flash = sfud_get_device_table() + 0; + _sfudBufferSize = _flash->chip.erase_gran; + _sfudBuffer = new byte[_sfudBufferSize]; + _sfudBufferPos = 0; + _sfudBufferWritePos = 0; + } + + void reset() + { + _sfudBufferPos = 
0; + _sfudBufferWritePos = 0; + } + + void writeSfudBuffer(byte b) + { + _sfudBuffer[_sfudBufferPos++] = b; + if (_sfudBufferPos == _sfudBufferSize) + { + sfud_erase_write(_flash, _sfudBufferWritePos, _sfudBufferSize, _sfudBuffer); + _sfudBufferWritePos += _sfudBufferSize; + _sfudBufferPos = 0; + } + } + + void flushSfudBuffer() + { + if (_sfudBufferPos > 0) + { + sfud_erase_write(_flash, _sfudBufferWritePos, _sfudBufferSize, _sfudBuffer); + _sfudBufferWritePos += _sfudBufferSize; + _sfudBufferPos = 0; + } + } + + void writeSfudBuffer(byte *b, size_t len) + { + for (size_t i = 0; i < len; ++i) + { + writeSfudBuffer(b[i]); + } + } + +private: + byte *_sfudBuffer; + size_t _sfudBufferSize; + size_t _sfudBufferPos; + size_t _sfudBufferWritePos; + + const sfud_flash *_flash; +}; \ No newline at end of file diff --git a/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/src/language_understanding.h b/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/src/language_understanding.h new file mode 100644 index 00000000..1c8d8653 --- /dev/null +++ b/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/src/language_understanding.h @@ -0,0 +1,53 @@ +#pragma once + +#include +#include +#include +#include + +#include "config.h" + +class LanguageUnderstanding +{ +public: + int GetTimerDuration(String text) + { + DynamicJsonDocument doc(1024); + doc["text"] = text; + + String body; + serializeJson(doc, body); + + HTTPClient httpClient; + httpClient.begin(_client, TEXT_TO_TIMER_FUNCTION_URL); + + int httpResponseCode = httpClient.POST(body); + + int seconds = 0; + if (httpResponseCode == 200) + { + String result = httpClient.getString(); + Serial.println(result); + + DynamicJsonDocument doc(1024); + deserializeJson(doc, result.c_str()); + + JsonObject obj = doc.as(); + seconds = obj["seconds"].as(); + } + else + { + Serial.print("Failed to understand text - error "); + Serial.println(httpResponseCode); + } + + 
httpClient.end(); + + return seconds; + } + +private: + WiFiClient _client; +}; + +LanguageUnderstanding languageUnderstanding; \ No newline at end of file diff --git a/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/src/main.cpp b/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/src/main.cpp new file mode 100644 index 00000000..1f538015 --- /dev/null +++ b/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/src/main.cpp @@ -0,0 +1,127 @@ +#include +#include +#include +#include +#include + +#include "config.h" +#include "language_understanding.h" +#include "mic.h" +#include "speech_to_text.h" + +void connectWiFi() +{ + while (WiFi.status() != WL_CONNECTED) + { + Serial.println("Connecting to WiFi.."); + WiFi.begin(SSID, PASSWORD); + delay(500); + } + + Serial.println("Connected!"); +} + +void setup() +{ + Serial.begin(9600); + + while (!Serial) + ; // Wait for Serial to be ready + + delay(1000); + + connectWiFi(); + + while (!(sfud_init() == SFUD_SUCCESS)) + ; + + sfud_qspi_fast_read_enable(sfud_get_device(SFUD_W25Q32_DEVICE_INDEX), 2); + + pinMode(WIO_KEY_C, INPUT_PULLUP); + + mic.init(); + + speechToText.init(); + + Serial.println("Ready."); +} + +auto timer = timer_create_default(); + +void say(String text) +{ + Serial.println(text); +} + +bool timerExpired(void *announcement) +{ + say((char *)announcement); + return false; +} + +void processAudio() +{ + String text = speechToText.convertSpeechToText(); + Serial.println(text); + + int total_seconds = languageUnderstanding.GetTimerDuration(text); + if (total_seconds == 0) + { + return; + } + + int minutes = total_seconds / 60; + int seconds = total_seconds % 60; + + String begin_message; + if (minutes > 0) + { + begin_message += minutes; + begin_message += " minute "; + } + if (seconds > 0) + { + begin_message += seconds; + begin_message += " second "; + } + + begin_message += "timer started."; + + String end_message("Times up on your "); + if 
(minutes > 0) + { + end_message += minutes; + end_message += " minute "; + } + if (seconds > 0) + { + end_message += seconds; + end_message += " second "; + } + + end_message += "timer."; + + say(begin_message); + + timer.in(total_seconds * 1000, timerExpired, (void *)(end_message.c_str())); +} + +void loop() +{ + if (digitalRead(WIO_KEY_C) == LOW && !mic.isRecording()) + { + Serial.println("Starting recording..."); + mic.startRecording(); + } + + if (!mic.isRecording() && mic.isRecordingReady()) + { + Serial.println("Finished recording"); + + processAudio(); + + mic.reset(); + } + + timer.tick(); +} diff --git a/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/src/mic.h b/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/src/mic.h new file mode 100644 index 00000000..5f0815de --- /dev/null +++ b/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/src/mic.h @@ -0,0 +1,242 @@ +#pragma once + +#include + +#include "config.h" +#include "flash_writer.h" + +class Mic +{ +public: + Mic() + { + _isRecording = false; + _isRecordingReady = false; + } + + void startRecording() + { + _isRecording = true; + _isRecordingReady = false; + } + + bool isRecording() + { + return _isRecording; + } + + bool isRecordingReady() + { + return _isRecordingReady; + } + + void init() + { + analogReference(AR_INTERNAL2V23); + + _writer.init(); + + initBufferHeader(); + configureDmaAdc(); + } + + void reset() + { + _isRecordingReady = false; + _isRecording = false; + + _writer.reset(); + + initBufferHeader(); + } + + void dmaHandler() + { + static uint8_t count = 0; + + if (DMAC->Channel[1].CHINTFLAG.bit.SUSP) + { + DMAC->Channel[1].CHCTRLB.reg = DMAC_CHCTRLB_CMD_RESUME; + DMAC->Channel[1].CHINTFLAG.bit.SUSP = 1; + + if (count) + { + audioCallback(_adc_buf_0, ADC_BUF_LEN); + } + else + { + audioCallback(_adc_buf_1, ADC_BUF_LEN); + } + + count = (count + 1) % 2; + } + } + +private: + volatile bool _isRecording; + volatile bool 
_isRecordingReady; + FlashWriter _writer; + +typedef struct + { + uint16_t btctrl; + uint16_t btcnt; + uint32_t srcaddr; + uint32_t dstaddr; + uint32_t descaddr; + } dmacdescriptor; + + // Globals - DMA and ADC + volatile dmacdescriptor _wrb[DMAC_CH_NUM] __attribute__((aligned(16))); + dmacdescriptor _descriptor_section[DMAC_CH_NUM] __attribute__((aligned(16))); + dmacdescriptor _descriptor __attribute__((aligned(16))); + + void configureDmaAdc() + { + // Configure DMA to sample from ADC at a regular interval (triggered by timer/counter) + DMAC->BASEADDR.reg = (uint32_t)_descriptor_section; // Specify the location of the descriptors + DMAC->WRBADDR.reg = (uint32_t)_wrb; // Specify the location of the write back descriptors + DMAC->CTRL.reg = DMAC_CTRL_DMAENABLE | DMAC_CTRL_LVLEN(0xf); // Enable the DMAC peripheral + DMAC->Channel[1].CHCTRLA.reg = DMAC_CHCTRLA_TRIGSRC(TC5_DMAC_ID_OVF) | // Set DMAC to trigger on TC5 timer overflow + DMAC_CHCTRLA_TRIGACT_BURST; // DMAC burst transfer + + _descriptor.descaddr = (uint32_t)&_descriptor_section[1]; // Set up a circular descriptor + _descriptor.srcaddr = (uint32_t)&ADC1->RESULT.reg; // Take the result from the ADC0 RESULT register + _descriptor.dstaddr = (uint32_t)_adc_buf_0 + sizeof(uint16_t) * ADC_BUF_LEN; // Place it in the adc_buf_0 array + _descriptor.btcnt = ADC_BUF_LEN; // Beat count + _descriptor.btctrl = DMAC_BTCTRL_BEATSIZE_HWORD | // Beat size is HWORD (16-bits) + DMAC_BTCTRL_DSTINC | // Increment the destination address + DMAC_BTCTRL_VALID | // Descriptor is valid + DMAC_BTCTRL_BLOCKACT_SUSPEND; // Suspend DMAC channel 0 after block transfer + memcpy(&_descriptor_section[0], &_descriptor, sizeof(_descriptor)); // Copy the descriptor to the descriptor section + + _descriptor.descaddr = (uint32_t)&_descriptor_section[0]; // Set up a circular descriptor + _descriptor.srcaddr = (uint32_t)&ADC1->RESULT.reg; // Take the result from the ADC0 RESULT register + _descriptor.dstaddr = (uint32_t)_adc_buf_1 + 
sizeof(uint16_t) * ADC_BUF_LEN; // Place it in the adc_buf_1 array + _descriptor.btcnt = ADC_BUF_LEN; // Beat count + _descriptor.btctrl = DMAC_BTCTRL_BEATSIZE_HWORD | // Beat size is HWORD (16-bits) + DMAC_BTCTRL_DSTINC | // Increment the destination address + DMAC_BTCTRL_VALID | // Descriptor is valid + DMAC_BTCTRL_BLOCKACT_SUSPEND; // Suspend DMAC channel 0 after block transfer + memcpy(&_descriptor_section[1], &_descriptor, sizeof(_descriptor)); // Copy the descriptor to the descriptor section + + // Configure NVIC + NVIC_SetPriority(DMAC_1_IRQn, 0); // Set the Nested Vector Interrupt Controller (NVIC) priority for DMAC1 to 0 (highest) + NVIC_EnableIRQ(DMAC_1_IRQn); // Connect DMAC1 to Nested Vector Interrupt Controller (NVIC) + + // Activate the suspend (SUSP) interrupt on DMAC channel 1 + DMAC->Channel[1].CHINTENSET.reg = DMAC_CHINTENSET_SUSP; + + // Configure ADC + ADC1->INPUTCTRL.bit.MUXPOS = ADC_INPUTCTRL_MUXPOS_AIN12_Val; // Set the analog input to ADC0/AIN2 (PB08 - A4 on Metro M4) + while (ADC1->SYNCBUSY.bit.INPUTCTRL) + ; // Wait for synchronization + ADC1->SAMPCTRL.bit.SAMPLEN = 0x00; // Set max Sampling Time Length to half divided ADC clock pulse (2.66us) + while (ADC1->SYNCBUSY.bit.SAMPCTRL) + ; // Wait for synchronization + ADC1->CTRLA.reg = ADC_CTRLA_PRESCALER_DIV128; // Divide Clock ADC GCLK by 128 (48MHz/128 = 375kHz) + ADC1->CTRLB.reg = ADC_CTRLB_RESSEL_12BIT | // Set ADC resolution to 12 bits + ADC_CTRLB_FREERUN; // Set ADC to free run mode + while (ADC1->SYNCBUSY.bit.CTRLB) + ; // Wait for synchronization + ADC1->CTRLA.bit.ENABLE = 1; // Enable the ADC + while (ADC1->SYNCBUSY.bit.ENABLE) + ; // Wait for synchronization + ADC1->SWTRIG.bit.START = 1; // Initiate a software trigger to start an ADC conversion + while (ADC1->SYNCBUSY.bit.SWTRIG) + ; // Wait for synchronization + + // Enable DMA channel 1 + DMAC->Channel[1].CHCTRLA.bit.ENABLE = 1; + + // Configure Timer/Counter 5 + GCLK->PCHCTRL[TC5_GCLK_ID].reg = GCLK_PCHCTRL_CHEN | // Enable 
perhipheral channel for TC5 + GCLK_PCHCTRL_GEN_GCLK1; // Connect generic clock 0 at 48MHz + + TC5->COUNT16.WAVE.reg = TC_WAVE_WAVEGEN_MFRQ; // Set TC5 to Match Frequency (MFRQ) mode + TC5->COUNT16.CC[0].reg = 3000 - 1; // Set the trigger to 16 kHz: (4Mhz / 16000) - 1 + while (TC5->COUNT16.SYNCBUSY.bit.CC0) + ; // Wait for synchronization + + // Start Timer/Counter 5 + TC5->COUNT16.CTRLA.bit.ENABLE = 1; // Enable the TC5 timer + while (TC5->COUNT16.SYNCBUSY.bit.ENABLE) + ; // Wait for synchronization + } + + uint16_t _adc_buf_0[ADC_BUF_LEN]; + uint16_t _adc_buf_1[ADC_BUF_LEN]; + + // WAV files have a header. This struct defines that header + struct wavFileHeader + { + char riff[4]; /* "RIFF" */ + long flength; /* file length in bytes */ + char wave[4]; /* "WAVE" */ + char fmt[4]; /* "fmt " */ + long chunk_size; /* size of FMT chunk in bytes (usually 16) */ + short format_tag; /* 1=PCM, 257=Mu-Law, 258=A-Law, 259=ADPCM */ + short num_chans; /* 1=mono, 2=stereo */ + long srate; /* Sampling rate in samples per second */ + long bytes_per_sec; /* bytes per second = srate*bytes_per_samp */ + short bytes_per_samp; /* 2=16-bit mono, 4=16-bit stereo */ + short bits_per_samp; /* Number of bits per sample */ + char data[4]; /* "data" */ + long dlength; /* data length in bytes (filelength - 44) */ + }; + + void initBufferHeader() + { + wavFileHeader wavh; + + strncpy(wavh.riff, "RIFF", 4); + strncpy(wavh.wave, "WAVE", 4); + strncpy(wavh.fmt, "fmt ", 4); + strncpy(wavh.data, "data", 4); + + wavh.chunk_size = 16; + wavh.format_tag = 1; // PCM + wavh.num_chans = 1; // mono + wavh.srate = RATE; + wavh.bytes_per_sec = (RATE * 1 * 16 * 1) / 8; + wavh.bytes_per_samp = 2; + wavh.bits_per_samp = 16; + wavh.dlength = RATE * 2 * 1 * 16 / 2; + wavh.flength = wavh.dlength + 44; + + _writer.writeSfudBuffer((byte *)&wavh, 44); + } + + void audioCallback(uint16_t *buf, uint32_t buf_len) + { + static uint32_t idx = 44; + + if (_isRecording) + { + for (uint32_t i = 0; i < buf_len; i++) + { + 
int16_t audio_value = ((int16_t)buf[i] - 2048) * 16; + + _writer.writeSfudBuffer(audio_value & 0xFF); + _writer.writeSfudBuffer((audio_value >> 8) & 0xFF); + } + + idx += buf_len; + + if (idx >= BUFFER_SIZE) + { + _writer.flushSfudBuffer(); + idx = 44; + _isRecording = false; + _isRecordingReady = true; + } + } + } +}; + +Mic mic; + +void DMAC_1_Handler() +{ + mic.dmaHandler(); +} diff --git a/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/src/speech_to_text.h b/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/src/speech_to_text.h new file mode 100644 index 00000000..a7ce075f --- /dev/null +++ b/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/src/speech_to_text.h @@ -0,0 +1,102 @@ +#pragma once + +#include +#include +#include +#include + +#include "config.h" +#include "flash_stream.h" + +class SpeechToText +{ +public: + void init() + { + _token_client.setCACert(TOKEN_CERTIFICATE); + _speech_client.setCACert(SPEECH_CERTIFICATE); + _access_token = getAccessToken(); + } + + String convertSpeechToText() + { + char url[128]; + sprintf(url, SPEECH_URL, SPEECH_LOCATION, LANGUAGE); + + HTTPClient httpClient; + httpClient.begin(_speech_client, url); + + httpClient.addHeader("Authorization", String("Bearer ") + _access_token); + httpClient.addHeader("Content-Type", String("audio/wav; codecs=audio/pcm; samplerate=") + String(RATE)); + httpClient.addHeader("Accept", "application/json;text/xml"); + + Serial.println("Sending speech..."); + + FlashStream stream; + int httpResponseCode = httpClient.sendRequest("POST", &stream, BUFFER_SIZE); + + Serial.println("Speech sent!"); + + String text = ""; + + if (httpResponseCode == 200) + { + String result = httpClient.getString(); + Serial.println(result); + + DynamicJsonDocument doc(1024); + deserializeJson(doc, result.c_str()); + + JsonObject obj = doc.as(); + text = obj["DisplayText"].as(); + } + else if (httpResponseCode == 401) + { + Serial.println("Access 
token expired, trying again with a new token"); + _access_token = getAccessToken(); + return convertSpeechToText(); + } + else + { + Serial.print("Failed to convert text to speech - error "); + Serial.println(httpResponseCode); + } + + httpClient.end(); + + return text; + } + +private: + String getAccessToken() + { + char url[128]; + sprintf(url, TOKEN_URL, SPEECH_LOCATION); + + HTTPClient httpClient; + httpClient.begin(_token_client, url); + + httpClient.addHeader("Ocp-Apim-Subscription-Key", SPEECH_API_KEY); + int httpResultCode = httpClient.POST("{}"); + + if (httpResultCode != 200) + { + Serial.println("Error getting access token, trying again..."); + delay(10000); + return getAccessToken(); + } + + Serial.println("Got access token."); + String result = httpClient.getString(); + + httpClient.end(); + + return result; + } + + WiFiClientSecure _token_client; + WiFiClientSecure _speech_client; + String _access_token; +}; + +SpeechToText speechToText; diff --git a/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/test/README b/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/test/README new file mode 100644 index 00000000..b94d0890 --- /dev/null +++ b/6-consumer/lessons/3-spoken-feedback/code-timer/wio-terminal/smart-timer/test/README @@ -0,0 +1,11 @@ + +This directory is intended for PlatformIO Unit Testing and project tests. + +Unit Testing is a software testing method by which individual units of +source code, sets of one or more MCU program modules together with associated +control data, usage procedures, and operating procedures, are tested to +determine whether they are fit for use. Unit testing finds problems early +in the development cycle. 
+ +More information about PlatformIO Unit Testing: +- https://docs.platformio.org/page/plus/unit-testing.html diff --git a/6-consumer/lessons/3-spoken-feedback/single-board-computer-set-timer.md b/6-consumer/lessons/3-spoken-feedback/single-board-computer-set-timer.md index b2b87597..828fde95 100644 --- a/6-consumer/lessons/3-spoken-feedback/single-board-computer-set-timer.md +++ b/6-consumer/lessons/3-spoken-feedback/single-board-computer-set-timer.md @@ -1,6 +1,6 @@ # Set a timer - Virtual IoT Hardware and Raspberry Pi -In this part of the lesson, you will set a timer on your virtual IoT device or Raspberry Pi based off a command from the IoT Hub. +In this part of the lesson, you will call your serverless code to understand the speech, and set a timer on your virtual IoT device or Raspberry Pi based off the results. ## Set a timer diff --git a/6-consumer/lessons/3-spoken-feedback/wio-terminal-set-timer.md b/6-consumer/lessons/3-spoken-feedback/wio-terminal-set-timer.md index 2e8910e5..3480a4fd 100644 --- a/6-consumer/lessons/3-spoken-feedback/wio-terminal-set-timer.md +++ b/6-consumer/lessons/3-spoken-feedback/wio-terminal-set-timer.md @@ -1,3 +1,287 @@ # Set a timer - Wio Terminal -Coming soon +In this part of the lesson, you will call your serverless code to understand the speech, and set a timer on your Wio Terminal based off the results. + +## Set a timer + +The text that comes back from the speech to text call needs to be sent to your serverless code to be processed by LUIS, getting back the number of seconds for the timer. This number of seconds can be used to set a timer. + +Microcontrollers don't natively have support for multiple threads in Arduino, so there are no standard timer classes like you might find when coding in Python or other higher-level languages. Instead you can use timer libraries that work by measuring elapsed time in the `loop` function, and calling functions when the time is up.
+ +### Task - send the text to the serverless function + +1. Open the `smart-timer` project in VS Code if it is not already open. + +1. Open the `config.h` header file and add the URL for your function app: + + ```cpp + const char *TEXT_TO_TIMER_FUNCTION_URL = ""; + ``` + + Replace `` with the URL for your function app that you obtained in the last step of the last lesson, pointing to the IP address of your local machine that is running the function app. + +1. Create a new file in the `src` folder called `language_understanding.h`. This will be used to define a class to send the recognized speech to your function app to be converted to seconds using LUIS. + +1. Add the following to the top of this file: + + ```cpp + #pragma once + + #include + #include + #include + #include + + #include "config.h" + ``` + + This includes some needed header files. + +1. Define a class called `LanguageUnderstanding`, and declare an instance of this class: + + ```cpp + class LanguageUnderstanding + { + public: + private: + }; + + LanguageUnderstanding languageUnderstanding; + ``` + +1. To call your functions app, you need to declare a WiFi client. Add the following to the `private` section of the class: + + ```cpp + WiFiClient _client; + ``` + +1. In the `public` section, declare a method called `GetTimerDuration` to call the functions app: + + ```cpp + int GetTimerDuration(String text) + { + } + ``` + +1. In the `GetTimerDuration` method, add the following code to build the JSON to be sent to the functions app: + + ```cpp + DynamicJsonDocument doc(1024); + doc["text"] = text; + + String body; + serializeJson(doc, body); + ``` + + This coverts the text passed to the `GetTimerDuration` method into the following JSON: + + ```json + { + "text" : "" + } + ``` + + where `` is the text passed to the function. + +1. 
Below this, add the following code to make the functions app call:
+
+    ```cpp
+    HTTPClient httpClient;
+    httpClient.begin(_client, TEXT_TO_TIMER_FUNCTION_URL);
+
+    int httpResponseCode = httpClient.POST(body);
+    ```
+
+    This makes a POST request to the functions app, passing the JSON body and getting the response code.
+
+1. Add the following code below this:
+
+    ```cpp
+    int seconds = 0;
+    if (httpResponseCode == 200)
+    {
+        String result = httpClient.getString();
+        Serial.println(result);
+
+        DynamicJsonDocument doc(1024);
+        deserializeJson(doc, result.c_str());
+
+        JsonObject obj = doc.as<JsonObject>();
+        seconds = obj["seconds"].as<int>();
+    }
+    else
+    {
+        Serial.print("Failed to understand text - error ");
+        Serial.println(httpResponseCode);
+    }
+    ```
+
+    This code checks the response code. If it is 200 (success), then the number of seconds for the time is retrieved from the response body. Otherwise an error is sent to the serial monitor and the number of seconds is set to 0.
+
+1. Add the following code to the end of this method to close the HTTP connection and return the number of seconds:
+
+    ```cpp
+    httpClient.end();
+
+    return seconds;
+    ```
+
+1. In the `main.cpp` file, include this new header:
+
+    ```cpp
+    #include "language_understanding.h"
+    ```
+
+1. At the end of the `processAudio` function, call the `GetTimerDuration` method to get the timer duration:
+
+    ```cpp
+    int total_seconds = languageUnderstanding.GetTimerDuration(text);
+    ```
+
+    This converts the text from the call to the `SpeechToText` class into the number of seconds for the timer.
+
+### Task - set a timer
+
+The number of seconds can be used to set a timer.
+
+1. Add the following library dependency to the `platformio.ini` file to add a library to set a timer:
+
+    ```ini
+    contrem/arduino-timer @ 2.3.0
+    ```
+
+1. Add an include directive for this library to the `main.cpp` file:
+
+    ```cpp
+    #include <arduino-timer.h>
+    ```
+
+1. 
Above the `processAudio` function, add the following code:
+
+    ```cpp
+    auto timer = timer_create_default();
+    ```
+
+    This code declares a timer called `timer`.
+
+1. Below this, add the following code:
+
+    ```cpp
+    void say(String text)
+    {
+        Serial.print("Saying ");
+        Serial.println(text);
+    }
+    ```
+
+    This `say` function will eventually convert text to speech, but for now it will just write the passed in text to the serial monitor.
+
+1. Below the `say` function, add the following code:
+
+    ```cpp
+    bool timerExpired(void *announcement)
+    {
+        say((char *)announcement);
+        return false;
+    }
+    ```
+
+    This is a callback function that will be called when a timer expires. It is passed a message to say when the timer expires. Timers can repeat, and this can be controlled by the return value of this callback - this returns `false`, to tell the timer to not run again.
+
+1. Add the following code to the end of the `processAudio` function:
+
+    ```cpp
+    if (total_seconds == 0)
+    {
+        return;
+    }
+
+    int minutes = total_seconds / 60;
+    int seconds = total_seconds % 60;
+    ```
+
+    This code checks the total number of seconds, and if it is 0, returns from the function call so no timers are set. It then converts the total number of seconds into minutes and seconds.
+
+1. Below this code, add the following to create a message to say when the timer is started:
+
+    ```cpp
+    String begin_message;
+    if (minutes > 0)
+    {
+        begin_message += minutes;
+        begin_message += " minute ";
+    }
+    if (seconds > 0)
+    {
+        begin_message += seconds;
+        begin_message += " second ";
+    }
+
+    begin_message += "timer started.";
+    ```
+
+1. Below this, add similar code to create a message to say when the timer has expired:
+
+    ```cpp
+    String end_message("Times up on your ");
+    if (minutes > 0)
+    {
+        end_message += minutes;
+        end_message += " minute ";
+    }
+    if (seconds > 0)
+    {
+        end_message += seconds;
+        end_message += " second ";
+    }
+
+    end_message += "timer.";
+    ```
+
+1. 
After this, say the timer started message: + + ```cpp + say(begin_message); + ``` + +1. At the end of this function, start the timer: + + ```cpp + timer.in(total_seconds * 1000, timerExpired, (void *)(end_message.c_str())); + ``` + + This triggers the timer. The timer is set using milliseconds, so the total number of seconds is multiplied by 1,000 to convert to milliseconds. The `timerExpired` function is passed as the callback, and the `end_message` is passed as an argument to pass to the callback. This callback only takes `void *` arguments, so the string is converted appropriately. + +1. Finally, the timer needs to *tick*, and this is done in the `loop` function. Add the following code at the end of the `loop` function: + + ```cpp + timer.tick(); + ``` + +1. Build this code, upload it to your Wio Terminal and test it out through the serial monitor. Once you see `Ready` in the serial monitor, press the C button (the one on the left-hand side, closest to the power switch), and speak. 4 seconds of audio will be captured, converted to text, then sent to your function app, and a timer will be set. Make sure your functions app is running locally. + + You will see when the timer starts, and when it ends. + + ```output + --- Available filters and text transformations: colorize, debug, default, direct, hexlify, log2file, nocontrol, printable, send_on_enter, time + --- More details at http://bit.ly/pio-monitor-filters + --- Miniterm on /dev/cu.usbmodem1101 9600,8,N,1 --- + --- Quit: Ctrl+C | Menu: Ctrl+T | Help: Ctrl+T followed by Ctrl+H --- + Connecting to WiFi.. + Connected! + Got access token. + Ready. + Starting recording... + Finished recording + Sending speech... + Speech sent! + {"RecognitionStatus":"Success","DisplayText":"Set a 2 minute and 27 second timer.","Offset":4700000,"Duration":35300000} + Set a 2 minute and 27 second timer. + {"seconds": 147} + 2 minute 27 second timer started. + Times up on your 2 minute 27 second timer. 
+    ```
+
+> 💁 You can find this code in the [code-timer/wio-terminal](code-timer/wio-terminal) folder.
+
+😀 Your timer program was a success!
diff --git a/6-consumer/lessons/3-spoken-feedback/wio-terminal-text-to-speech.md b/6-consumer/lessons/3-spoken-feedback/wio-terminal-text-to-speech.md index e27369e6..bb247e19 100644 --- a/6-consumer/lessons/3-spoken-feedback/wio-terminal-text-to-speech.md +++ b/6-consumer/lessons/3-spoken-feedback/wio-terminal-text-to-speech.md @@ -1,3 +1,522 @@ # Text to speech - Wio Terminal -Coming soon +In this part of the lesson, you will convert text to speech to provide spoken feedback.
+
+## Text to speech
+
+The speech services SDK that you used in the last lesson to convert speech to text can be used to convert text back to speech.
+
+## Get a list of voices
+
+When requesting speech, you need to provide the voice to use as speech can be generated using a variety of different voices. Each language supports a range of different voices, and you can get the list of supported voices for each language from the speech services SDK. The limitations of microcontrollers come into play here - the call to get the list of voices supported by the text to speech services is a JSON document of over 77KB in size, far too large to be processed by the Wio Terminal. At the time of writing, the full list contains 215 voices, each defined by a JSON document like the following:
+
+```json
+{
+    "Name": "Microsoft Server Speech Text to Speech Voice (en-US, AriaNeural)",
+    "DisplayName": "Aria",
+    "LocalName": "Aria",
+    "ShortName": "en-US-AriaNeural",
+    "Gender": "Female",
+    "Locale": "en-US",
+    "StyleList": [
+        "chat",
+        "customerservice",
+        "narration-professional",
+        "newscast-casual",
+        "newscast-formal",
+        "cheerful",
+        "empathetic"
+    ],
+    "SampleRateHertz": "24000",
+    "VoiceType": "Neural",
+    "Status": "GA"
+}
+```
+
+This JSON is for the **Aria** voice, which has multiple voice styles. 
All that is needed when converting text to speech is the shortname, `en-US-AriaNeural`. + +Instead of downloading and decoding this entire list on your microcontroller, you will need to write some more serverless code to retrieve the list of voices for the language you are using, and call this from your Wio Terminal. Your code can then pick an appropriate voice from the list, such as the first one it finds. + +### Task - create a serverless function to get a list of voices + +1. Open your `smart-timer-trigger` project in VS Code, and open the terminal ensuring the virtual environment is activated. If not, kill and re-create the terminal. + +1. Open the `local.settings.json` file and add settings for the speech API key and location: + + ```json + "SPEECH_KEY": "", + "SPEECH_LOCATION": "" + ``` + + Replace `` with the API key for your speech service resource. Replace `` with the location you used when you created the speech service resource. + +1. Add a new HTTP trigger to this app called `get-voices` using the following command from inside the VS Code terminal in the root folder of the functions app project: + + ```sh + func new --name get-voices --template "HTTP trigger" + ``` + + This will create an HTTP trigger called `get-voices`. + +1. 
Replace the contents of the `__init__.py` file in the `get-voices` folder with the following: + + ```python + import json + import os + import requests + + import azure.functions as func + + def main(req: func.HttpRequest) -> func.HttpResponse: + location = os.environ['SPEECH_LOCATION'] + speech_key = os.environ['SPEECH_KEY'] + + req_body = req.get_json() + language = req_body['language'] + + url = f'https://{location}.tts.speech.microsoft.com/cognitiveservices/voices/list' + + headers = { + 'Ocp-Apim-Subscription-Key': speech_key + } + + response = requests.get(url, headers=headers) + voices_json = json.loads(response.text) + + voices = filter(lambda x: x['Locale'].lower() == language.lower(), voices_json) + voices = map(lambda x: x['ShortName'], voices) + + return func.HttpResponse(json.dumps(list(voices)), status_code=200) + ``` + + This code makes an HTTP request to the endpoint to get the voices. This voices list is a large block of JSON with voices for all languages, so the voices for the language passed in the request body are filtered out, then the shortname is extracted and returned as a JSON list. The shortname is the value needed to convert text to speech, so only this value is returned. + + > 💁 You can change the filter as necessary to select just the voices you want. + + This reduces the size of the data from 77KB (at the time of writing), to a much smaller JSON document. For example, for US voices this is 408 bytes. + +1. Run your function app locally. You can then call this using a tool like curl in the same way that you tested your `text-to-timer` HTTP trigger. Make sure to pass your language as a JSON body: + + ```json + { + "language":"" + } + ``` + + Replace `` with your language, such as `en-GB`, or `zh-CN`. + +> 💁 You can find this code in the [code-spoken-response/functions](code-spoken-response/functions) folder. + +### Task - retrieve the voice from your Wio Terminal + +1. Open the `smart-timer` project in VS Code if it is not already open. 
+ +1. Open the `config.h` header file and add the URL for your function app: + + ```cpp + const char *GET_VOICES_FUNCTION_URL = ""; + ``` + + Replace `` with the URL for the `get-voices` HTTP trigger on your function app. This will be the same as the value for `TEXT_TO_TIMER_FUNCTION_URL`, except with a function name of `get-voices` instead of `text-to-timer`. + +1. Create a new file in the `src` folder called `text_to_speech.h`. This will be used to define a class to convert from text to speech. + +1. Add the following include directives to the top of the new `text_to_speech.h` file: + + ```cpp + #pragma once + + #include + #include + #include + #include + #include + #include + #include + + #include "config.h" + #include "speech_to_text.h" + ``` + +1. Add the following code below this to declare the `TextToSpeech` class, along with an instance that can be used in the rest of the application: + + ```cpp + class TextToSpeech + { + public: + private: + }; + + TextToSpeech textToSpeech; + ``` + +1. To call your functions app, you need to declare a WiFi client. Add the following to the `private` section of the class: + + ```cpp + WiFiClient _client; + ``` + +1. In the `private` section, add a field for the selected voice: + + ```cpp + String _voice; + ``` + +1. To the `public` section, add an `init` function that will get the first voice: + + ```cpp + void init() + { + } + ``` + +1. To get the voices, a JSON document needs to be sent to the function app with the language. Add the following code to the `init` function to create this JSON document: + + ```cpp + DynamicJsonDocument doc(1024); + doc["language"] = LANGUAGE; + + String body; + serializeJson(doc, body); + ``` + +1. Next create an `HTTPClient`, then use it to call the functions app to get the voices, posting the JSON document: + + ```cpp + HTTPClient httpClient; + httpClient.begin(_client, GET_VOICES_FUNCTION_URL); + + int httpResponseCode = httpClient.POST(body); + ``` + +1. 
Below this add code to check the response code, and if it is 200 (success), then extract the list of voices, retrieving the first one from the list:
+
+    ```cpp
+    if (httpResponseCode == 200)
+    {
+        String result = httpClient.getString();
+        Serial.println(result);
+
+        DynamicJsonDocument doc(1024);
+        deserializeJson(doc, result.c_str());
+
+        JsonArray obj = doc.as<JsonArray>();
+        _voice = obj[0].as<String>();
+
+        Serial.print("Using voice ");
+        Serial.println(_voice);
+    }
+    else
+    {
+        Serial.print("Failed to get voices - error ");
+        Serial.println(httpResponseCode);
+    }
+    ```
+
+1. After this, end the HTTP client connection:
+
+    ```cpp
+    httpClient.end();
+    ```
+
+1. Open the `main.cpp` file, and add the following include directive at the top to include this new header file:
+
+    ```cpp
+    #include "text_to_speech.h"
+    ```
+
+1. In the `setup` function, underneath the call to `speechToText.init();`, add the following to initialize the `TextToSpeech` class:
+
+    ```cpp
+    textToSpeech.init();
+    ```
+
+1. Build this code, upload it to your Wio Terminal and test it out through the serial monitor. Make sure your function app is running.
+
+    You will see the list of available voices returned from the function app, along with the selected voice.
+
+    ```output
+    --- Available filters and text transformations: colorize, debug, default, direct, hexlify, log2file, nocontrol, printable, send_on_enter, time
+    --- More details at http://bit.ly/pio-monitor-filters
+    --- Miniterm on /dev/cu.usbmodem1101 9600,8,N,1 ---
+    --- Quit: Ctrl+C | Menu: Ctrl+T | Help: Ctrl+T followed by Ctrl+H ---
+    Connecting to WiFi..
+    Connected!
+    Got access token. 
+ ["en-US-JennyNeural", "en-US-JennyMultilingualNeural", "en-US-GuyNeural", "en-US-AriaNeural", "en-US-AmberNeural", "en-US-AnaNeural", "en-US-AshleyNeural", "en-US-BrandonNeural", "en-US-ChristopherNeural", "en-US-CoraNeural", "en-US-ElizabethNeural", "en-US-EricNeural", "en-US-JacobNeural", "en-US-MichelleNeural", "en-US-MonicaNeural", "en-US-AriaRUS", "en-US-BenjaminRUS", "en-US-GuyRUS", "en-US-ZiraRUS"] + Using voice en-US-JennyNeural + Ready. + ``` + +## Convert text to speech + +Once you have a voice to use, it can be used to convert text to speech. The same memory limitations with voices also apply when converting speech to text, so you will need to write the speech to an SD card ready to be played over the ReSpeaker. + +> 💁 In earlier lessons in this project you used flash memory to store speech captured from the microphone. This lesson uses an SD card as is it easier to play audio from it using the Seeed audio libraries. + +There is also another limitation to consider, the available audio data from the speech service, and the formats that the Wio Terminal supports. Unlike full computers, audio libraries for microcontrollers can be very limited in the audio formats they support. For example, the Seeed Arduino Audio library that can play sound over the ReSpeaker only supports audio at a 44.1KHz sample rate. The Azure speech services can provide audio in a number of formats, but none of them use this sample rate, they only provide 8KHz, 16KHz, 24KHz and 48KHz. This means the audio needs to be re-sampled to 44.1KHz, something that would need more resources that the Wio Terminal has, especially memory. + +When needing to manipulate data like this, it is often better to use serverless code, especially if the data is sourced via a web call. 
The Wio Terminal can call a serverless function, passing in the text to convert, and the serverless function can both call the speech service to convert text to speech, as well as re-sample the audio to the required sample rate. It can then return the audio in the form the Wio Terminal needs to be stored on the SD card and played over the ReSpeaker. + +### Task - create a serverless function to convert text to speech + +1. Open your `smart-timer-trigger` project in VS Code, and open the terminal ensuring the virtual environment is activated. If not, kill and re-create the terminal. + +1. Add a new HTTP trigger to this app called `text-to-speech` using the following command from inside the VS Code terminal in the root folder of the functions app project: + + ```sh + func new --name text-to-speech --template "HTTP trigger" + ``` + + This will create an HTTP trigger called `text-to-speech`. + +1. The [librosa](https://librosa.org) Pip package has functions to re-sample audio, so add this to the `requirements.txt` file: + + ```sh + librosa + ``` + + Once this has been added, install the Pip packages using the following command from the VS Code terminal: + + ```sh + pip install -r requirements.txt + ``` + + > ⚠️ If you are using Linux, including Raspberry Pi OS, you may need to install `libsndfile` with the following command: + > + > ```sh + > sudo apt update + > sudo apt install libsndfile1-dev + > ``` + +1. To convert text to speech, you cannot use the speech API key directly, instead you need to request an access token, using the API key to authenticate the access token request. 
Open the `__init__.py` file from the `text-to-speech` folder and replace all the code in it with the following:
+
+    ```python
+    import io
+    import os
+    import requests
+
+    import librosa
+    import soundfile as sf
+    import azure.functions as func
+
+    location = os.environ['SPEECH_LOCATION']
+    speech_key = os.environ['SPEECH_KEY']
+
+    def get_access_token():
+        headers = {
+            'Ocp-Apim-Subscription-Key': speech_key
+        }
+
+        token_endpoint = f'https://{location}.api.cognitive.microsoft.com/sts/v1.0/issuetoken'
+        response = requests.post(token_endpoint, headers=headers)
+        return str(response.text)
+    ```
+
+    This defines constants for the location and speech key that will be read from the settings. It then defines the `get_access_token` function that will retrieve an access token for the speech service.
+
+1. Below this code, add the following:
+
+    ```python
+    playback_format = 'riff-48khz-16bit-mono-pcm'
+
+    def main(req: func.HttpRequest) -> func.HttpResponse:
+        req_body = req.get_json()
+        language = req_body['language']
+        voice = req_body['voice']
+        text = req_body['text']
+
+        url = f'https://{location}.tts.speech.microsoft.com/cognitiveservices/v1'
+
+        headers = {
+            'Authorization': 'Bearer ' + get_access_token(),
+            'Content-Type': 'application/ssml+xml',
+            'X-Microsoft-OutputFormat': playback_format
+        }
+
+        ssml = f'<speak version=\'1.0\' xml:lang=\'{language}\'>'
+        ssml += f'<voice xml:lang=\'{language}\' name=\'{voice}\'>'
+        ssml += text
+        ssml += '</voice>'
+        ssml += '</speak>'
+
+        response = requests.post(url, headers=headers, data=ssml.encode('utf-8'))
+
+        raw_audio, sample_rate = librosa.load(io.BytesIO(response.content), sr=48000)
+        resampled = librosa.resample(raw_audio, sample_rate, 44100)
+
+        output_buffer = io.BytesIO()
+        sf.write(output_buffer, resampled, 44100, 'PCM_16', format='wav')
+        output_buffer.seek(0)
+
+        return func.HttpResponse(output_buffer.read(), status_code=200)
+    ```
+
+    This defines the HTTP trigger that converts the text to speech. 
It extracts the text to convert, the language and the voice from the JSON body set to the request, builds some SSML to request the speech, then calls the relevant REST API authenticating using the access token. This REST API call returns the audio encoded as 16-bit, 48KHz mono WAV file, defined by the value of `playback_format`, which is sent to the REST API call. + + This is then re-sampled by `librosa` from a sample rate of 48KHz to a sample rate of 44.1KHz, then this audio is saved to a binary buffer that is then returned. + +1. Run your function app locally, or deploy it to the cloud. You can then call this using a tool like curl in the same way that you tested your `text-to-timer` HTTP trigger. Make sure to pass the language, voice and text as the JSON body: + + ```json + { + "language": "", + "voice": "", + "text": "" + } + ``` + + Replace `` with your language, such as `en-GB`, or `zh-CN`. Replace `` with the voice you want to use. Replace `` with the text you want to convert to speech. You can save the output to a file and play it with any audio player that can play WAV files. + + For example, to convert "Hello" to speech using US English with the Jenny Neural voice, with the function app running locally, you can use the following curl command: + + ```sh + curl -X GET 'http://localhost:7071/api/text-to-speech' \ + -H 'Content-Type: application/json' \ + -o hello.wav \ + -d '{ + "language":"en-US", + "voice": "en-US-JennyNeural", + "text": "Hello" + }' + ``` + + This will save the audio to `hello.wav` in the current directory. + +> 💁 You can find this code in the [code-spoken-response/functions](code-spoken-response/functions) folder. + +### Task - retrieve the speech from your Wio Terminal + +1. Open the `smart-timer` project in VS Code if it is not already open. + +1. 
Open the `config.h` header file and add the URL for your function app: + + ```cpp + const char *TEXT_TO_SPEECH_FUNCTION_URL = ""; + ``` + + Replace `` with the URL for the `text-to-speech` HTTP trigger on your function app. This will be the same as the value for `TEXT_TO_TIMER_FUNCTION_URL`, except with a function name of `text-to-speech` instead of `text-to-timer`. + +1. Open the `text_to_speech.h` header file, and add the following method to the `public` section of the `TextToSpeech` class: + + ```cpp + void convertTextToSpeech(String text) + { + } + ``` + +1. To the `convertTextToSpeech` method, add the following code to create the JSON to send to the function app: + + ```cpp + DynamicJsonDocument doc(1024); + doc["language"] = LANGUAGE; + doc["voice"] = _voice; + doc["text"] = text; + + String body; + serializeJson(doc, body); + ``` + + This writes the language, voice and text to the JSON document, then serializes it to a string. + +1. Below this, add the following code to call the function app: + + ```cpp + HTTPClient httpClient; + httpClient.begin(_client, TEXT_TO_SPEECH_FUNCTION_URL); + + int httpResponseCode = httpClient.POST(body); + ``` + + This creates an HTTPClient, then makes a POST request using the JSON document to the text to speech HTTP trigger. + +1. If the call works, the raw binary data returned from the function app call can be streamed to a file on the SD card. Add the following code to do this: + + ```cpp + if (httpResponseCode == 200) + { + File wav_file = SD.open("SPEECH.WAV", FILE_WRITE); + httpClient.writeToStream(&wav_file); + wav_file.close(); + } + else + { + Serial.print("Failed to get speech - error "); + Serial.println(httpResponseCode); + } + ``` + + This code checks the response, and if it is 200 (success), the binary data is streamed to a file in the root of the SD Card called `SPEECH.WAV`. + +1. At the end of this method, close the HTTP connection: + + ```cpp + httpClient.end(); + ``` + +1. 
The text to be spoken can now be converted to audio. In the `main.cpp` file, add the following line to the end of the `say` function to convert the text to say into audio: + + ```cpp + textToSpeech.convertTextToSpeech(text); + ``` + +### Task - play audio from your Wio Terminal + +**Coming soon** + +## Deploying your functions app to the cloud + +The reason for running the functions app locally is because the `librosa` Pip package on linux has a dependency on a library that is not installed by default, and will need to be installed before the function app can run. Function apps are serverless - there are no servers you can manage yourself, so no way to install this library up front. + +The way to do this is instead to deploy your functions app using a Docker container. This container is deployed by the cloud whenever it needs to spin up a new instance of your function app (such as when the demand exceeds the available resources, or if the function app hasn't been used for a while and is closed down). + +You can find the instructions to set up a function app and deploy via Docker in the [create a function on Linux using a custom container documentation on Microsoft Docs](https://docs.microsoft.com/azure/azure-functions/functions-create-function-linux-custom-image?tabs=bash%2Cazurecli&pivots=programming-language-python&WT.mc_id=academic-17441-jabenn). + +Once this has been deployed, you can port your Wio Terminal code to access this function: + +1. 
Add the Azure Functions certificate to `config.h`: + + ```cpp + const char *FUNCTIONS_CERTIFICATE = + "-----BEGIN CERTIFICATE-----\r\n" + "MIIFWjCCBEKgAwIBAgIQDxSWXyAgaZlP1ceseIlB4jANBgkqhkiG9w0BAQsFADBa\r\n" + "MQswCQYDVQQGEwJJRTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJl\r\n" + "clRydXN0MSIwIAYDVQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTIw\r\n" + "MDcyMTIzMDAwMFoXDTI0MTAwODA3MDAwMFowTzELMAkGA1UEBhMCVVMxHjAcBgNV\r\n" + "BAoTFU1pY3Jvc29mdCBDb3Jwb3JhdGlvbjEgMB4GA1UEAxMXTWljcm9zb2Z0IFJT\r\n" + "QSBUTFMgQ0EgMDEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCqYnfP\r\n" + "mmOyBoTzkDb0mfMUUavqlQo7Rgb9EUEf/lsGWMk4bgj8T0RIzTqk970eouKVuL5R\r\n" + "IMW/snBjXXgMQ8ApzWRJCZbar879BV8rKpHoAW4uGJssnNABf2n17j9TiFy6BWy+\r\n" + "IhVnFILyLNK+W2M3zK9gheiWa2uACKhuvgCca5Vw/OQYErEdG7LBEzFnMzTmJcli\r\n" + "W1iCdXby/vI/OxbfqkKD4zJtm45DJvC9Dh+hpzqvLMiK5uo/+aXSJY+SqhoIEpz+\r\n" + "rErHw+uAlKuHFtEjSeeku8eR3+Z5ND9BSqc6JtLqb0bjOHPm5dSRrgt4nnil75bj\r\n" + "c9j3lWXpBb9PXP9Sp/nPCK+nTQmZwHGjUnqlO9ebAVQD47ZisFonnDAmjrZNVqEX\r\n" + "F3p7laEHrFMxttYuD81BdOzxAbL9Rb/8MeFGQjE2Qx65qgVfhH+RsYuuD9dUw/3w\r\n" + "ZAhq05yO6nk07AM9c+AbNtRoEcdZcLCHfMDcbkXKNs5DJncCqXAN6LhXVERCw/us\r\n" + "G2MmCMLSIx9/kwt8bwhUmitOXc6fpT7SmFvRAtvxg84wUkg4Y/Gx++0j0z6StSeN\r\n" + "0EJz150jaHG6WV4HUqaWTb98Tm90IgXAU4AW2GBOlzFPiU5IY9jt+eXC2Q6yC/Zp\r\n" + "TL1LAcnL3Qa/OgLrHN0wiw1KFGD51WRPQ0Sh7QIDAQABo4IBJTCCASEwHQYDVR0O\r\n" + "BBYEFLV2DDARzseSQk1Mx1wsyKkM6AtkMB8GA1UdIwQYMBaAFOWdWTCCR1jMrPoI\r\n" + "VDaGezq1BE3wMA4GA1UdDwEB/wQEAwIBhjAdBgNVHSUEFjAUBggrBgEFBQcDAQYI\r\n" + "KwYBBQUHAwIwEgYDVR0TAQH/BAgwBgEB/wIBADA0BggrBgEFBQcBAQQoMCYwJAYI\r\n" + "KwYBBQUHMAGGGGh0dHA6Ly9vY3NwLmRpZ2ljZXJ0LmNvbTA6BgNVHR8EMzAxMC+g\r\n" + "LaArhilodHRwOi8vY3JsMy5kaWdpY2VydC5jb20vT21uaXJvb3QyMDI1LmNybDAq\r\n" + "BgNVHSAEIzAhMAgGBmeBDAECATAIBgZngQwBAgIwCwYJKwYBBAGCNyoBMA0GCSqG\r\n" + "SIb3DQEBCwUAA4IBAQCfK76SZ1vae4qt6P+dTQUO7bYNFUHR5hXcA2D59CJWnEj5\r\n" + "na7aKzyowKvQupW4yMH9fGNxtsh6iJswRqOOfZYC4/giBO/gNsBvwr8uDW7t1nYo\r\n" + 
"DYGHPpvnpxCM2mYfQFHq576/TmeYu1RZY29C4w8xYBlkAA8mDJfRhMCmehk7cN5F\r\n" + "JtyWRj2cZj/hOoI45TYDBChXpOlLZKIYiG1giY16vhCRi6zmPzEwv+tk156N6cGS\r\n" + "Vm44jTQ/rs1sa0JSYjzUaYngoFdZC4OfxnIkQvUIA4TOFmPzNPEFdjcZsgbeEz4T\r\n" + "cGHTBPK4R28F44qIMCtHRV55VMX53ev6P3hRddJb\r\n" + "-----END CERTIFICATE-----\r\n"; + ``` + +1. Change all includes of `` to ``. + +1. Change all `WiFiClient` fields to `WiFiClientSecure`. + +1. In every class that has a `WiFiClientSecure` field, add a constructor and set the certificate in that constructor: + + ```cpp + _client.setCACert(FUNCTIONS_CERTIFICATE); + ``` diff --git a/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/get-voices/__init__.py b/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/get-voices/__init__.py new file mode 100644 index 00000000..44c81900 --- /dev/null +++ b/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/get-voices/__init__.py @@ -0,0 +1,26 @@ +import json +import os +import requests + +import azure.functions as func + +def main(req: func.HttpRequest) -> func.HttpResponse: + location = os.environ['SPEECH_LOCATION'] + speech_key = os.environ['SPEECH_KEY'] + + req_body = req.get_json() + language = req_body['language'] + + url = f'https://{location}.tts.speech.microsoft.com/cognitiveservices/voices/list' + + headers = { + 'Ocp-Apim-Subscription-Key': speech_key + } + + response = requests.get(url, headers=headers) + voices_json = json.loads(response.text) + + voices = filter(lambda x: x['Locale'].lower() == language.lower(), voices_json) + voices = map(lambda x: x['ShortName'], voices) + + return func.HttpResponse(json.dumps(list(voices)), status_code=200) diff --git a/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/get-voices/function.json b/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/get-voices/function.json new file mode 100644 index 
00000000..d9019652 --- /dev/null +++ b/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/get-voices/function.json @@ -0,0 +1,20 @@ +{ + "scriptFile": "__init__.py", + "bindings": [ + { + "authLevel": "function", + "type": "httpTrigger", + "direction": "in", + "name": "req", + "methods": [ + "get", + "post" + ] + }, + { + "type": "http", + "direction": "out", + "name": "$return" + } + ] +} \ No newline at end of file diff --git a/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/host.json b/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/host.json new file mode 100644 index 00000000..291065f8 --- /dev/null +++ b/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/host.json @@ -0,0 +1,15 @@ +{ + "version": "2.0", + "logging": { + "applicationInsights": { + "samplingSettings": { + "isEnabled": true, + "excludedTypes": "Request" + } + } + }, + "extensionBundle": { + "id": "Microsoft.Azure.Functions.ExtensionBundle", + "version": "[2.*, 3.0.0)" + } +} \ No newline at end of file diff --git a/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/local.settings.json b/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/local.settings.json new file mode 100644 index 00000000..a88a77ff --- /dev/null +++ b/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/local.settings.json @@ -0,0 +1,14 @@ +{ + "IsEncrypted": false, + "Values": { + "FUNCTIONS_WORKER_RUNTIME": "python", + "AzureWebJobsStorage": "", + "LUIS_KEY": "", + "LUIS_ENDPOINT_URL": "", + "LUIS_APP_ID": "", + "SPEECH_KEY": "", + "SPEECH_LOCATION": "", + "TRANSLATOR_KEY": "", + "TRANSLATOR_LOCATION": "" + } +} \ No newline at end of file diff --git a/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/requirements.txt 
b/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/requirements.txt new file mode 100644 index 00000000..a2596be3 --- /dev/null +++ b/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/requirements.txt @@ -0,0 +1,5 @@ +# Do not include azure-functions-worker as it may conflict with the Azure Functions platform
+
+azure-functions
+azure-cognitiveservices-language-luis
+librosa
\ No newline at end of file
diff --git a/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/text-to-speech/__init__.py b/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/text-to-speech/__init__.py new file mode 100644 index 00000000..f09404f3 --- /dev/null +++ b/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/text-to-speech/__init__.py @@ -0,0 +1,52 @@ +import io
+import os
+import requests
+
+import librosa
+import soundfile as sf
+import azure.functions as func
+
+location = os.environ['SPEECH_LOCATION']
+speech_key = os.environ['SPEECH_KEY']
+
+def get_access_token():
+    headers = {
+        'Ocp-Apim-Subscription-Key': speech_key
+    }
+
+    token_endpoint = f'https://{location}.api.cognitive.microsoft.com/sts/v1.0/issuetoken'
+    response = requests.post(token_endpoint, headers=headers)
+    return str(response.text)
+
+playback_format = 'riff-48khz-16bit-mono-pcm'
+
+def main(req: func.HttpRequest) -> func.HttpResponse:
+    req_body = req.get_json()
+    language = req_body['language']
+    voice = req_body['voice']
+    text = req_body['text']
+
+    url = f'https://{location}.tts.speech.microsoft.com/cognitiveservices/v1'
+
+    headers = {
+        'Authorization': 'Bearer ' + get_access_token(),
+        'Content-Type': 'application/ssml+xml',
+        'X-Microsoft-OutputFormat': playback_format
+    }
+
+    ssml = f'<speak version=\'1.0\' xml:lang=\'{language}\'>'
+    ssml += f'<voice xml:lang=\'{language}\' name=\'{voice}\'>'
+    ssml += text
+    ssml += '</voice>'
+    ssml += '</speak>'
+
+    response = requests.post(url, headers=headers, data=ssml.encode('utf-8'))
+
+    raw_audio, sample_rate = 
librosa.load(io.BytesIO(response.content), sr=48000) + resampled = librosa.resample(raw_audio, sample_rate, 44100) + + output_buffer = io.BytesIO() + sf.write(output_buffer, resampled, 44100, 'PCM_16', format='wav') + output_buffer.seek(0) + + return func.HttpResponse(output_buffer.read(), status_code=200) diff --git a/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/text-to-speech/function.json b/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/text-to-speech/function.json new file mode 100644 index 00000000..d9019652 --- /dev/null +++ b/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/text-to-speech/function.json @@ -0,0 +1,20 @@ +{ + "scriptFile": "__init__.py", + "bindings": [ + { + "authLevel": "function", + "type": "httpTrigger", + "direction": "in", + "name": "req", + "methods": [ + "get", + "post" + ] + }, + { + "type": "http", + "direction": "out", + "name": "$return" + } + ] +} \ No newline at end of file diff --git a/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/text-to-timer/__init__.py b/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/text-to-timer/__init__.py new file mode 100644 index 00000000..d15d6e68 --- /dev/null +++ b/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/text-to-timer/__init__.py @@ -0,0 +1,46 @@ +import logging + +import azure.functions as func +import json +import os +from azure.cognitiveservices.language.luis.runtime import LUISRuntimeClient +from msrest.authentication import CognitiveServicesCredentials + + +def main(req: func.HttpRequest) -> func.HttpResponse: + luis_key = os.environ['LUIS_KEY'] + endpoint_url = os.environ['LUIS_ENDPOINT_URL'] + app_id = os.environ['LUIS_APP_ID'] + + credentials = CognitiveServicesCredentials(luis_key) + client = LUISRuntimeClient(endpoint=endpoint_url, credentials=credentials) + + req_body = 
req.get_json() + text = req_body['text'] + logging.info(f'Request - {text}') + prediction_request = { 'query' : text } + + prediction_response = client.prediction.get_slot_prediction(app_id, 'Staging', prediction_request) + + if prediction_response.prediction.top_intent == 'set timer': + numbers = prediction_response.prediction.entities['number'] + time_units = prediction_response.prediction.entities['time unit'] + total_seconds = 0 + + for i in range(0, len(numbers)): + number = numbers[i] + time_unit = time_units[i][0] + + if time_unit == 'minute': + total_seconds += number * 60 + else: + total_seconds += number + + logging.info(f'Timer required for {total_seconds} seconds') + + payload = { + 'seconds': total_seconds + } + return func.HttpResponse(json.dumps(payload), status_code=200) + + return func.HttpResponse(status_code=404) \ No newline at end of file diff --git a/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/text-to-timer/function.json b/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/text-to-timer/function.json new file mode 100644 index 00000000..d9019652 --- /dev/null +++ b/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/text-to-timer/function.json @@ -0,0 +1,20 @@ +{ + "scriptFile": "__init__.py", + "bindings": [ + { + "authLevel": "function", + "type": "httpTrigger", + "direction": "in", + "name": "req", + "methods": [ + "get", + "post" + ] + }, + { + "type": "http", + "direction": "out", + "name": "$return" + } + ] +} \ No newline at end of file diff --git a/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/translate-text/__init__.py b/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/translate-text/__init__.py new file mode 100644 index 00000000..850200a6 --- /dev/null +++ b/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/translate-text/__init__.py @@ 
-0,0 +1,36 @@ +import logging +import os +import requests + +import azure.functions as func + +location = os.environ['TRANSLATOR_LOCATION'] +translator_key = os.environ['TRANSLATOR_KEY'] + +def main(req: func.HttpRequest) -> func.HttpResponse: + req_body = req.get_json() + from_language = req_body['from_language'] + to_language = req_body['to_language'] + text = req_body['text'] + + logging.info(f'Translating {text} from {from_language} to {to_language}') + + url = f'https://api.cognitive.microsofttranslator.com/translate?api-version=3.0' + + headers = { + 'Ocp-Apim-Subscription-Key': translator_key, + 'Ocp-Apim-Subscription-Region': location, + 'Content-type': 'application/json' + } + + params = { + 'from': from_language, + 'to': to_language + } + + body = [{ + 'text' : text + }] + + response = requests.post(url, headers=headers, params=params, json=body) + return func.HttpResponse(response.json()[0]['translations'][0]['text']) diff --git a/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/translate-text/__pycache__/__init__.cpython-39.pyc b/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/translate-text/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 00000000..1e106428 Binary files /dev/null and b/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/translate-text/__pycache__/__init__.cpython-39.pyc differ diff --git a/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/translate-text/function.json b/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/translate-text/function.json new file mode 100644 index 00000000..d9019652 --- /dev/null +++ b/6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/translate-text/function.json @@ -0,0 +1,20 @@ +{ + "scriptFile": "__init__.py", + "bindings": [ + { + "authLevel": "function", + "type": "httpTrigger", + "direction": "in", + 
"name": "req", + "methods": [ + "get", + "post" + ] + }, + { + "type": "http", + "direction": "out", + "name": "$return" + } + ] +} \ No newline at end of file diff --git a/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/include/README b/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/include/README new file mode 100644 index 00000000..194dcd43 --- /dev/null +++ b/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/include/README @@ -0,0 +1,39 @@ + +This directory is intended for project header files. + +A header file is a file containing C declarations and macro definitions +to be shared between several project source files. You request the use of a +header file in your project source file (C, C++, etc) located in `src` folder +by including it, with the C preprocessing directive `#include'. + +```src/main.c + +#include "header.h" + +int main (void) +{ + ... +} +``` + +Including a header file produces the same results as copying the header file +into each source file that needs it. Such copying would be time-consuming +and error-prone. With a header file, the related declarations appear +in only one place. If they need to be changed, they can be changed in one +place, and programs that include the header file will automatically use the +new version when next recompiled. The header file eliminates the labor of +finding and changing all the copies as well as the risk that a failure to +find one copy will result in inconsistencies within a program. + +In C, the usual convention is to give header files names that end with `.h'. +It is most portable to use only letters, digits, dashes, and underscores in +header file names, and at most one dot. 
+ +Read more about using header files in official GCC documentation: + +* Include Syntax +* Include Operation +* Once-Only Headers +* Computed Includes + +https://gcc.gnu.org/onlinedocs/cpp/Header-Files.html diff --git a/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/lib/README b/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/lib/README new file mode 100644 index 00000000..6debab1e --- /dev/null +++ b/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/lib/README @@ -0,0 +1,46 @@ + +This directory is intended for project specific (private) libraries. +PlatformIO will compile them to static libraries and link into executable file. + +The source code of each library should be placed in a an own separate directory +("lib/your_library_name/[here are source files]"). + +For example, see a structure of the following two libraries `Foo` and `Bar`: + +|--lib +| | +| |--Bar +| | |--docs +| | |--examples +| | |--src +| | |- Bar.c +| | |- Bar.h +| | |- library.json (optional, custom build options, etc) https://docs.platformio.org/page/librarymanager/config.html +| | +| |--Foo +| | |- Foo.c +| | |- Foo.h +| | +| |- README --> THIS FILE +| +|- platformio.ini +|--src + |- main.c + +and a contents of `src/main.c`: +``` +#include +#include + +int main (void) +{ + ... +} + +``` + +PlatformIO Library Dependency Finder will find automatically dependent +libraries scanning project source files. 
+ +More information about PlatformIO Library Dependency Finder +- https://docs.platformio.org/page/librarymanager/ldf.html diff --git a/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/platformio.ini b/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/platformio.ini new file mode 100644 index 00000000..8836ab42 --- /dev/null +++ b/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/platformio.ini @@ -0,0 +1,23 @@ +; PlatformIO Project Configuration File +; +; Build options: build flags, source filter +; Upload options: custom upload port, speed and extra flags +; Library options: dependencies, extra library storages +; Advanced options: extra scripting +; +; Please visit documentation for the other options and examples +; https://docs.platformio.org/page/projectconf.html + +[env:seeed_wio_terminal] +platform = atmelsam +board = seeed_wio_terminal +framework = arduino +lib_deps = + seeed-studio/Seeed Arduino FS @ 2.1.1 + seeed-studio/Seeed Arduino SFUD @ 2.0.2 + seeed-studio/Seeed Arduino rpcWiFi @ 1.0.5 + seeed-studio/Seeed Arduino rpcUnified @ 2.1.3 + seeed-studio/Seeed_Arduino_mbedtls @ 3.0.1 + seeed-studio/Seeed Arduino RTC @ 2.0.0 + bblanchon/ArduinoJson @ 6.17.3 + contrem/arduino-timer @ 2.3.0 diff --git a/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/src/config.h b/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/src/config.h new file mode 100644 index 00000000..9822eb2e --- /dev/null +++ b/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/src/config.h @@ -0,0 +1,95 @@ +#pragma once + +#define RATE 16000 +#define SAMPLE_LENGTH_SECONDS 4 +#define SAMPLES RATE * SAMPLE_LENGTH_SECONDS +#define BUFFER_SIZE (SAMPLES * 2) + 44 +#define ADC_BUF_LEN 1600 + +const char *SSID = ""; +const char *PASSWORD = ""; + +const char *SPEECH_API_KEY = ""; +const char *SPEECH_LOCATION = ""; +const char *LANGUAGE = ""; 
+const char *SERVER_LANGUAGE = ""; + +const char *TOKEN_URL = "https://%s.api.cognitive.microsoft.com/sts/v1.0/issuetoken"; +const char *SPEECH_URL = "https://%s.stt.speech.microsoft.com/speech/recognition/conversation/cognitiveservices/v1?language=%s"; + +const char *TEXT_TO_TIMER_FUNCTION_URL = "http://:7071/api/text-to-timer"; +const char *GET_VOICES_FUNCTION_URL = "http://:7071/api/get-voices"; +const char *TEXT_TO_SPEECH_FUNCTION_URL = "http://:7071/api/text-to-speech"; +const char *TRANSLATE_FUNCTION_URL = "http://:7071/api/translate-text"; + +const char *TOKEN_CERTIFICATE = + "-----BEGIN CERTIFICATE-----\r\n" + "MIIF8zCCBNugAwIBAgIQAueRcfuAIek/4tmDg0xQwDANBgkqhkiG9w0BAQwFADBh\r\n" + "MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\r\n" + "d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH\r\n" + "MjAeFw0yMDA3MjkxMjMwMDBaFw0yNDA2MjcyMzU5NTlaMFkxCzAJBgNVBAYTAlVT\r\n" + "MR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xKjAoBgNVBAMTIU1pY3Jv\r\n" + "c29mdCBBenVyZSBUTFMgSXNzdWluZyBDQSAwNjCCAiIwDQYJKoZIhvcNAQEBBQAD\r\n" + "ggIPADCCAgoCggIBALVGARl56bx3KBUSGuPc4H5uoNFkFH4e7pvTCxRi4j/+z+Xb\r\n" + "wjEz+5CipDOqjx9/jWjskL5dk7PaQkzItidsAAnDCW1leZBOIi68Lff1bjTeZgMY\r\n" + "iwdRd3Y39b/lcGpiuP2d23W95YHkMMT8IlWosYIX0f4kYb62rphyfnAjYb/4Od99\r\n" + "ThnhlAxGtfvSbXcBVIKCYfZgqRvV+5lReUnd1aNjRYVzPOoifgSx2fRyy1+pO1Uz\r\n" + "aMMNnIOE71bVYW0A1hr19w7kOb0KkJXoALTDDj1ukUEDqQuBfBxReL5mXiu1O7WG\r\n" + "0vltg0VZ/SZzctBsdBlx1BkmWYBW261KZgBivrql5ELTKKd8qgtHcLQA5fl6JB0Q\r\n" + "gs5XDaWehN86Gps5JW8ArjGtjcWAIP+X8CQaWfaCnuRm6Bk/03PQWhgdi84qwA0s\r\n" + "sRfFJwHUPTNSnE8EiGVk2frt0u8PG1pwSQsFuNJfcYIHEv1vOzP7uEOuDydsmCjh\r\n" + "lxuoK2n5/2aVR3BMTu+p4+gl8alXoBycyLmj3J/PUgqD8SL5fTCUegGsdia/Sa60\r\n" + "N2oV7vQ17wjMN+LXa2rjj/b4ZlZgXVojDmAjDwIRdDUujQu0RVsJqFLMzSIHpp2C\r\n" + "Zp7mIoLrySay2YYBu7SiNwL95X6He2kS8eefBBHjzwW/9FxGqry57i71c2cDAgMB\r\n" + "AAGjggGtMIIBqTAdBgNVHQ4EFgQU1cFnOsKjnfR3UltZEjgp5lVou6UwHwYDVR0j\r\n" + 
"BBgwFoAUTiJUIBiV5uNu5g/6+rkS7QYXjzkwDgYDVR0PAQH/BAQDAgGGMB0GA1Ud\r\n" + "JQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/AgEAMHYG\r\n" + "CCsGAQUFBwEBBGowaDAkBggrBgEFBQcwAYYYaHR0cDovL29jc3AuZGlnaWNlcnQu\r\n" + "Y29tMEAGCCsGAQUFBzAChjRodHRwOi8vY2FjZXJ0cy5kaWdpY2VydC5jb20vRGln\r\n" + "aUNlcnRHbG9iYWxSb290RzIuY3J0MHsGA1UdHwR0MHIwN6A1oDOGMWh0dHA6Ly9j\r\n" + "cmwzLmRpZ2ljZXJ0LmNvbS9EaWdpQ2VydEdsb2JhbFJvb3RHMi5jcmwwN6A1oDOG\r\n" + "MWh0dHA6Ly9jcmw0LmRpZ2ljZXJ0LmNvbS9EaWdpQ2VydEdsb2JhbFJvb3RHMi5j\r\n" + "cmwwHQYDVR0gBBYwFDAIBgZngQwBAgEwCAYGZ4EMAQICMBAGCSsGAQQBgjcVAQQD\r\n" + "AgEAMA0GCSqGSIb3DQEBDAUAA4IBAQB2oWc93fB8esci/8esixj++N22meiGDjgF\r\n" + "+rA2LUK5IOQOgcUSTGKSqF9lYfAxPjrqPjDCUPHCURv+26ad5P/BYtXtbmtxJWu+\r\n" + "cS5BhMDPPeG3oPZwXRHBJFAkY4O4AF7RIAAUW6EzDflUoDHKv83zOiPfYGcpHc9s\r\n" + "kxAInCedk7QSgXvMARjjOqdakor21DTmNIUotxo8kHv5hwRlGhBJwps6fEVi1Bt0\r\n" + "trpM/3wYxlr473WSPUFZPgP1j519kLpWOJ8z09wxay+Br29irPcBYv0GMXlHqThy\r\n" + "8y4m/HyTQeI2IMvMrQnwqPpY+rLIXyviI2vLoI+4xKE4Rn38ZZ8m\r\n" + "-----END CERTIFICATE-----\r\n"; + +const char *SPEECH_CERTIFICATE = + "-----BEGIN CERTIFICATE-----\r\n" + "MIIF8zCCBNugAwIBAgIQCq+mxcpjxFFB6jvh98dTFzANBgkqhkiG9w0BAQwFADBh\r\n" + "MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\r\n" + "d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH\r\n" + "MjAeFw0yMDA3MjkxMjMwMDBaFw0yNDA2MjcyMzU5NTlaMFkxCzAJBgNVBAYTAlVT\r\n" + "MR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xKjAoBgNVBAMTIU1pY3Jv\r\n" + "c29mdCBBenVyZSBUTFMgSXNzdWluZyBDQSAwMTCCAiIwDQYJKoZIhvcNAQEBBQAD\r\n" + "ggIPADCCAgoCggIBAMedcDrkXufP7pxVm1FHLDNA9IjwHaMoaY8arqqZ4Gff4xyr\r\n" + "RygnavXL7g12MPAx8Q6Dd9hfBzrfWxkF0Br2wIvlvkzW01naNVSkHp+OS3hL3W6n\r\n" + "l/jYvZnVeJXjtsKYcXIf/6WtspcF5awlQ9LZJcjwaH7KoZuK+THpXCMtzD8XNVdm\r\n" + "GW/JI0C/7U/E7evXn9XDio8SYkGSM63aLO5BtLCv092+1d4GGBSQYolRq+7Pd1kR\r\n" + "EkWBPm0ywZ2Vb8GIS5DLrjelEkBnKCyy3B0yQud9dpVsiUeE7F5sY8Me96WVxQcb\r\n" + "OyYdEY/j/9UpDlOG+vA+YgOvBhkKEjiqygVpP8EZoMMijephzg43b5Qi9r5UrvYo\r\n" + 
"o19oR/8pf4HJNDPF0/FJwFVMW8PmCBLGstin3NE1+NeWTkGt0TzpHjgKyfaDP2tO\r\n" + "4bCk1G7pP2kDFT7SYfc8xbgCkFQ2UCEXsaH/f5YmpLn4YPiNFCeeIida7xnfTvc4\r\n" + "7IxyVccHHq1FzGygOqemrxEETKh8hvDR6eBdrBwmCHVgZrnAqnn93JtGyPLi6+cj\r\n" + "WGVGtMZHwzVvX1HvSFG771sskcEjJxiQNQDQRWHEh3NxvNb7kFlAXnVdRkkvhjpR\r\n" + "GchFhTAzqmwltdWhWDEyCMKC2x/mSZvZtlZGY+g37Y72qHzidwtyW7rBetZJAgMB\r\n" + "AAGjggGtMIIBqTAdBgNVHQ4EFgQUDyBd16FXlduSzyvQx8J3BM5ygHYwHwYDVR0j\r\n" + "BBgwFoAUTiJUIBiV5uNu5g/6+rkS7QYXjzkwDgYDVR0PAQH/BAQDAgGGMB0GA1Ud\r\n" + "JQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/AgEAMHYG\r\n" + "CCsGAQUFBwEBBGowaDAkBggrBgEFBQcwAYYYaHR0cDovL29jc3AuZGlnaWNlcnQu\r\n" + "Y29tMEAGCCsGAQUFBzAChjRodHRwOi8vY2FjZXJ0cy5kaWdpY2VydC5jb20vRGln\r\n" + "aUNlcnRHbG9iYWxSb290RzIuY3J0MHsGA1UdHwR0MHIwN6A1oDOGMWh0dHA6Ly9j\r\n" + "cmwzLmRpZ2ljZXJ0LmNvbS9EaWdpQ2VydEdsb2JhbFJvb3RHMi5jcmwwN6A1oDOG\r\n" + "MWh0dHA6Ly9jcmw0LmRpZ2ljZXJ0LmNvbS9EaWdpQ2VydEdsb2JhbFJvb3RHMi5j\r\n" + "cmwwHQYDVR0gBBYwFDAIBgZngQwBAgEwCAYGZ4EMAQICMBAGCSsGAQQBgjcVAQQD\r\n" + "AgEAMA0GCSqGSIb3DQEBDAUAA4IBAQAlFvNh7QgXVLAZSsNR2XRmIn9iS8OHFCBA\r\n" + "WxKJoi8YYQafpMTkMqeuzoL3HWb1pYEipsDkhiMnrpfeYZEA7Lz7yqEEtfgHcEBs\r\n" + "K9KcStQGGZRfmWU07hPXHnFz+5gTXqzCE2PBMlRgVUYJiA25mJPXfB00gDvGhtYa\r\n" + "+mENwM9Bq1B9YYLyLjRtUz8cyGsdyTIG/bBM/Q9jcV8JGqMU/UjAdh1pFyTnnHEl\r\n" + "Y59Npi7F87ZqYYJEHJM2LGD+le8VsHjgeWX2CJQko7klXvcizuZvUEDTjHaQcs2J\r\n" + "+kPgfyMIOY1DMJ21NxOJ2xPRC/wAh/hzSBRVtoAnyuxtkZ4VjIOh\r\n" + "-----END CERTIFICATE-----\r\n"; diff --git a/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/src/flash_stream.h b/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/src/flash_stream.h new file mode 100644 index 00000000..b841f1d0 --- /dev/null +++ b/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/src/flash_stream.h @@ -0,0 +1,69 @@ +#pragma once + +#include +#include +#include + +#include "config.h" + +class FlashStream : public Stream +{ +public: + 
FlashStream() + { + _pos = 0; + _flash_address = 0; + _flash = sfud_get_device_table() + 0; + + populateBuffer(); + } + + virtual size_t write(uint8_t val) + { + return 0; + } + + virtual int available() + { + int remaining = BUFFER_SIZE - ((_flash_address - HTTP_TCP_BUFFER_SIZE) + _pos); + int bytes_available = min(HTTP_TCP_BUFFER_SIZE, remaining); + + if (bytes_available == 0) + { + bytes_available = -1; + } + + return bytes_available; + } + + virtual int read() + { + int retVal = _buffer[_pos++]; + + if (_pos == HTTP_TCP_BUFFER_SIZE) + { + populateBuffer(); + } + + return retVal; + } + + virtual int peek() + { + return _buffer[_pos]; + } + +private: + void populateBuffer() + { + sfud_read(_flash, _flash_address, HTTP_TCP_BUFFER_SIZE, _buffer); + _flash_address += HTTP_TCP_BUFFER_SIZE; + _pos = 0; + } + + size_t _pos; + size_t _flash_address; + const sfud_flash *_flash; + + byte _buffer[HTTP_TCP_BUFFER_SIZE]; +}; diff --git a/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/src/flash_writer.h b/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/src/flash_writer.h new file mode 100644 index 00000000..87fdff29 --- /dev/null +++ b/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/src/flash_writer.h @@ -0,0 +1,60 @@ +#pragma once + +#include +#include + +class FlashWriter +{ +public: + void init() + { + _flash = sfud_get_device_table() + 0; + _sfudBufferSize = _flash->chip.erase_gran; + _sfudBuffer = new byte[_sfudBufferSize]; + _sfudBufferPos = 0; + _sfudBufferWritePos = 0; + } + + void reset() + { + _sfudBufferPos = 0; + _sfudBufferWritePos = 0; + } + + void writeSfudBuffer(byte b) + { + _sfudBuffer[_sfudBufferPos++] = b; + if (_sfudBufferPos == _sfudBufferSize) + { + sfud_erase_write(_flash, _sfudBufferWritePos, _sfudBufferSize, _sfudBuffer); + _sfudBufferWritePos += _sfudBufferSize; + _sfudBufferPos = 0; + } + } + + void flushSfudBuffer() + { + if (_sfudBufferPos > 0) + { + 
sfud_erase_write(_flash, _sfudBufferWritePos, _sfudBufferSize, _sfudBuffer); + _sfudBufferWritePos += _sfudBufferSize; + _sfudBufferPos = 0; + } + } + + void writeSfudBuffer(byte *b, size_t len) + { + for (size_t i = 0; i < len; ++i) + { + writeSfudBuffer(b[i]); + } + } + +private: + byte *_sfudBuffer; + size_t _sfudBufferSize; + size_t _sfudBufferPos; + size_t _sfudBufferWritePos; + + const sfud_flash *_flash; +}; \ No newline at end of file diff --git a/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/src/language_understanding.h b/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/src/language_understanding.h new file mode 100644 index 00000000..1c8d8653 --- /dev/null +++ b/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/src/language_understanding.h @@ -0,0 +1,53 @@ +#pragma once + +#include +#include +#include +#include + +#include "config.h" + +class LanguageUnderstanding +{ +public: + int GetTimerDuration(String text) + { + DynamicJsonDocument doc(1024); + doc["text"] = text; + + String body; + serializeJson(doc, body); + + HTTPClient httpClient; + httpClient.begin(_client, TEXT_TO_TIMER_FUNCTION_URL); + + int httpResponseCode = httpClient.POST(body); + + int seconds = 0; + if (httpResponseCode == 200) + { + String result = httpClient.getString(); + Serial.println(result); + + DynamicJsonDocument doc(1024); + deserializeJson(doc, result.c_str()); + + JsonObject obj = doc.as(); + seconds = obj["seconds"].as(); + } + else + { + Serial.print("Failed to understand text - error "); + Serial.println(httpResponseCode); + } + + httpClient.end(); + + return seconds; + } + +private: + WiFiClient _client; +}; + +LanguageUnderstanding languageUnderstanding; \ No newline at end of file diff --git a/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/src/main.cpp b/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/src/main.cpp new file mode 
100644 index 00000000..3d2c6182 --- /dev/null +++ b/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/src/main.cpp @@ -0,0 +1,133 @@ +#include +#include +#include +#include +#include + +#include "config.h" +#include "language_understanding.h" +#include "mic.h" +#include "speech_to_text.h" +#include "text_to_speech.h" +#include "text_translator.h" + +void connectWiFi() +{ + while (WiFi.status() != WL_CONNECTED) + { + Serial.println("Connecting to WiFi.."); + WiFi.begin(SSID, PASSWORD); + delay(500); + } + + Serial.println("Connected!"); +} + +void setup() +{ + Serial.begin(9600); + + while (!Serial) + ; // Wait for Serial to be ready + + delay(1000); + + connectWiFi(); + + while (!(sfud_init() == SFUD_SUCCESS)) + ; + + sfud_qspi_fast_read_enable(sfud_get_device(SFUD_W25Q32_DEVICE_INDEX), 2); + + pinMode(WIO_KEY_C, INPUT_PULLUP); + + mic.init(); + + speechToText.init(); + textToSpeech.init(); + + Serial.println("Ready."); +} + +auto timer = timer_create_default(); + +void say(String text) +{ + text = textTranslator.translateText(text, SERVER_LANGUAGE, LANGUAGE); + Serial.println(text); + textToSpeech.convertTextToSpeech(text); +} + +bool timerExpired(void *announcement) +{ + say((char *)announcement); + return false; +} + +void processAudio() +{ + String text = speechToText.convertSpeechToText(); + text = textTranslator.translateText(text, LANGUAGE, SERVER_LANGUAGE); + Serial.println(text); + + int total_seconds = languageUnderstanding.GetTimerDuration(text); + if (total_seconds == 0) + { + return; + } + + int minutes = total_seconds / 60; + int seconds = total_seconds % 60; + + String begin_message; + if (minutes > 0) + { + begin_message += minutes; + begin_message += " minute "; + } + if (seconds > 0) + { + begin_message += seconds; + begin_message += " second "; + } + + begin_message += "timer started."; + + String end_message("Times up on your "); + if (minutes > 0) + { + end_message += minutes; + end_message += " minute "; + } + if 
(seconds > 0) + { + end_message += seconds; + end_message += " second "; + } + + end_message += "timer."; + + say(begin_message); + + timer.in(total_seconds * 1000, timerExpired, (void *)(end_message.c_str())); +} + +void loop() +{ + if (digitalRead(WIO_KEY_C) == LOW && !mic.isRecording()) + { + Serial.println("Starting recording..."); + mic.startRecording(); + } + + if (!mic.isRecording() && mic.isRecordingReady()) + { + Serial.println("Finished recording"); + + processAudio(); + + mic.reset(); + } + + timer.tick(); +} diff --git a/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/src/mic.h b/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/src/mic.h new file mode 100644 index 00000000..5f0815de --- /dev/null +++ b/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/src/mic.h @@ -0,0 +1,242 @@ +#pragma once + +#include + +#include "config.h" +#include "flash_writer.h" + +class Mic +{ +public: + Mic() + { + _isRecording = false; + _isRecordingReady = false; + } + + void startRecording() + { + _isRecording = true; + _isRecordingReady = false; + } + + bool isRecording() + { + return _isRecording; + } + + bool isRecordingReady() + { + return _isRecordingReady; + } + + void init() + { + analogReference(AR_INTERNAL2V23); + + _writer.init(); + + initBufferHeader(); + configureDmaAdc(); + } + + void reset() + { + _isRecordingReady = false; + _isRecording = false; + + _writer.reset(); + + initBufferHeader(); + } + + void dmaHandler() + { + static uint8_t count = 0; + + if (DMAC->Channel[1].CHINTFLAG.bit.SUSP) + { + DMAC->Channel[1].CHCTRLB.reg = DMAC_CHCTRLB_CMD_RESUME; + DMAC->Channel[1].CHINTFLAG.bit.SUSP = 1; + + if (count) + { + audioCallback(_adc_buf_0, ADC_BUF_LEN); + } + else + { + audioCallback(_adc_buf_1, ADC_BUF_LEN); + } + + count = (count + 1) % 2; + } + } + +private: + volatile bool _isRecording; + volatile bool _isRecordingReady; + FlashWriter _writer; + +typedef struct + { + 
uint16_t btctrl; + uint16_t btcnt; + uint32_t srcaddr; + uint32_t dstaddr; + uint32_t descaddr; + } dmacdescriptor; + + // Globals - DMA and ADC + volatile dmacdescriptor _wrb[DMAC_CH_NUM] __attribute__((aligned(16))); + dmacdescriptor _descriptor_section[DMAC_CH_NUM] __attribute__((aligned(16))); + dmacdescriptor _descriptor __attribute__((aligned(16))); + + void configureDmaAdc() + { + // Configure DMA to sample from ADC at a regular interval (triggered by timer/counter) + DMAC->BASEADDR.reg = (uint32_t)_descriptor_section; // Specify the location of the descriptors + DMAC->WRBADDR.reg = (uint32_t)_wrb; // Specify the location of the write back descriptors + DMAC->CTRL.reg = DMAC_CTRL_DMAENABLE | DMAC_CTRL_LVLEN(0xf); // Enable the DMAC peripheral + DMAC->Channel[1].CHCTRLA.reg = DMAC_CHCTRLA_TRIGSRC(TC5_DMAC_ID_OVF) | // Set DMAC to trigger on TC5 timer overflow + DMAC_CHCTRLA_TRIGACT_BURST; // DMAC burst transfer + + _descriptor.descaddr = (uint32_t)&_descriptor_section[1]; // Set up a circular descriptor + _descriptor.srcaddr = (uint32_t)&ADC1->RESULT.reg; // Take the result from the ADC0 RESULT register + _descriptor.dstaddr = (uint32_t)_adc_buf_0 + sizeof(uint16_t) * ADC_BUF_LEN; // Place it in the adc_buf_0 array + _descriptor.btcnt = ADC_BUF_LEN; // Beat count + _descriptor.btctrl = DMAC_BTCTRL_BEATSIZE_HWORD | // Beat size is HWORD (16-bits) + DMAC_BTCTRL_DSTINC | // Increment the destination address + DMAC_BTCTRL_VALID | // Descriptor is valid + DMAC_BTCTRL_BLOCKACT_SUSPEND; // Suspend DMAC channel 0 after block transfer + memcpy(&_descriptor_section[0], &_descriptor, sizeof(_descriptor)); // Copy the descriptor to the descriptor section + + _descriptor.descaddr = (uint32_t)&_descriptor_section[0]; // Set up a circular descriptor + _descriptor.srcaddr = (uint32_t)&ADC1->RESULT.reg; // Take the result from the ADC0 RESULT register + _descriptor.dstaddr = (uint32_t)_adc_buf_1 + sizeof(uint16_t) * ADC_BUF_LEN; // Place it in the adc_buf_1 array + 
_descriptor.btcnt = ADC_BUF_LEN; // Beat count + _descriptor.btctrl = DMAC_BTCTRL_BEATSIZE_HWORD | // Beat size is HWORD (16-bits) + DMAC_BTCTRL_DSTINC | // Increment the destination address + DMAC_BTCTRL_VALID | // Descriptor is valid + DMAC_BTCTRL_BLOCKACT_SUSPEND; // Suspend DMAC channel 0 after block transfer + memcpy(&_descriptor_section[1], &_descriptor, sizeof(_descriptor)); // Copy the descriptor to the descriptor section + + // Configure NVIC + NVIC_SetPriority(DMAC_1_IRQn, 0); // Set the Nested Vector Interrupt Controller (NVIC) priority for DMAC1 to 0 (highest) + NVIC_EnableIRQ(DMAC_1_IRQn); // Connect DMAC1 to Nested Vector Interrupt Controller (NVIC) + + // Activate the suspend (SUSP) interrupt on DMAC channel 1 + DMAC->Channel[1].CHINTENSET.reg = DMAC_CHINTENSET_SUSP; + + // Configure ADC + ADC1->INPUTCTRL.bit.MUXPOS = ADC_INPUTCTRL_MUXPOS_AIN12_Val; // Set the analog input to ADC0/AIN2 (PB08 - A4 on Metro M4) + while (ADC1->SYNCBUSY.bit.INPUTCTRL) + ; // Wait for synchronization + ADC1->SAMPCTRL.bit.SAMPLEN = 0x00; // Set max Sampling Time Length to half divided ADC clock pulse (2.66us) + while (ADC1->SYNCBUSY.bit.SAMPCTRL) + ; // Wait for synchronization + ADC1->CTRLA.reg = ADC_CTRLA_PRESCALER_DIV128; // Divide Clock ADC GCLK by 128 (48MHz/128 = 375kHz) + ADC1->CTRLB.reg = ADC_CTRLB_RESSEL_12BIT | // Set ADC resolution to 12 bits + ADC_CTRLB_FREERUN; // Set ADC to free run mode + while (ADC1->SYNCBUSY.bit.CTRLB) + ; // Wait for synchronization + ADC1->CTRLA.bit.ENABLE = 1; // Enable the ADC + while (ADC1->SYNCBUSY.bit.ENABLE) + ; // Wait for synchronization + ADC1->SWTRIG.bit.START = 1; // Initiate a software trigger to start an ADC conversion + while (ADC1->SYNCBUSY.bit.SWTRIG) + ; // Wait for synchronization + + // Enable DMA channel 1 + DMAC->Channel[1].CHCTRLA.bit.ENABLE = 1; + + // Configure Timer/Counter 5 + GCLK->PCHCTRL[TC5_GCLK_ID].reg = GCLK_PCHCTRL_CHEN | // Enable perhipheral channel for TC5 + GCLK_PCHCTRL_GEN_GCLK1; // Connect generic 
clock 0 at 48MHz + + TC5->COUNT16.WAVE.reg = TC_WAVE_WAVEGEN_MFRQ; // Set TC5 to Match Frequency (MFRQ) mode + TC5->COUNT16.CC[0].reg = 3000 - 1; // Set the trigger to 16 kHz: (4Mhz / 16000) - 1 + while (TC5->COUNT16.SYNCBUSY.bit.CC0) + ; // Wait for synchronization + + // Start Timer/Counter 5 + TC5->COUNT16.CTRLA.bit.ENABLE = 1; // Enable the TC5 timer + while (TC5->COUNT16.SYNCBUSY.bit.ENABLE) + ; // Wait for synchronization + } + + uint16_t _adc_buf_0[ADC_BUF_LEN]; + uint16_t _adc_buf_1[ADC_BUF_LEN]; + + // WAV files have a header. This struct defines that header + struct wavFileHeader + { + char riff[4]; /* "RIFF" */ + long flength; /* file length in bytes */ + char wave[4]; /* "WAVE" */ + char fmt[4]; /* "fmt " */ + long chunk_size; /* size of FMT chunk in bytes (usually 16) */ + short format_tag; /* 1=PCM, 257=Mu-Law, 258=A-Law, 259=ADPCM */ + short num_chans; /* 1=mono, 2=stereo */ + long srate; /* Sampling rate in samples per second */ + long bytes_per_sec; /* bytes per second = srate*bytes_per_samp */ + short bytes_per_samp; /* 2=16-bit mono, 4=16-bit stereo */ + short bits_per_samp; /* Number of bits per sample */ + char data[4]; /* "data" */ + long dlength; /* data length in bytes (filelength - 44) */ + }; + + void initBufferHeader() + { + wavFileHeader wavh; + + strncpy(wavh.riff, "RIFF", 4); + strncpy(wavh.wave, "WAVE", 4); + strncpy(wavh.fmt, "fmt ", 4); + strncpy(wavh.data, "data", 4); + + wavh.chunk_size = 16; + wavh.format_tag = 1; // PCM + wavh.num_chans = 1; // mono + wavh.srate = RATE; + wavh.bytes_per_sec = (RATE * 1 * 16 * 1) / 8; + wavh.bytes_per_samp = 2; + wavh.bits_per_samp = 16; + wavh.dlength = RATE * 2 * 1 * 16 / 2; + wavh.flength = wavh.dlength + 44; + + _writer.writeSfudBuffer((byte *)&wavh, 44); + } + + void audioCallback(uint16_t *buf, uint32_t buf_len) + { + static uint32_t idx = 44; + + if (_isRecording) + { + for (uint32_t i = 0; i < buf_len; i++) + { + int16_t audio_value = ((int16_t)buf[i] - 2048) * 16; + + 
_writer.writeSfudBuffer(audio_value & 0xFF); + _writer.writeSfudBuffer((audio_value >> 8) & 0xFF); + } + + idx += buf_len; + + if (idx >= BUFFER_SIZE) + { + _writer.flushSfudBuffer(); + idx = 44; + _isRecording = false; + _isRecordingReady = true; + } + } + } +}; + +Mic mic; + +void DMAC_1_Handler() +{ + mic.dmaHandler(); +} diff --git a/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/src/speech_to_text.h b/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/src/speech_to_text.h new file mode 100644 index 00000000..a7ce075f --- /dev/null +++ b/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/src/speech_to_text.h @@ -0,0 +1,102 @@ +#pragma once + +#include +#include +#include +#include + +#include "config.h" +#include "flash_stream.h" + +class SpeechToText +{ +public: + void init() + { + _token_client.setCACert(TOKEN_CERTIFICATE); + _speech_client.setCACert(SPEECH_CERTIFICATE); + _access_token = getAccessToken(); + } + + String convertSpeechToText() + { + char url[128]; + sprintf(url, SPEECH_URL, SPEECH_LOCATION, LANGUAGE); + + HTTPClient httpClient; + httpClient.begin(_speech_client, url); + + httpClient.addHeader("Authorization", String("Bearer ") + _access_token); + httpClient.addHeader("Content-Type", String("audio/wav; codecs=audio/pcm; samplerate=") + String(RATE)); + httpClient.addHeader("Accept", "application/json;text/xml"); + + Serial.println("Sending speech..."); + + FlashStream stream; + int httpResponseCode = httpClient.sendRequest("POST", &stream, BUFFER_SIZE); + + Serial.println("Speech sent!"); + + String text = ""; + + if (httpResponseCode == 200) + { + String result = httpClient.getString(); + Serial.println(result); + + DynamicJsonDocument doc(1024); + deserializeJson(doc, result.c_str()); + + JsonObject obj = doc.as(); + text = obj["DisplayText"].as(); + } + else if (httpResponseCode == 401) + { + Serial.println("Access token expired, trying again with a new 
token"); + _access_token = getAccessToken(); + return convertSpeechToText(); + } + else + { + Serial.print("Failed to convert text to speech - error "); + Serial.println(httpResponseCode); + } + + httpClient.end(); + + return text; + } + +private: + String getAccessToken() + { + char url[128]; + sprintf(url, TOKEN_URL, SPEECH_LOCATION); + + HTTPClient httpClient; + httpClient.begin(_token_client, url); + + httpClient.addHeader("Ocp-Apim-Subscription-Key", SPEECH_API_KEY); + int httpResultCode = httpClient.POST("{}"); + + if (httpResultCode != 200) + { + Serial.println("Error getting access token, trying again..."); + delay(10000); + return getAccessToken(); + } + + Serial.println("Got access token."); + String result = httpClient.getString(); + + httpClient.end(); + + return result; + } + + WiFiClientSecure _token_client; + WiFiClientSecure _speech_client; + String _access_token; +}; + +SpeechToText speechToText; diff --git a/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/src/text_to_speech.h b/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/src/text_to_speech.h new file mode 100644 index 00000000..d7174fcd --- /dev/null +++ b/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/src/text_to_speech.h @@ -0,0 +1,86 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include "config.h" + +class TextToSpeech +{ +public: + void init() + { + DynamicJsonDocument doc(1024); + doc["language"] = LANGUAGE; + + String body; + serializeJson(doc, body); + + HTTPClient httpClient; + httpClient.begin(_client, GET_VOICES_FUNCTION_URL); + + int httpResponseCode = httpClient.POST(body); + + if (httpResponseCode == 200) + { + String result = httpClient.getString(); + Serial.println(result); + + DynamicJsonDocument doc(1024); + deserializeJson(doc, result.c_str()); + + JsonArray obj = doc.as(); + _voice = obj[0].as(); + + Serial.print("Using voice "); + 
Serial.println(_voice); + } + else + { + Serial.print("Failed to get voices - error "); + Serial.println(httpResponseCode); + } + + httpClient.end(); + } + + void convertTextToSpeech(String text) + { + DynamicJsonDocument doc(1024); + doc["language"] = LANGUAGE; + doc["voice"] = _voice; + doc["text"] = text; + + String body; + serializeJson(doc, body); + + HTTPClient httpClient; + httpClient.begin(_client, TEXT_TO_SPEECH_FUNCTION_URL); + + int httpResponseCode = httpClient.POST(body); + + if (httpResponseCode == 200) + { + File wav_file = SD.open("SPEECH.WAV", FILE_WRITE); + httpClient.writeToStream(&wav_file); + wav_file.close(); + } + else + { + Serial.print("Failed to get speech - error "); + Serial.println(httpResponseCode); + } + + httpClient.end(); + } +private: + WiFiClient _client; + String _voice; +}; + +TextToSpeech textToSpeech; \ No newline at end of file diff --git a/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/src/text_translator.h b/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/src/text_translator.h new file mode 100644 index 00000000..0404fa39 --- /dev/null +++ b/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/src/text_translator.h @@ -0,0 +1,58 @@ +#pragma once + +#include +#include +#include +#include + +#include "config.h" + +class TextTranslator +{ +public: + String translateText(String text, String from_language, String to_language) + { + DynamicJsonDocument doc(1024); + doc["text"] = text; + doc["from_language"] = from_language; + doc["to_language"] = to_language; + + String body; + serializeJson(doc, body); + + Serial.print("Translating "); + Serial.print(text); + Serial.print(" from "); + Serial.print(from_language); + Serial.print(" to "); + Serial.println(to_language); + + HTTPClient httpClient; + httpClient.begin(_client, TRANSLATE_FUNCTION_URL); + + int httpResponseCode = httpClient.POST(body); + + String translated_text = ""; + + if 
(httpResponseCode == 200) + { + translated_text = httpClient.getString(); + Serial.print("Translated: "); + Serial.println(translated_text); + } + else + { + Serial.print("Failed to translate text - error "); + Serial.println(httpResponseCode); + } + + httpClient.end(); + + return translated_text; + } + +private: + WiFiClient _client; +}; + +TextTranslator textTranslator; diff --git a/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/test/README b/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/test/README new file mode 100644 index 00000000..b94d0890 --- /dev/null +++ b/6-consumer/lessons/4-multiple-language-support/code/wio-terminal/smart-timer/test/README @@ -0,0 +1,11 @@ + +This directory is intended for PlatformIO Unit Testing and project tests. + +Unit Testing is a software testing method by which individual units of +source code, sets of one or more MCU program modules together with associated +control data, usage procedures, and operating procedures, are tested to +determine whether they are fit for use. Unit testing finds problems early +in the development cycle. + +More information about PlatformIO Unit Testing: +- https://docs.platformio.org/page/plus/unit-testing.html diff --git a/6-consumer/lessons/4-multiple-language-support/pi-translate-speech.md b/6-consumer/lessons/4-multiple-language-support/pi-translate-speech.md index 51305971..59c08ff5 100644 --- a/6-consumer/lessons/4-multiple-language-support/pi-translate-speech.md +++ b/6-consumer/lessons/4-multiple-language-support/pi-translate-speech.md @@ -8,7 +8,7 @@ The speech service REST API doesn't support direct translations, instead you can ### Task - use the translator resource to translate text -1. Your smart timer will have 2 languages set - the language of the server that was used to train LUIS, and the language spoken by the user. 
Update the `language` variable to be the language that will be spoken by the used, and add a new variable called `server_language` for the language used to train LUIS: +1. Your smart timer will have 2 languages set - the language of the server that was used to train LUIS (the same language is also used to build the messages to speak to the user), and the language spoken by the user. Update the `language` variable to be the language that will be spoken by the user, and add a new variable called `server_language` for the language used to train LUIS: ```python language = '' diff --git a/6-consumer/lessons/4-multiple-language-support/virtual-device-translate-speech.md b/6-consumer/lessons/4-multiple-language-support/virtual-device-translate-speech.md index 15db3690..493a5b40 100644 --- a/6-consumer/lessons/4-multiple-language-support/virtual-device-translate-speech.md +++ b/6-consumer/lessons/4-multiple-language-support/virtual-device-translate-speech.md @@ -20,7 +20,7 @@ The speech service can take speech and not only convert to text in the same lang This imports classes used to translate speech, and a `requests` library that will be used to make a call to the Translator service later in this lesson. -1. Your smart timer will have 2 languages set - the language of the server that was used to train LUIS, and the language spoken by the user. Update the `language` variable to be the language that will be spoken by the used, and add a new variable called `server_language` for the language used to train LUIS: +1. Your smart timer will have 2 languages set - the language of the server that was used to train LUIS (the same language is also used to build the messages to speak to the user), and the language spoken by the user. 
Update the `language` variable to be the language that will be spoken by the user, and add a new variable called `server_language` for the language used to train LUIS: ```python language = '' diff --git a/6-consumer/lessons/4-multiple-language-support/wio-terminal-translate-speech.md b/6-consumer/lessons/4-multiple-language-support/wio-terminal-translate-speech.md index 7d25fa0f..2e3ce926 100644 --- a/6-consumer/lessons/4-multiple-language-support/wio-terminal-translate-speech.md +++ b/6-consumer/lessons/4-multiple-language-support/wio-terminal-translate-speech.md @@ -1,3 +1,270 @@ # Translate speech - Wio Terminal -Coming soon! +In this part of the lesson, you will write code to translate text using the translator service. + +## Convert text to speech using the translator service + +The speech service REST API doesn't support direct translations, instead you can use the Translator service to translate the text generated by the speech to text service, and the text of the spoken response. This service has a REST API you can use to translate the text, but to make it easier to use this will be wrapped in another HTTP trigger in your functions app. + +### Task - create a serverless function to translate text + +1. Open your `smart-timer-trigger` project in VS Code, and open the terminal ensuring the virtual environment is activated. If not, kill and re-create the terminal. + +1. Open the `local.settings.json` file and add settings for the translator API key and location: + + ```json + "TRANSLATOR_KEY": "", + "TRANSLATOR_LOCATION": "" + ``` + + Replace `` with the API key for your translator service resource. Replace `` with the location you used when you created the translator service resource. + +1. 
Add a new HTTP trigger to this app called `translate-text` using the following command from inside the VS Code terminal in the root folder of the functions app project: + + ```sh + func new --name translate-text --template "HTTP trigger" + ``` + + This will create an HTTP trigger called `translate-text`. + +1. Replace the contents of the `__init__.py` file in the `translate-text` folder with the following: + + ```python + import logging + import os + import requests + + import azure.functions as func + + location = os.environ['TRANSLATOR_LOCATION'] + translator_key = os.environ['TRANSLATOR_KEY'] + + def main(req: func.HttpRequest) -> func.HttpResponse: + req_body = req.get_json() + from_language = req_body['from_language'] + to_language = req_body['to_language'] + text = req_body['text'] + + logging.info(f'Translating {text} from {from_language} to {to_language}') + + url = f'https://api.cognitive.microsofttranslator.com/translate?api-version=3.0' + + headers = { + 'Ocp-Apim-Subscription-Key': translator_key, + 'Ocp-Apim-Subscription-Region': location, + 'Content-type': 'application/json' + } + + params = { + 'from': from_language, + 'to': to_language + } + + body = [{ + 'text' : text + }] + + response = requests.post(url, headers=headers, params=params, json=body) + return func.HttpResponse(response.json()[0]['translations'][0]['text']) + ``` + + This code extracts the text and the languages from the HTTP request. It then makes a request to the translator REST API, passing the languages as parameters for the URL and the text to translate as the body. Finally, the translation is returned. + +1. Run your function app locally. You can then call this using a tool like curl in the same way that you tested your `text-to-timer` HTTP trigger. 
Make sure to pass the text to translate and the languages as a JSON body: + + ```json + { + "text": "Définir une minuterie de 30 secondes", + "from_language": "fr-FR", + "to_language": "en-US" + } + ``` + + This example translates *Définir une minuterie de 30 secondes* from French to US English. It will return *Set a 30-second timer*. + +> 💁 You can find this code in the [code/functions](code/functions) folder. + +### Task - use the translator function to translate text + +1. Open the `smart-timer` project in VS Code if it is not already open. + +1. Your smart timer will have 2 languages set - the language of the server that was used to train LUIS (the same language is also used to build the messages to speak to the user), and the language spoken by the user. Update the `LANGUAGE` constant in the `config.h` header file to be the language that will be spoken by the user, and add a new constant called `SERVER_LANGUAGE` for the language used to train LUIS: + + ```cpp + const char *LANGUAGE = ""; + const char *SERVER_LANGUAGE = ""; + ``` + + Replace `` with the locale name for the language you will be speaking in, for example `fr-FR` for French, or `zh-HK` for Cantonese. + + Replace `` with the locale name for the language used to train LUIS. + + You can find a list of the supported languages and their locale names in the [Language and voice support documentation on Microsoft docs](https://docs.microsoft.com/azure/cognitive-services/speech-service/language-support?WT.mc_id=academic-17441-jabenn#speech-to-text). + + > 💁 If you don't speak multiple languages you can use a service like [Bing Translate](https://www.bing.com/translator) or [Google Translate](https://translate.google.com) to translate from your preferred language to a language of your choice. These services can then play audio of the translated text. 
+ > + > For example, if you train LUIS in English, but want to use French as the user language, you can translate sentences like "set a 2 minute and 27 second timer" from English into French using Bing Translate, then use the **Listen translation** button to speak the translation into your microphone. + > + > ![The listen translation button on Bing translate](../../../images/bing-translate.png) + +1. Add the translator API key and location below the `SPEECH_LOCATION`: + + ```cpp + const char *TRANSLATOR_API_KEY = ""; + const char *TRANSLATOR_LOCATION = ""; + ``` + + Replace `` with the API key for your translator service resource. Replace `` with the location you used when you created the translator service resource. + +1. Add the translator trigger URL below the `VOICE_URL`: + + ```cpp + const char *TRANSLATE_FUNCTION_URL = ""; + ``` + + Replace `` with the URL for the `translate-text` HTTP trigger on your function app. This will be the same as the value for `TEXT_TO_TIMER_FUNCTION_URL`, except with a function name of `translate-text` instead of `text-to-timer`. + +1. Add a new file to the `src` folder called `text_translator.h`. + +1. This new `text_translator.h` header file will contain a class to translate text. Add the following to this file to declare this class: + + ```cpp + #pragma once + + #include + #include + #include + #include + + #include "config.h" + + class TextTranslator + { + public: + private: + WiFiClient _client; + }; + + TextTranslator textTranslator; + ``` + + This declares the `TextTranslator` class, along with an instance of this class. The class has a single field for the WiFi client. + +1. To the `public` section of this class, add a method to translate text: + + ```cpp + String translateText(String text, String from_language, String to_language) + { + } + ``` + + This method takes the language to translate from, and the language to translate to. 
When handling speech, the speech will be translated from the user's language to the LUIS server language, and when giving responses it will translate from the LUIS server language to the user's language. + +1. In this method, add code to construct a JSON body containing the text to translate and the languages: + + ```cpp + DynamicJsonDocument doc(1024); + doc["text"] = text; + doc["from_language"] = from_language; + doc["to_language"] = to_language; + + String body; + serializeJson(doc, body); + + Serial.print("Translating "); + Serial.print(text); + Serial.print(" from "); + Serial.print(from_language); + Serial.print(" to "); + Serial.println(to_language); + ``` + +1. Below this, add the following code to send the body to the serverless function app: + + ```cpp + HTTPClient httpClient; + httpClient.begin(_client, TRANSLATE_FUNCTION_URL); + + int httpResponseCode = httpClient.POST(body); + ``` + +1. Next, add code to get the response: + + ```cpp + String translated_text = ""; + + if (httpResponseCode == 200) + { + translated_text = httpClient.getString(); + Serial.print("Translated: "); + Serial.println(translated_text); + } + else + { + Serial.print("Failed to translate text - error "); + Serial.println(httpResponseCode); + } + ``` + +1. Finally, add code to close the connection and return the translated text: + + ```cpp + httpClient.end(); + + return translated_text; + ``` + +### Task - translate the recognized speech and the responses + +1. Open the `main.cpp` file. + +1. Add an include directive at the top of the file for the `TextTranslator` class header file: + + ```cpp + #include "text_translator.h" + ``` + +1. The text that is said when a timer is set or expires needs to be translated. To do this, add the following as the first line of the `say` function: + + ```cpp + text = textTranslator.translateText(text, SERVER_LANGUAGE, LANGUAGE); + ``` + + This will translate the text to the user's language. + +1. 
In the `processAudio` function, text is retrieved from the captured audio with the `String text = speechToText.convertSpeechToText();` call. After this call, translate the text: + + ```cpp + String text = speechToText.convertSpeechToText(); + text = textTranslator.translateText(text, LANGUAGE, SERVER_LANGUAGE); + ``` + + This will translate the text from the user's language into the language used on the server. + +1. Build this code, upload it to your Wio Terminal and test it out through the serial monitor. Once you see `Ready` in the serial monitor, press the C button (the one on the left-hand side, closest to the power switch), and speak. Ensure your function app is running, and request a timer in the user language, either by speaking that language yourself, or using a translation app. + + ```output + Connecting to WiFi.. + Connected! + Got access token. + Ready. + Starting recording... + Finished recording + Sending speech... + Speech sent! + {"RecognitionStatus":"Success","DisplayText":"Définir une minuterie de 2 minutes 27 secondes.","Offset":9600000,"Duration":40400000} + Translating Définir une minuterie de 2 minutes 27 secondes. from fr-FR to en-US + Translated: Set a timer of 2 minutes 27 seconds. + Set a timer of 2 minutes 27 seconds. + {"seconds": 147} + Translating 2 minute 27 second timer started. from en-US to fr-FR + Translated: 2 minute 27 seconde minute a commencé. + 2 minute 27 seconde minute a commencé. + Translating Times up on your 2 minute 27 second timer. from en-US to fr-FR + Translated: Chronométrant votre minuterie de 2 minutes 27 secondes. + Chronométrant votre minuterie de 2 minutes 27 secondes. + ``` + +> 💁 You can find this code in the [code/wio-terminal](code/wio-terminal) folder. + +😀 Your multi-lingual timer program was a success! 
diff --git a/hardware.md b/hardware.md index 90fc4cc0..784c7858 100644 --- a/hardware.md +++ b/hardware.md @@ -46,7 +46,7 @@ These are specific to using the Wio terminal Arduino device, and are not relevan * [Breadboard Jumper Wires](https://www.seeedstudio.com/Breadboard-Jumper-Wire-Pack-241mm-200mm-160mm-117m-p-234.html) * Headphones or other speaker with a 3.5mm jack, or a JST speaker such as: * [Mono Enclosed Speaker - 2W 6 Ohm](https://www.seeedstudio.com/Mono-Enclosed-Speaker-2W-6-Ohm-p-2832.html) -* *Optional* - microSD Card 16GB or less for testing image capture, along with a connector to use the SD card with your computer if you don't have one built-in. **NOTE** - the Wio Terminal only supports SD cards up to 16GB, it does not support higher capacities. +* microSD Card 16GB or less, along with a connector to use the SD card with your computer if you don't have one built-in. **NOTE** - the Wio Terminal only supports SD cards up to 16GB, it does not support higher capacities. ## Raspberry Pi diff --git a/images/Diagrams.sketch b/images/Diagrams.sketch index 53c08897..6dfeb3b5 100644 Binary files a/images/Diagrams.sketch and b/images/Diagrams.sketch differ diff --git a/images/wio-sd-card.png b/images/wio-sd-card.png new file mode 100644 index 00000000..c69b0b30 Binary files /dev/null and b/images/wio-sd-card.png differ diff --git a/translations/README.bn.md b/translations/README.bn.md index e4da16a1..1d11b5d1 100644 --- a/translations/README.bn.md +++ b/translations/README.bn.md @@ -75,30 +75,30 @@ | | প্রজেক্ট | কনসেপ্ট | শিখনফল | লেসন/পাঠ্য | | :-: | :----------: | :-------------: | ------------------- | :-----------: | -| 01 | [IoT যাত্রার সূচনা](./1-getting-started) | IoT পরিচিতি | প্রথম আইওটি ডিভাইস সেটআপ করার সময়ই আইওটি এর প্রাথমিক নীতিগুলি এবং আইওটি সল্যুশনের বেসিক বিষয়গুলো যেমনঃ সেন্সর এবং ক্লাউড সার্ভিস সংক্রান্ত বিষয়গুলো সম্পর্কে জ্ঞান অর্জন | [IoT পরিচিতি](../1-getting-started/lessons/1-introduction-to-iot/translations/README.bn.md) | -| 02 | 
[IoT যাত্রার সূচনা](./1-getting-started) | IoT এর আরো গভীরে| আইওটি সিস্টেমের উপাদানগুলির পাশাপাশি মাইক্রোকন্ট্রোলার এবং সিঙ্গেল-বোর্ড কম্পিউটার সম্পর্কে জ্ঞান অর্জন | [IoT এর আরো গভীরে](../1-getting-started/lessons/2-deeper-dive/translations/README.bn.md) | -| 03 | [IoT যাত্রার সূচনা](./1-getting-started) | সেন্সর এবং অ্যাকচুয়েটরের সাহায্যে বাহ্যিক জগতের সাথে যোগাযোগ| 'নাইটলাইট' প্রজেক্টটি করার সাথেই সমান্তরালে বাহ্যিক জগত থেকে ডেটা সংগ্রহ করার জন্য সেন্সর এবং প্রতিক্রিয়া জানাতে ব্যবহৃত অ্যাকচুয়েটর সম্পর্কে জ্ঞান অর্জন | [সেন্সর এবং অ্যাকচুয়েটরের সাহায্যে বাহ্যিক জগতের সাথে যোগাযোগ](../1-getting-started/lessons/3-sensors-and-actuators/translations/README.bn.md) | -| 04 | [IoT যাত্রার সূচনা](./1-getting-started) | আইওটি ডিভাইসকে ইন্টারনেটে সংযুক্ত করা | এমকিউটিটি ব্রোকারের সাথে নাইটলাইটটি সংযুক্ত করে বার্তাগুলি প্রেরণ এবং গ্রহণ করতে আইওটি ডিভাইসটিকে কীভাবে ইন্টারনেটে সংযুক্ত করতে হবে সেই সংক্রান্ত জ্ঞান অর্জন | [আইওটি ডিভাইসকে ইন্টারনেটে সংযুক্তকরণ ](../1-getting-started/lessons/4-connect-internet/translations/README.bn.md) | -| 05 | [ফার্ম](./2-farm) | আইওটি দ্বারা উদ্ভিদ বৃদ্ধির পূর্বাভাস | আইওটি ডিভাইস দ্বারা গৃহিত তাপমাত্রার ডেটা ব্যবহার করে কীভাবে উদ্ভিদ বৃদ্ধির পূর্বাভাস দেওয়া যায় তা শেখা | [আইওটি দ্বারা উদ্ভিদ বৃদ্ধির পূর্বাভাস](../2-farm/lessons/1-predict-plant-growth/translations/README.bn.md) | -| 06 | [ফার্ম](./2-farm) | মাটির আর্দ্রতা নির্ণয় | কীভাবে মাটির আর্দ্রতা সনাক্ত করা যায় এবং তা করতে মাটির আর্দ্রতা সেন্সরটি কীভাবে ক্যালিব্রেট করতে হবে তা শেখা | [মাটির আর্দ্রতা নির্ণয়](../2-farm/lessons/2-detect-soil-moisture/translations/README.bn.md) | -| 07 | [ফার্ম](./2-farm) | স্বয়ংক্রিয় সেচকার্য | রিলে এবং এমকিউটিটি ব্যবহার করে কীভাবে স্বয়ংক্রিয়ভাবে এবং নির্দিষ্ট সময়ে সেচ দেয়া যায় সে সংক্রান্ত জ্ঞান অর্জন | [স্বয়ংক্রিয় সেচকার্য](../2-farm/lessons/3-automated-plant-watering/translations/README.bn.md) | -| 08 | [ফার্ম](./2-farm) | উদ্ভিদকে ক্লাউডে সংযুক্ত করা | ক্লাউড এবং ক্লাউড-হোস্ট করা আইওটি পরিষেবাগুলি সম্পর্কে জ্ঞান অর্জন এবং কীভাবে 
আমাদের উদ্ভিদটিকে পাবলিক এমকিউটিটি ব্রোকারের পরিবর্তে ক্লাউডে সংযুক্ত করতে হবে তা শেখা | [উদ্ভিদকে ক্লাউডে সংযুক্ত করা](../2-farm/lessons/4-migrate-your-plant-to-the-cloud/translations/README.bn.md) | -| 09 | [ফার্ম](./2-farm) | অ্যাপ্লিকেশন লজিককে ক্লাউডে স্থানান্তর | ক্লাউডে কীভাবে অ্যাপ্লিকেশন লজিক লিখতে হবে যাতে তা আইওটি ম্যাসেজের প্রতিক্রিয়া জানাতে পারে তা শেখা| [অ্যাপ্লিকেশন লজিককে ক্লাউডে স্থানান্তর](../2-farm/lessons/5-migrate-application-to-the-cloud/translations/README.bn.md) | -| 10 | [ফার্ম](./2-farm) | Keep your plant secure | আইওটি তে নিরাপত্তা সম্পর্কে জানা এবং Key ও Certificate এর সাহায্যে আমাদের উদ্ভিদটিকে কীভাবে সুরক্ষিত রাখা যায় তা শেখা | [Keep your plant secure](./2-farm/lessons/6-keep-your-plant-secure/README.md) | -| 11 | [পরিবহন](./3-transport) | Location tracking | আইওটি ডিভাইসে জিপিএস লোকেশন ট্র্যাকিং শেখা | [Location tracking](./3-transport/lessons/1-location-tracking/README.md) | -| 12 | [পরিবহন](./3-transport) | Store location data | পরবর্তী সময়ে বিশ্লেষণ বা চিত্রভিত্তিক ডেটা প্রদর্শন (Visualization) এর জন্য আইওটি ডেটা কীভাবে স্টোর করা যায় তা জানা | [Store location data](./3-transport/lessons/2-store-location-data/README.md) | -| 13 | [পরিবহন](./3-transport) | Visualize location data |মানচিত্রে অবস্থানের ডেটা প্রদর্শন করা এবং মানচিত্রগুলি কীভাবে ২টি মাত্রায় বাস্তব ত্রিমাত্রিক বিশ্বের উপস্থাপন করে সে সম্পর্কে জ্ঞান অর্জন | [Visualize location data](./3-transport/lessons/3-visualize-location-data/README.md) | -| 14 | [পরিবহন](./3-transport) | Geofences | Geofences সম্পর্কে জানা এবং কীভাবে এটি ব্যবহার করে সাপ্লাই চেইনের বিভিন্ন পর্যায়ের বাহনগুলো যখন গন্তব্যের কাছাকাছি পৌঁছায় তখন এলার্ট দেয়া যায় তা শেখা | [Geofences](./3-transport/lessons/4-geofences/README.md) | -| 15 | [উৎপাদন](./4-manufacturing) | Train a fruit quality detector | ক্লাউডের ছবি শ্রেণিবদ্ধকরণ মডেলকে (Image Classifier) ফলের মান সনাক্ত করতে কীভাবে প্রশিক্ষিত করতে হবে সে সম্পর্কে জানা | [Train a fruit quality 
detector](./4-manufacturing/lessons/1-train-fruit-detector/README.md) | -| 16 | [উৎপাদন](./4-manufacturing) | Check fruit quality from an IoT device | আইওটি ডিভাইসে ফলের গুণগত মান সনাক্তকারী ব্যবহার | [Check fruit quality from an IoT device](./4-manufacturing/lessons/2-check-fruit-from-device/README.md) | -| 17 | [উৎপাদন](./4-manufacturing) | Run your fruit detector on the edge | ফলের গুণগত মান সনাক্তকারীকে Edge হিসেবে ব্যবহার | [Run your fruit detector on the edge](./4-manufacturing/lessons/3-run-fruit-detector-edge/README.md) | -| 18 | [উৎপাদন](./4-manufacturing) | Trigger fruit quality detection from a sensor | সেন্সর থেকে ফলের গুণাগুণ সনাক্তকরণ নিয়ন্ত্রণ করা শেখা| [Trigger fruit quality detection from a sensor](./4-manufacturing/lessons/4-trigger-fruit-detector/README.md) | -| 19 | [খুচরাপর্যায়](./5-retail) | Train a stock detector | কোনও দোকানে স্টক গণনা করতে স্টক ডিটেক্টরকে প্রশিক্ষণ দেওয়ার জন্য কীভাবে অবজেক্ট সনাক্তকরণ ব্যবহার করা যায় তা শেখা | [Train a stock detector](./5-retail/lessons/1-train-stock-detector/README.md) | -| 20 | [খুচরাপর্যায়](./5-retail) | Check stock from an IoT device | কোন অবজেক্ট সনাক্তকরণ মডেল ব্যবহার করে আইওটি ডিভাইস থেকে স্টক পর্যবেক্ষণ করা শেখা | [Check stock from an IoT device](./5-retail/lessons/2-check-stock-device/README.md) | -| 21 | [খুচরাপর্যায়](./6-consumer) | Recognize speech with an IoT device | আইওটি ডিভাইস থেকে বক্তব্য (speech) সনাক্ত করে স্মার্ট টাইমার তৈরী করা | [Recognize speech with an IoT device](./6-consumer/lessons/1-speech-recognition/README.md) | -| 22 | [ভোক্তাপর্যায়](./6-consumer) | Understand language | আইওটি ডিভাইসকে কীভাবে কথা বোঝাতে হয় তা শেখা | [Understand language](./6-consumer/lessons/2-language-understanding/README.md) | -| 23 | [ভোক্তাপর্যায়](./6-consumer) | Set a timer and provide spoken feedback | আইওটি ডিভাইসে কীভাবে টাইমার সেট করতে হয় এবং টাইমার কখন সেট হয় এবং তা কখন শেষ হয় সে বিষয়ে কথিত প্রতিক্রিয়া যেন সেই আইওটি ডিভাইস জানাতে পারে তা শেখা | [Set a timer and provide spoken 
feedback](./6-consumer/lessons/3-spoken-feedback/README.md) | -| 24 | [ভোক্তাপর্যায়](./6-consumer) | Support multiple languages | কীভাবে আইওটি ডিভাইসে নির্দেশ দেয়া এবং স্মার্ট টাইমার থেকে আসা প্রতিক্রিয়া উভয়েই একাধিক ভাষা সাপোর্ট করানো যায় তা শেখা | [Support multiple languages](./6-consumer/lessons/4-multiple-language-support/README.md) | +| 01 | [IoT যাত্রার সূচনা](../1-getting-started) | IoT পরিচিতি | প্রথম আইওটি ডিভাইস সেটআপ করার সময়ই আইওটি এর প্রাথমিক নীতিগুলি এবং আইওটি সল্যুশনের বেসিক বিষয়গুলো যেমনঃ সেন্সর এবং ক্লাউড সার্ভিস সংক্রান্ত বিষয়গুলো সম্পর্কে জ্ঞান অর্জন | [IoT পরিচিতি](../1-getting-started/lessons/1-introduction-to-iot/translations/README.bn.md) | +| 02 | [IoT যাত্রার সূচনা](../1-getting-started) | IoT এর আরো গভীরে| আইওটি সিস্টেমের উপাদানগুলির পাশাপাশি মাইক্রোকন্ট্রোলার এবং সিঙ্গেল-বোর্ড কম্পিউটার সম্পর্কে জ্ঞান অর্জন | [IoT এর আরো গভীরে](../1-getting-started/lessons/2-deeper-dive/translations/README.bn.md) | +| 03 | [IoT যাত্রার সূচনা](../1-getting-started) | সেন্সর এবং অ্যাকচুয়েটরের সাহায্যে বাহ্যিক জগতের সাথে যোগাযোগ| 'নাইটলাইট' প্রজেক্টটি করার সাথেই সমান্তরালে বাহ্যিক জগত থেকে ডেটা সংগ্রহ করার জন্য সেন্সর এবং প্রতিক্রিয়া জানাতে ব্যবহৃত অ্যাকচুয়েটর সম্পর্কে জ্ঞান অর্জন | [সেন্সর এবং অ্যাকচুয়েটরের সাহায্যে বাহ্যিক জগতের সাথে যোগাযোগ](../1-getting-started/lessons/3-sensors-and-actuators/translations/README.bn.md) | +| 04 | [IoT যাত্রার সূচনা](../1-getting-started) | আইওটি ডিভাইসকে ইন্টারনেটে সংযুক্ত করা | এমকিউটিটি ব্রোকারের সাথে নাইটলাইটটি সংযুক্ত করে বার্তাগুলি প্রেরণ এবং গ্রহণ করতে আইওটি ডিভাইসটিকে কীভাবে ইন্টারনেটে সংযুক্ত করতে হবে সেই সংক্রান্ত জ্ঞান অর্জন | [আইওটি ডিভাইসকে ইন্টারনেটে সংযুক্তকরণ ](../1-getting-started/lessons/4-connect-internet/translations/README.bn.md) | +| 05 | [ফার্ম](../2-farm) | আইওটি দ্বারা উদ্ভিদ বৃদ্ধির পূর্বাভাস | আইওটি ডিভাইস দ্বারা গৃহিত তাপমাত্রার ডেটা ব্যবহার করে কীভাবে উদ্ভিদ বৃদ্ধির পূর্বাভাস দেওয়া যায় তা শেখা | [আইওটি দ্বারা উদ্ভিদ বৃদ্ধির 
পূর্বাভাস](../2-farm/lessons/1-predict-plant-growth/translations/README.bn.md) | +| 06 | [ফার্ম](../2-farm) | মাটির আর্দ্রতা নির্ণয় | কীভাবে মাটির আর্দ্রতা সনাক্ত করা যায় এবং তা করতে মাটির আর্দ্রতা সেন্সরটি কীভাবে ক্যালিব্রেট করতে হবে তা শেখা | [মাটির আর্দ্রতা নির্ণয়](../2-farm/lessons/2-detect-soil-moisture/translations/README.bn.md) | +| 07 | [ফার্ম](../2-farm) | স্বয়ংক্রিয় সেচকার্য | রিলে এবং এমকিউটিটি ব্যবহার করে কীভাবে স্বয়ংক্রিয়ভাবে এবং নির্দিষ্ট সময়ে সেচ দেয়া যায় সে সংক্রান্ত জ্ঞান অর্জন | [স্বয়ংক্রিয় সেচকার্য](../2-farm/lessons/3-automated-plant-watering/translations/README.bn.md) | +| 08 | [ফার্ম](../2-farm) | উদ্ভিদকে ক্লাউডে সংযুক্ত করা | ক্লাউড এবং ক্লাউড-হোস্ট করা আইওটি পরিষেবাগুলি সম্পর্কে জ্ঞান অর্জন এবং কীভাবে আমাদের উদ্ভিদটিকে পাবলিক এমকিউটিটি ব্রোকারের পরিবর্তে ক্লাউডে সংযুক্ত করতে হবে তা শেখা | [উদ্ভিদকে ক্লাউডে সংযুক্ত করা](../2-farm/lessons/4-migrate-your-plant-to-the-cloud/translations/README.bn.md) | +| 09 | [ফার্ম](../2-farm) | অ্যাপ্লিকেশন লজিককে ক্লাউডে স্থানান্তর | ক্লাউডে কীভাবে অ্যাপ্লিকেশন লজিক লিখতে হবে যাতে তা আইওটি ম্যাসেজের প্রতিক্রিয়া জানাতে পারে তা শেখা| [অ্যাপ্লিকেশন লজিককে ক্লাউডে স্থানান্তর](../2-farm/lessons/5-migrate-application-to-the-cloud/translations/README.bn.md) | +| 10 | [ফার্ম](../2-farm) | Keep your plant secure | আইওটি তে নিরাপত্তা সম্পর্কে জানা এবং Key ও Certificate এর সাহায্যে আমাদের উদ্ভিদটিকে কীভাবে সুরক্ষিত রাখা যায় তা শেখা | [Keep your plant secure](../2-farm/lessons/6-keep-your-plant-secure/README.md) | +| 11 | [পরিবহন](../3-transport) | Location tracking | আইওটি ডিভাইসে জিপিএস লোকেশন ট্র্যাকিং শেখা | [Location tracking](../3-transport/lessons/1-location-tracking/README.md) | +| 12 | [পরিবহন](../3-transport) | Store location data | পরবর্তী সময়ে বিশ্লেষণ বা চিত্রভিত্তিক ডেটা প্রদর্শন (Visualization) এর জন্য আইওটি ডেটা কীভাবে স্টোর করা যায় তা জানা | [Store location data](../3-transport/lessons/2-store-location-data/README.md) | +| 13 | [পরিবহন](../3-transport) | Visualize location data 
|মানচিত্রে অবস্থানের ডেটা প্রদর্শন করা এবং মানচিত্রগুলি কীভাবে ২টি মাত্রায় বাস্তব ত্রিমাত্রিক বিশ্বের উপস্থাপন করে সে সম্পর্কে জ্ঞান অর্জন | [Visualize location data](../3-transport/lessons/3-visualize-location-data/README.md) | +| 14 | [পরিবহন](../3-transport) | Geofences | Geofences সম্পর্কে জানা এবং কীভাবে এটি ব্যবহার করে সাপ্লাই চেইনের বিভিন্ন পর্যায়ের বাহনগুলো যখন গন্তব্যের কাছাকাছি পৌঁছায় তখন এলার্ট দেয়া যায় তা শেখা | [Geofences](../3-transport/lessons/4-geofences/README.md) | +| 15 | [উৎপাদন](../4-manufacturing) | Train a fruit quality detector | ক্লাউডের ছবি শ্রেণিবদ্ধকরণ মডেলকে (Image Classifier) ফলের মান সনাক্ত করতে কীভাবে প্রশিক্ষিত করতে হবে সে সম্পর্কে জানা | [Train a fruit quality detector](../4-manufacturing/lessons/1-train-fruit-detector/README.md) | +| 16 | [উৎপাদন](../4-manufacturing) | Check fruit quality from an IoT device | আইওটি ডিভাইসে ফলের গুণগত মান সনাক্তকারী ব্যবহার | [Check fruit quality from an IoT device](../4-manufacturing/lessons/2-check-fruit-from-device/README.md) | +| 17 | [উৎপাদন](../4-manufacturing) | Run your fruit detector on the edge | ফলের গুণগত মান সনাক্তকারীকে Edge হিসেবে ব্যবহার | [Run your fruit detector on the edge](../4-manufacturing/lessons/3-run-fruit-detector-edge/README.md) | +| 18 | [উৎপাদন](../4-manufacturing) | Trigger fruit quality detection from a sensor | সেন্সর থেকে ফলের গুণাগুণ সনাক্তকরণ নিয়ন্ত্রণ করা শেখা| [Trigger fruit quality detection from a sensor](../4-manufacturing/lessons/4-trigger-fruit-detector/README.md) | +| 19 | [খুচরাপর্যায়](../5-retail) | Train a stock detector | কোনও দোকানে স্টক গণনা করতে স্টক ডিটেক্টরকে প্রশিক্ষণ দেওয়ার জন্য কীভাবে অবজেক্ট সনাক্তকরণ ব্যবহার করা যায় তা শেখা | [Train a stock detector](../5-retail/lessons/1-train-stock-detector/README.md) | +| 20 | [খুচরাপর্যায়](../5-retail) | Check stock from an IoT device | কোন অবজেক্ট সনাক্তকরণ মডেল ব্যবহার করে আইওটি ডিভাইস থেকে স্টক পর্যবেক্ষণ করা শেখা | [Check stock from an IoT 
device](../5-retail/lessons/2-check-stock-device/README.md) | +| 21 | [ভোক্তাপর্যায়](../6-consumer) | Recognize speech with an IoT device | আইওটি ডিভাইস থেকে বক্তব্য (speech) সনাক্ত করে স্মার্ট টাইমার তৈরী করা | [Recognize speech with an IoT device](../6-consumer/lessons/1-speech-recognition/README.md) | +| 22 | [ভোক্তাপর্যায়](../6-consumer) | Understand language | আইওটি ডিভাইসকে কীভাবে কথা বোঝাতে হয় তা শেখা | [Understand language](../6-consumer/lessons/2-language-understanding/README.md) | +| 23 | [ভোক্তাপর্যায়](../6-consumer) | Set a timer and provide spoken feedback | আইওটি ডিভাইসে কীভাবে টাইমার সেট করতে হয় এবং টাইমার কখন সেট হয় এবং তা কখন শেষ হয় সে বিষয়ে কথিত প্রতিক্রিয়া যেন সেই আইওটি ডিভাইস জানাতে পারে তা শেখা | [Set a timer and provide spoken feedback](../6-consumer/lessons/3-spoken-feedback/README.md) | +| 24 | [ভোক্তাপর্যায়](../6-consumer) | Support multiple languages | কীভাবে আইওটি ডিভাইসে নির্দেশ দেয়া এবং স্মার্ট টাইমার থেকে আসা প্রতিক্রিয়া উভয়েই একাধিক ভাষা সাপোর্ট করানো যায় তা শেখা | [Support multiple languages](../6-consumer/lessons/4-multiple-language-support/README.md) | ## অফলাইন ব্যবহার @@ -126,4 +126,4 @@ npm run convert ## চিত্রের Attributions -এই পাঠ্যক্রমটিতে ব্যবহৃত ছবিগুলির জন্য সকল এট্রিবিউট পাওয়া যাবে [Attributions](./attributions.md) ফোল্ডারটিতে । +এই পাঠ্যক্রমটিতে ব্যবহৃত ছবিগুলির জন্য সকল এট্রিবিউট পাওয়া যাবে [Attributions](../attributions.md) ফোল্ডারটিতে । diff --git a/translations/README.tr.md b/translations/README.tr.md index 0b66160d..cfda4ec7 100644 --- a/translations/README.tr.md +++ b/translations/README.tr.md @@ -13,28 +13,26 @@ [![Chinese](https://img.shields.io/badge/-Chinese-yellow)](README.zh-cn.md) # Yeni Başlayanlar için IOT + Microsoft'tan Azure Cloud Advocates size IOT temelleri hakkında 12 haftalık 24 dersten oluşan programı zevkle sunar. Her ders ön-quiz, dersi tamamlamanız için talimatlar, bir çözüm, bir ödev ve ders sonrası quiz içerir. 
Proje tabanlı pedagojimiz öğrenirken bir şeyler oluşturmanıza izin verecek. Bu, ispatlanmıştır ki yeni becerileri adeta size "yapıştıracak". Projeler, yemeğimizin çiftlikten sofralara olan yolculuğuyla ilgili. Buna; tarım, taşımacılık, işleme, satış, müşteriler gibi IOT cihazları için tüm popüler endüstri alanları dahildir. - -![Girişi, çiftçiliği, taşımacılığı, işlemeyi, satışı ve pişirmeyi kapsayan 24 dersin yol haritası](sketchnotes/Roadmap.jpg) - +![Girişi, çiftçiliği, taşımacılığı, işlemeyi, satışı ve pişirmeyi kapsayan 24 dersin yol haritası](../sketchnotes/Roadmap.jpg) > [Nitya Narasimhan](https://github.com/nitya) 'dan taslak notu. Daha büyük hali için resme tıklayın - **Yazarlarımıza en kalbi duygularla teşekkür ederiz [Jen Fox](https://github.com/jenfoxbot), [Jen Looper](https://github.com/jlooper), [Jim Bennett](https://github.com/jimbobbennett), ve taslak notu için [Nitya Narasimhan](https://github.com/nitya) 'a** **[Microsoft Learn Student Ambassadors](https://studentambassadors.microsoft.com?WT.mc_id=academic-17441-jabenn) ekibimize de teşekkür ederiz. Dersleri gözden geçirenler ve çeşitli dillere çevirenler - [Aditya Garg](https://github.com/AdityaGarg00), [Anurag Sharma](https://github.com/Anurag-0-1-A), [Arpita Das](https://github.com/Arpiiitaaa), [Aryan Jain](https://www.linkedin.com/in/aryan-jain-47a4a1145/), [Bhavesh Suneja](https://github.com/EliteWarrior315), [Faith Hunja](https://faithhunja.github.io/), [Lateefah Bello](https://www.linkedin.com/in/lateefah-bello/), [Manvi Jha](https://github.com/Severus-Matthew), [Mireille Tan](https://www.linkedin.com/in/mireille-tan-a4834819a/), [Mohammad Iftekher (Iftu) Ebne Jalal](https://github.com/Iftu119), [Mohammad Zulfikar](https://github.com/mohzulfikar), [Priyanshu Srivastav](https://www.linkedin.com/in/priyanshu-srivastav-b067241ba), [Thanmai Gowducheruvu](https://github.com/innovation-platform), and [Zina Kamel](https://www.linkedin.com/in/zina-kamel/).** Takımla tanışın! 
-[![Tanıtım videosu](./images/iot-for-beginners.png)](https://youtu.be/-wippUJRi5k) +[![Tanıtım videosu](../images/iot-for-beginners.png)](https://youtu.be/-wippUJRi5k) > 🎥 Proje hakkındaki video için yukarıdaki resme tıklayın! -> **Öğretmenler**, için bu dersleri nasıl kullancaklarına dair [bazı öneriler](for-teachers.md). Eğer kendi derslerinizi oluşturmak istiyorsanız [ders taslağı](lesson-template/README.md) ekledik. +> **Öğretmenler**, için bu dersleri nasıl kullanacaklarına dair [bazı öneriler](../for-teachers.md). Eğer kendi derslerinizi oluşturmak istiyorsanız [ders taslağı](../lesson-template/README.md) ekledik. > **Öğrenciler**, bu dersleri kendiniz için kullanmak istiyorsanız tüm repo'yu fork'layın ve tüm egzersizleri bitirin. Ön-quizlerle başlayın, sonra bölümü okuyun ve kalan etkinlikleri bitirin. Çözüm için kodu kopyalamaktansa kendiniz projeler oluşturun ve anlayın, ama çözüm kodları her proje tabanlı ders içerisinde /solutions klasörünün içindedir. Başka bir fikir de arkadaşlarınızla çalışma grupları oluşturmak ve beraber gitmektir. Daha fazla çalışma için [Microsoft Learn](https://docs.microsoft.com/users/jimbobbennett/collections/ke2ehd351jopwr?WT.mc_id=academic-17441-jabenn). @@ -56,9 +54,9 @@ Her proje öğrencilerde ve hobicilerde bulunan gerçek donanımlara dayanır. H ## Hardware -We have two choices of IoT hardware to use for the projects depending on personal preference, programming language knowledge or preferences, learning goals and availability. We have also provided a 'virtual hardware' version for those who don't have access to hardware, or want to learn more before committing to a purchase. You can read more and find a 'shopping list' on the [hardware page](./hardware.md), including links to buy complete kits from our friends at Seeed Studio. +We have two choices of IoT hardware to use for the projects depending on personal preference, programming language knowledge or preferences, learning goals and availability. 
We have also provided a 'virtual hardware' version for those who don't have access to hardware, or want to learn more before committing to a purchase. You can read more and find a 'shopping list' on the [hardware page](../hardware.md), including links to buy complete kits from our friends at Seeed Studio. -> 💁 Find our [Code of Conduct](CODE_OF_CONDUCT.md), [Contributing](CONTRIBUTING.md), and [Translation](TRANSLATIONS.md) guidelines. We welcome your constructive feedback! +> 💁 Find our [Code of Conduct](../CODE_OF_CONDUCT.md), [Contributing](../CONTRIBUTING.md), and [Translation](../TRANSLATIONS.md) guidelines. We welcome your constructive feedback! ## Her ders şunları içerir: @@ -79,33 +77,34 @@ We have two choices of IoT hardware to use for the projects depending on persona | | Proje Adı | Öğretilen Kavramlar | Hedeflenen Konular | Bağlantılı Ders | | :-: | :----------: | :-------------: | ------------------- | :-----------: | -| 01 | [Başlangıç](./1-getting-started) | Nesnelerin internetine giriş |İlk IoT cihazınızı yaparken, IoT'nin temel ilkelerini, sensörler ve bulut hizmetleri gibi IoT çözümlerinin temellerini öğrenin. 
| [Nesnelerin internetine giriş](./1-getting-started/lessons/1-introduction-to-iot/README.md) | -| 02 | [Başlangıç](./1-getting-started) | IOT'ye daha derin bir dalış | IoT sistemlerinin bileşenleri hakkında daha fazlasını öğrenin hem de mikro işlemcileri ve tek-kart bilgisayarları | [IOT'ye daha derin bir dalış](./1-getting-started/lessons/2-deeper-dive/README.md) | -| 03 | [Başlangıç](./1-getting-started) | Sensörler ve aktüatörler ile gerçek dünyayla etkileşin | Gece lambası inşa ederken sensörlerin fiziksel dünyadan veri toplamalarını ve aktüatörler in tepki vermelerini öğrenin L | [Sensörler ve aktüatörler ile gerçek dünyayla etkileşin](./1-getting-started/lessons/3-sensors-and-actuators/README.md) | -| 04 | [Başlangıç](./1-getting-started) | Devrenizi internete bağlayın | Devrelerin internete nasıl bağlandığını ve internetten nasıl mesaj aldıklarını gece lambanızı MQTT'ye bağlayarak öğrenin. | [Devrenizi internete bağlayın](./1-getting-started/lessons/4-connect-internet/README.md) | -| 05 | [Çiftlik](./2-farm) | Bitkinin büyümesini tahmin edin | IoT devresiyle toplanan sıcaklık verisinin bitki büyümesini tahmin etmede nasıl kullanıldığını öğrenin | [Bitkinin büyümesini tahmin edin](./2-farm/lessons/1-predict-plant-growth/README.md) | -| 06 | [Çiftlik](./2-farm) | Toprak nemini algılayın | Toprak neminin nasıl tespit edildiğini ve toprak nem sensörünün nasıl kalibre edildiğini öğrenin. 
| [Toprak nemini algılayın](./2-farm/lessons/2-detect-soil-moisture/README.md) | -| 07 | [Çiftlik](./2-farm) | Otomatik Bitki Sulama| Sulamanın nasıl otomatikleştirildiğini bir röle ve MQTT kullanarak öğrenin | [Otomatik Bitki Sulama](./2-farm/lessons/3-automated-plant-watering/README.md) | -| 08 | [Çiftlik](./2-farm) | Bitkinizi buluta taşıyın | Bulut tabanlı IOT servislerini ve MQTT yerine bunları kullanmayı öğrenin | [Bitkinizi buluta taşıyın](./2-farm/lessons/4-migrate-your-plant-to-the-cloud/README.md) | -| 09 | [Çiftlik](./2-farm) | Uygulama mantığını buluta taşıyın | IOT mesajlarını bulutta cevaplayan ugulama mantığı nasıl yazılır | [Uygulama mantığını buluta taşıyın](./2-farm/lessons/5-migrate-application-to-the-cloud/README.md) | -| 10 | [Çiftlik](./2-farm) | Bitkinizi güvende tutun | IoT'nin güvenliğini ve bitkinizi anahtarlarla ve sertifikalarla güvende tutmayı öğrenin | [Bitkinizi güvende tutun](./2-farm/lessons/6-keep-your-plant-secure/README.md) | -| 11 | [Nakliyat](./3-transport) | Konum takibi | GPS ile IoT cihazlarını takip etmeyi öğrenin. | [Konum takibi](./3-transport/lessons/1-location-tracking/README.md) | -| 12 | [Nakliyat](./3-transport) | Konum bilgilerini depolayın | IoT verilerinin sonradan görselleştirilme ve analiz için nasıl saklandığını öğrenin | [Konum bilgilerini depolayın](./3-transport/lessons/2-store-location-data/README.md) | -| 13 | [Nakliyat](./3-transport) | Konum verilerini görselleştirin | Konum verisini harita üzerinde görselleştirmeyi ve haritaların 3 boyutlu dünyamızı nasıl 2 boyutlu gösterdiğini öğrenin | [Konum verilerini görselleştirin](./3-transport/lessons/3-visualize-location-data/README.md) | -| 14 | [Nakliyat](./3-transport) | Coğrafi Sınırlar | Coğrafi sınırları, tedarik zincirindeki araçları hedefe yaklaştıklarında uyarmak için coğrafi sınırları nasıl kullanacağınızı öğrenin. 
| [Coğrafi Sınırlar](./3-transport/lessons/4-geofences/README.md) | -| 15 | [Üretim](./4-manufacturing) | Meyve kalite kontrolcüsünü eğitin | Meyvelerin kalitesini kontrol etmek için buluttaki bir resim sınıflandırma algoritmasını eğitmeyi öğrenin. | [Meyve kalite algılayıcısını eğitin](./4-manufacturing/lessons/1-train-fruit-detector/README.md) | -| 16 | [Üretim](./4-manufacturing) | IoT devresinden meyvelerin kalitesini kontrol edin | Meyve kalitesini IoT cihazınızdan nasıl kontrol edeceğinizi öğrenin | [IoT devresinden meyvelerin kalitesini kontrol edin](./4-manufacturing/lessons/2-check-fruit-from-device/README.md) | -| 17 | [Üretim](./4-manufacturing) |Meyve dedektörünüzü bir köşede çalıştırın| Meyve dedektörünüzü ve IoT devrelerini bir köşede nasıl çalıştıracağınızı öğrenin. | [Meyve dedektörünüzü bir köşede çalıştırın](./4-manufacturing/lessons/3-run-fruit-detector-edge/README.md) | -| 18 | [Üretim](./4-manufacturing) | Bir sensörden meyve kalitesini algılamayı tetikleyin | Bir sensörden meyve kalitesinin algılanmasını nasıl tetikleyebileceğinizi öğrenin. | [Bir sensörden meyve kalitesini algılamayı tetikleyin](./4-manufacturing/lessons/4-trigger-fruit-detector/README.md) | -| 19 | [Perakende](./5-retail) | Stok dedektörünü eğitin | Marketinizdeki stoğu sayması için nesne tanıyan dedektörün nasıl eğitildiğini öğrenin | [Stok dedektörünü eğitin](./5-retail/lessons/1-train-stock-detector/README.md) | -| 20 | [Perakende](./5-retail) | Stokları IoT cihazınız ile kontrol edin | Stokları nesne tanıyan IoT cihazınız ile kontrol etmeyi öğrenin | [CStokları IoT cihazınız ile kontrol edin](./5-retail/lessons/2-check-stock-device/README.md) | -| 21 | [Tüketici](./6-consumer) | IoT cihazınız ile konuşma tanıyın | Akıllı bir zamanlayıcı oluşturmak için IoT cihazınızla konuşma tanıyacağınızı öğrenin. 
| [IoT cihazınız ile konuşma tanıyın ](./6-consumer/lessons/1-speech-recognition/README.md) | -| 22 | [Tüketici](./6-consumer) | Dili anlayın | IoT cihazınızın konuşulan cümleleri nasıl anladığını öğrenin | [Dili anlayın](./6-consumer/lessons/2-language-understanding/README.md) | -| 23 | [Tüketici](./6-consumer) | Bir zamanlayıcı kurun ve konuşturun | IoT cihazları için nasıl zamanlayıcı oluşturmayı ve zamanlayıcı kurulup çalıştıktan sonra IoT cihazlarına konuşarak geri bildirim verdirmeyi öğrenin.| [Bir zamanlayıcı kurun ve konuşturun](./6-consumer/lessons/3-spoken-feedback/README.md) | -| 24 | [Tüketici](./6-consumer) | Çoklu dil desteği | Hem konuşulan hem de geri bildirim için zamanlayıcınıza nasıl çoklu dil desteği sunulduğunu öğrenin | [Çoklu dil desteği](./6-consumer/lessons/4-multiple-language-support/README.md) | +| 01 | [Başlangıç](../1-getting-started) | Nesnelerin internetine giriş |İlk IoT cihazınızı yaparken, IoT'nin temel ilkelerini, sensörler ve bulut hizmetleri gibi IoT çözümlerinin temellerini öğrenin. | [Nesnelerin internetine giriş](../1-getting-started/lessons/1-introduction-to-iot/README.md) | +| 02 | [Başlangıç](../1-getting-started) | IOT'ye daha derin bir dalış | IoT sistemlerinin bileşenleri hakkında daha fazlasını öğrenin hem de mikro işlemcileri ve tek-kart bilgisayarları | [IOT'ye daha derin bir dalış](../1-getting-started/lessons/2-deeper-dive/README.md) | +| 03 | [Başlangıç](../1-getting-started) | Sensörler ve aktüatörler ile gerçek dünyayla etkileşin | Gece lambası inşa ederken sensörlerin fiziksel dünyadan veri toplamalarını ve aktüatörler in tepki vermelerini öğrenin L | [Sensörler ve aktüatörler ile gerçek dünyayla etkileşin](../1-getting-started/lessons/3-sensors-and-actuators/README.md) | +| 04 | [Başlangıç](../1-getting-started) | Devrenizi internete bağlayın | Devrelerin internete nasıl bağlandığını ve internetten nasıl mesaj aldıklarını gece lambanızı MQTT'ye bağlayarak öğrenin. 
| [Devrenizi internete bağlayın](../1-getting-started/lessons/4-connect-internet/README.md) | +| 05 | [Çiftlik](../2-farm) | Bitkinin büyümesini tahmin edin | IoT devresiyle toplanan sıcaklık verisinin bitki büyümesini tahmin etmede nasıl kullanıldığını öğrenin | [Bitkinin büyümesini tahmin edin](../2-farm/lessons/1-predict-plant-growth/README.md) | +| 06 | [Çiftlik](../2-farm) | Toprak nemini algılayın | Toprak neminin nasıl tespit edildiğini ve toprak nem sensörünün nasıl kalibre edildiğini öğrenin. | [Toprak nemini algılayın](../2-farm/lessons/2-detect-soil-moisture/README.md) | +| 07 | [Çiftlik](../2-farm) | Otomatik Bitki Sulama| Sulamanın nasıl otomatikleştirildiğini bir röle ve MQTT kullanarak öğrenin | [Otomatik Bitki Sulama](../2-farm/lessons/3-automated-plant-watering/README.md) | +| 08 | [Çiftlik](../2-farm) | Bitkinizi buluta taşıyın | Bulut tabanlı IOT servislerini ve MQTT yerine bunları kullanmayı öğrenin | [Bitkinizi buluta taşıyın](../2-farm/lessons/4-migrate-your-plant-to-the-cloud/README.md) | +| 09 | [Çiftlik](../2-farm) | Uygulama mantığını buluta taşıyın | IOT mesajlarını bulutta cevaplayan uygulama mantığı nasıl yazılır | [Uygulama mantığını buluta taşıyın](../2-farm/lessons/5-migrate-application-to-the-cloud/README.md) | +| 10 | [Çiftlik](../2-farm) | Bitkinizi güvende tutun | IoT'nin güvenliğini ve bitkinizi anahtarlarla ve sertifikalarla güvende tutmayı öğrenin | [Bitkinizi güvende tutun](../2-farm/lessons/6-keep-your-plant-secure/README.md) | +| 11 | [Nakliyat](../3-transport) | Konum takibi | GPS ile IoT cihazlarını takip etmeyi öğrenin. 
| [Konum takibi](../3-transport/lessons/1-location-tracking/README.md) | +| 12 | [Nakliyat](../3-transport) | Konum bilgilerini depolayın | IoT verilerinin sonradan görselleştirilme ve analiz için nasıl saklandığını öğrenin | [Konum bilgilerini depolayın](../3-transport/lessons/2-store-location-data/README.md) | +| 13 | [Nakliyat](../3-transport) | Konum verilerini görselleştirin | Konum verisini harita üzerinde görselleştirmeyi ve haritaların 3 boyutlu dünyamızı nasıl 2 boyutlu gösterdiğini öğrenin | [Konum verilerini görselleştirin](../3-transport/lessons/3-visualize-location-data/README.md) | +| 14 | [Nakliyat](../3-transport) | Coğrafi Sınırlar | Coğrafi sınırları, tedarik zincirindeki araçları hedefe yaklaştıklarında uyarmak için coğrafi sınırları nasıl kullanacağınızı öğrenin. | [Coğrafi Sınırlar](../3-transport/lessons/4-geofences/README.md) | +| 15 | [Üretim](../4-manufacturing) | Meyve kalite kontrolcüsünü eğitin | Meyvelerin kalitesini kontrol etmek için buluttaki bir resim sınıflandırma algoritmasını eğitmeyi öğrenin. | [Meyve kalite algılayıcısını eğitin](../4-manufacturing/lessons/1-train-fruit-detector/README.md) | +| 16 | [Üretim](../4-manufacturing) | IoT devresinden meyvelerin kalitesini kontrol edin | Meyve kalitesini IoT cihazınızdan nasıl kontrol edeceğinizi öğrenin | [IoT devresinden meyvelerin kalitesini kontrol edin](../4-manufacturing/lessons/2-check-fruit-from-device/README.md) | +| 17 | [Üretim](../4-manufacturing) |Meyve dedektörünüzü bir köşede çalıştırın| Meyve dedektörünüzü ve IoT devrelerini bir köşede nasıl çalıştıracağınızı öğrenin. | [Meyve dedektörünüzü bir köşede çalıştırın](../4-manufacturing/lessons/3-run-fruit-detector-edge/README.md) | +| 18 | [Üretim](../4-manufacturing) | Bir sensörden meyve kalitesini algılamayı tetikleyin | Bir sensörden meyve kalitesinin algılanmasını nasıl tetikleyebileceğinizi öğrenin. 
| [Bir sensörden meyve kalitesini algılamayı tetikleyin](../4-manufacturing/lessons/4-trigger-fruit-detector/README.md) | +| 19 | [Perakende](../5-retail) | Stok dedektörünü eğitin | Marketinizdeki stoğu sayması için nesne tanıyan dedektörün nasıl eğitildiğini öğrenin | [Stok dedektörünü eğitin](../5-retail/lessons/1-train-stock-detector/README.md) | +| 20 | [Perakende](../5-retail) | Stokları IoT cihazınız ile kontrol edin | Stokları nesne tanıyan IoT cihazınız ile kontrol etmeyi öğrenin | [Stokları IoT cihazınız ile kontrol edin](../5-retail/lessons/2-check-stock-device/README.md) | +| 21 | [Tüketici](../6-consumer) | IoT cihazınız ile konuşma tanıyın | Akıllı bir zamanlayıcı oluşturmak için IoT cihazınızla konuşma tanıyacağınızı öğrenin. | [IoT cihazınız ile konuşma tanıyın ](../6-consumer/lessons/1-speech-recognition/README.md) | +| 22 | [Tüketici](../6-consumer) | Dili anlayın | IoT cihazınızın konuşulan cümleleri nasıl anladığını öğrenin | [Dili anlayın](../6-consumer/lessons/2-language-understanding/README.md) | +| 23 | [Tüketici](../6-consumer) | Bir zamanlayıcı kurun ve konuşturun | IoT cihazları için nasıl zamanlayıcı oluşturmayı ve zamanlayıcı kurulup çalıştıktan sonra IoT cihazlarına konuşarak geri bildirim verdirmeyi öğrenin.| [Bir zamanlayıcı kurun ve konuşturun](../6-consumer/lessons/3-spoken-feedback/README.md) | +| 24 | [Tüketici](../6-consumer) | Çoklu dil desteği | Hem konuşulan hem de geri bildirim için zamanlayıcınıza nasıl çoklu dil desteği sunulduğunu öğrenin | [Çoklu dil desteği](../6-consumer/lessons/4-multiple-language-support/README.md) | ## Çevirim dışı erişim - Bu belgeleri çevirim dışı olarak [Docsify](https://docsify.js.org/#/) kullanarak çalıştırabilirsiniz. Bu repo'yu forklayın [Docsify'ı kurun](https://docsify.js.org/#/quickstart) ve bu repo'nun ana klasöründe `docsify serve` yazın. Website sizin yerelinizde: `localhost:3000`. + +Bu belgeleri çevirim dışı olarak [Docsify](https://docsify.js.org/#/) kullanarak çalıştırabilirsiniz. 
Bu repo'yu forklayın [Docsify'ı kurun](https://docsify.js.org/#/quickstart) ve bu repo'nun ana klasöründe `docsify serve` yazın. Website sizin yerelinizde: `localhost:3000`. ### PDF @@ -118,7 +117,7 @@ npm run convert ## Yardım Aranıyor! -Bir çeviriyle katkıda bulunmak ister miydiniz? Lütfen [çeviri rehberimizi](TRANSLATIONS.md) okuyun ve [çeviri issue'lerinden birine](https://github.com/microsoft/IoT-For-Beginners/issues?q=is%3Aissue+is%3Aopen+label%3Atranslation) yazınız. Eğer yeni bir dile çevirmek istiyorsanız lütfen yeni bir issue oluşturun. +Bir çeviriyle katkıda bulunmak ister miydiniz? Lütfen [çeviri rehberimizi](../TRANSLATIONS.md) okuyun ve [çeviri issue'lerinden birine](https://github.com/microsoft/IoT-For-Beginners/issues?q=is%3Aissue+is%3Aopen+label%3Atranslation) yazınız. Eğer yeni bir dile çevirmek istiyorsanız lütfen yeni bir issue oluşturun. ## Diğer Dersler @@ -129,4 +128,4 @@ Takımımız başka derler de yapıyor: ## Resim atıfları -Bu derslerde kullanılan tüm atıfları ihtiyaç halinde [buradan bulabilirsiniz](./attributions.md). +Bu derslerde kullanılan tüm atıfları ihtiyaç halinde [buradan bulabilirsiniz](../attributions.md). 
diff --git a/translations/README.zh-cn.md b/translations/README.zh-cn.md index 063b3f1b..15679afc 100644 --- a/translations/README.zh-cn.md +++ b/translations/README.zh-cn.md @@ -27,11 +27,11 @@ Microsoft 的 Azure Cloud 大使很高兴提供关于 IoT 基础一个12个星 **也感谢帮我们审查以及翻译这个课程的一组 [Microsoft Learn 学生大使](https://studentambassadors.microsoft.com?WT.mc_id=academic-17441-jabenn) :[Aditya Garg](https://github.com/AdityaGarg00), [Anurag Sharma](https://github.com/Anurag-0-1-A), [Arpita Das](https://github.com/Arpiiitaaa), [Aryan Jain](https://www.linkedin.com/in/aryan-jain-47a4a1145/), [Bhavesh Suneja](https://github.com/EliteWarrior315), [Faith Hunja](https://faithhunja.github.io/), [Lateefah Bello](https://www.linkedin.com/in/lateefah-bello/), [Manvi Jha](https://github.com/Severus-Matthew), [Mireille Tan](https://www.linkedin.com/in/mireille-tan-a4834819a/), [Mohammad Iftekher (Iftu) Ebne Jalal](https://github.com/Iftu119), [Mohammad Zulfikar](https://github.com/mohzulfikar), [Priyanshu Srivastav](https://www.linkedin.com/in/priyanshu-srivastav-b067241ba), [Thanmai Gowducheruvu](https://github.com/innovation-platform), 和 [Zina Kamel](https://www.linkedin.com/in/zina-kamel/).** -> **老师们**,我们为这个课程的用法 [提供了一些意见](../for-teachers.md)。如果你想自己创建课程,那我们也提供了一个[课程模板](lesson-template/README.md). +> **老师们**,我们为这个课程的用法 [提供了一些意见](../for-teachers.md)。如果你想自己创建课程,那我们也提供了一个[课程模板](../lesson-template/README.md). > **学生们**, 为了自己学习这个课程,请复刻整个项目库,再自己完成练习,从课前知识测验开始,再阅读讲座,然后完成剩余的活动。尝试通过理解课程的内容来完成项目,而不要仅仅把代码答案抄下来;然而,在每个项目课程里,你都能从 /solutions 文件夹访问那些答案代码。另外一个办法是跟朋友成立学习小组,然后一起分析内容。想进一步研究,我们鼓励你查一查[Microsoft Learn](https://docs.microsoft.com/users/jimbobbennett/collections/ke2ehd351jopwr?WT.mc_id=academic-17441-jabenn)。 -[![宣传片](./images/iot-for-beginners.png)](https://youtube.com/watch?v=bccEMm8gRuc "Promo video") +[![宣传片](../images/iot-for-beginners.png)](https://youtube.com/watch?v=bccEMm8gRuc "Promo video") > 🎥 点击以上的图片来看这个项目的宣传片! 
diff --git a/translations/hardware.bn.md b/translations/hardware.bn.md index 887d368e..96f4c2b3 100644 --- a/translations/hardware.bn.md +++ b/translations/hardware.bn.md @@ -12,7 +12,7 @@ IoT শব্দে **T** এর পূর্ণরূপ হলো **Things** ## ক্রয়তালিকা -![The Seeed studios logo](./images/seeed-logo.png) +![The Seeed studios logo](../images/seeed-logo.png) Seeed Studios থেকে সহজেই kit ক্রয় করা যাবে : @@ -20,19 +20,19 @@ Seeed Studios থেকে সহজেই kit ক্রয় করা যা **[IoT for beginners with Seeed and Microsoft - Wio Terminal Starter Kit](https://www.seeedstudio.com/IoT-for-beginners-with-Seeed-and-Microsoft-Wio-Terminal-Starter-Kit-p-5006.html)** -[![The Wio Terminal hardware kit](./images/wio-hardware-kit.png)](https://www.seeedstudio.com/IoT-for-beginners-with-Seeed-and-Microsoft-Wio-Terminal-Starter-Kit-p-5006.html) +[![The Wio Terminal hardware kit](../images/wio-hardware-kit.png)](https://www.seeedstudio.com/IoT-for-beginners-with-Seeed-and-Microsoft-Wio-Terminal-Starter-Kit-p-5006.html) ### রাস্পবেরি পাই **[IoT for beginners with Seeed and Microsoft - Raspberry Pi 4 Starter Kit](https://www.seeedstudio.com/IoT-for-beginners-with-Seeed-and-Microsoft-Raspberry-Pi-Starter-Kit.html)** -[![The Raspberry Pi Terminal hardware kit](./images/pi-hardware-kit.png)](https://www.seeedstudio.com/IoT-for-beginners-with-Seeed-and-Microsoft-Raspberry-Pi-Starter-Kit.html) +[![The Raspberry Pi Terminal hardware kit](../images/pi-hardware-kit.png)](https://www.seeedstudio.com/IoT-for-beginners-with-Seeed-and-Microsoft-Raspberry-Pi-Starter-Kit.html) ## আরডুইনো আরডুইনো এর সব কোড C++ ভাষায় করা হয়। অ্যাসাইনমেন্ট সম্পন্ন করতে নিম্নলিখিত উপাদানগুলির প্রয়োজন হবে: -### আরডুইনো হার্ডওয়্যার +### আরডুইনো হার্ডওয়্যার * [Wio Terminal](https://www.seeedstudio.com/Wio-Terminal-p-4509.html) * *ঐচ্ছিক* - USB-C ক্যাবল অথবা USB-A থেকে USB-C এডাপ্টার - উইও টার্মিনালে একটি ইউএসবি-সি পোর্ট রয়েছে এবং এটিতে ইউএসবি-সি থেকে ইউএসবি-এ ক্যাবল থাকে। যদি ব্যবহারকারীর পিসি বা ম্যাক এ কেবল ইউএসবি-সি পোর্ট থাকে তবে 
একটি ইউএসবি-সি বা ইউএসবি-এ থেকে ইউএসবি-সি অ্যাডাপ্টার প্রয়োজন হবে।