IoT-For-Beginners/5-retail/lessons/2-check-stock-device/code-count/pi/fruit-quality-detector/app.py

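# Count stock items in a photo taken with the Raspberry Pi camera, using a
# Custom Vision object detection model to find each item.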
import io
import time
from picamera import PiCamera
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from msrest.authentication import ApiKeyCredentials
from PIL import Image, ImageDraw, ImageColor
from shapely.geometry import Polygon
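
# Set up the Raspberry Pi camera at 640x480 and give the sensor a couple of seconds to warm up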
camera = PiCamera()
camera.resolution = (640, 480)
camera.rotation = 0
time.sleep(2)
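
# Capture a JPEG image from the camera into an in-memory stream and rewind it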
image = io.BytesIO()
camera.capture(image, 'jpeg')
image.seek(0)
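
# Save a copy of the captured image to disk so bounding boxes can be drawn on it later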
with open('image.jpg', 'wb') as image_file:
    image_file.write(image.read())
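
# Prediction URL and key for the published Custom Vision model - replace the placeholders with your own values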
prediction_url = '<prediction_url>'
prediction_key = '<prediction key>'
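
# The endpoint, project ID and iteration name are all embedded in the prediction URL, so extract them from its parts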
parts = prediction_url.split('/')
endpoint = 'https://' + parts[2]
project_id = parts[6]
iteration_name = parts[9]
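
# Authenticate with the prediction key and create a Custom Vision prediction client for the endpoint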
prediction_credentials = ApiKeyCredentials(in_headers={"Prediction-key": prediction_key})
predictor = CustomVisionPredictionClient(endpoint, prediction_credentials)
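
# Rewind the image stream and run object detection against the published iteration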
image.seek(0)
results = predictor.detect_image(project_id, iteration_name, image)
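
# Only keep predictions with a probability above 30%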
threshold = 0.3
predictions = list(prediction for prediction in results.predictions if prediction.probability > threshold)
for prediction in predictions:
    print(f'{prediction.tag_name}:\t{prediction.probability * 100:.2f}%')
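
# Bounding boxes that overlap by more than this fraction of the smaller box are treated as duplicate detections of the same item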
overlap_threshold = 0.002
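
# Convert a prediction's bounding box into a shapely Polygon so overlap areas can be calculated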
def create_polygon(prediction):
    scale_left = prediction.bounding_box.left
    scale_top = prediction.bounding_box.top
    scale_right = prediction.bounding_box.left + prediction.bounding_box.width
    scale_bottom = prediction.bounding_box.top + prediction.bounding_box.height

    return Polygon([(scale_left, scale_top), (scale_right, scale_top), (scale_right, scale_bottom), (scale_left, scale_bottom)])
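
# Compare each pair of predictions and mark one of any overlapping pair as a duplicate to remove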
to_delete = []
for i in range(0, len(predictions)):
    polygon_1 = create_polygon(predictions[i])

    for j in range(i+1, len(predictions)):
        polygon_2 = create_polygon(predictions[j])
        overlap = polygon_1.intersection(polygon_2).area
        smallest_area = min(polygon_1.area, polygon_2.area)

        if overlap > (overlap_threshold * smallest_area):
            to_delete.append(predictions[i])
            break
for d in to_delete:
    predictions.remove(d)
print(f'Counted {len(predictions)} stock items')
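
# Draw a red box around each remaining prediction on the saved image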
with Image.open('image.jpg') as im:
    draw = ImageDraw.Draw(im)

    for prediction in predictions:
        scale_left = prediction.bounding_box.left
        scale_top = prediction.bounding_box.top
        scale_right = prediction.bounding_box.left + prediction.bounding_box.width
        scale_bottom = prediction.bounding_box.top + prediction.bounding_box.height

        left = scale_left * im.width
        top = scale_top * im.height
        right = scale_right * im.width
        bottom = scale_bottom * im.height

        draw.rectangle([left, top, right, bottom], outline=ImageColor.getrgb('red'), width=2)

    im.save('image.jpg')