{ "cells": [ { "source": [ "# Build Classification Models" ], "cell_type": "markdown", "metadata": {} }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "output_type": "execute_result", "data": { "text/plain": [ " Unnamed: 0 cuisine almond angelica anise anise_seed apple \\\n", "0 0 indian 0 0 0 0 0 \n", "1 1 indian 1 0 0 0 0 \n", "2 2 indian 0 0 0 0 0 \n", "3 3 indian 0 0 0 0 0 \n", "4 4 indian 0 0 0 0 0 \n", "\n", " apple_brandy apricot armagnac ... whiskey white_bread white_wine \\\n", "0 0 0 0 ... 0 0 0 \n", "1 0 0 0 ... 0 0 0 \n", "2 0 0 0 ... 0 0 0 \n", "3 0 0 0 ... 0 0 0 \n", "4 0 0 0 ... 0 0 0 \n", "\n", " whole_grain_wheat_flour wine wood yam yeast yogurt zucchini \n", "0 0 0 0 0 0 0 0 \n", "1 0 0 0 0 0 0 0 \n", "2 0 0 0 0 0 0 0 \n", "3 0 0 0 0 0 0 0 \n", "4 0 0 0 0 0 1 0 \n", "\n", "[5 rows x 382 columns]" ], "text/html": "
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
Unnamed: 0cuisinealmondangelicaaniseanise_seedappleapple_brandyapricotarmagnac...whiskeywhite_breadwhite_winewhole_grain_wheat_flourwinewoodyamyeastyogurtzucchini
00indian00000000...0000000000
11indian10000000...0000000000
22indian00000000...0000000000
33indian00000000...0000000000
44indian00000000...0000000010
\n

5 rows × 382 columns

\n
" }, "metadata": {}, "execution_count": 1 } ], "source": [ "import pandas as pd\n", "cuisines_df = pd.read_csv(\"../../data/cleaned_cuisines.csv\")\n", "cuisines_df.head()" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "from sklearn.linear_model import LogisticRegression\n", "from sklearn.model_selection import train_test_split, cross_val_score\n", "from sklearn.metrics import accuracy_score,precision_score,confusion_matrix,classification_report, precision_recall_curve\n", "from sklearn.svm import SVC\n", "import numpy as np" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "output_type": "execute_result", "data": { "text/plain": [ "0 indian\n", "1 indian\n", "2 indian\n", "3 indian\n", "4 indian\n", "Name: cuisine, dtype: object" ] }, "metadata": {}, "execution_count": 3 } ], "source": [ "cuisines_label_df = cuisines_df['cuisine']\n", "cuisines_label_df.head()" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "output_type": "execute_result", "data": { "text/plain": [ " almond angelica anise anise_seed apple apple_brandy apricot \\\n", "0 0 0 0 0 0 0 0 \n", "1 1 0 0 0 0 0 0 \n", "2 0 0 0 0 0 0 0 \n", "3 0 0 0 0 0 0 0 \n", "4 0 0 0 0 0 0 0 \n", "\n", " armagnac artemisia artichoke ... whiskey white_bread white_wine \\\n", "0 0 0 0 ... 0 0 0 \n", "1 0 0 0 ... 0 0 0 \n", "2 0 0 0 ... 0 0 0 \n", "3 0 0 0 ... 0 0 0 \n", "4 0 0 0 ... 0 0 0 \n", "\n", " whole_grain_wheat_flour wine wood yam yeast yogurt zucchini \n", "0 0 0 0 0 0 0 0 \n", "1 0 0 0 0 0 0 0 \n", "2 0 0 0 0 0 0 0 \n", "3 0 0 0 0 0 0 0 \n", "4 0 0 0 0 0 1 0 \n", "\n", "[5 rows x 380 columns]" ], "text/html": "
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
almondangelicaaniseanise_seedappleapple_brandyapricotarmagnacartemisiaartichoke...whiskeywhite_breadwhite_winewhole_grain_wheat_flourwinewoodyamyeastyogurtzucchini
00000000000...0000000000
11000000000...0000000000
20000000000...0000000000
30000000000...0000000000
40000000000...0000000010
\n

5 rows × 380 columns

\n
" }, "metadata": {}, "execution_count": 4 } ], "source": [ "cuisines_feature_df = cuisines_df.drop(['Unnamed: 0', 'cuisine'], axis=1)\n", "cuisines_feature_df.head()" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [], "source": [ "X_train, X_test, y_train, y_test = train_test_split(cuisines_feature_df, cuisines_label_df, test_size=0.3)" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Accuracy is 0.8181818181818182\n" ] } ], "source": [ "lr = LogisticRegression(multi_class='ovr',solver='liblinear')\n", "model = lr.fit(X_train, np.ravel(y_train))\n", "\n", "accuracy = model.score(X_test, y_test)\n", "print (\"Accuracy is {}\".format(accuracy))" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "ingredients: Index(['artemisia', 'black_pepper', 'mushroom', 'shiitake', 'soy_sauce',\n 'vegetable_oil'],\n dtype='object')\ncuisine: korean\n" ] } ], "source": [ "# test an item\n", "print(f'ingredients: {X_test.iloc[50][X_test.iloc[50]!=0].keys()}')\n", "print(f'cuisine: {y_test.iloc[50]}')" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "output_type": "execute_result", "data": { "text/plain": [ " 0\n", "korean 0.392231\n", "chinese 0.372872\n", "japanese 0.218825\n", "thai 0.013427\n", "indian 0.002645" ], "text/html": "
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
0
korean0.392231
chinese0.372872
japanese0.218825
thai0.013427
indian0.002645
\n
" }, "metadata": {}, "execution_count": 8 } ], "source": [ "#rehsape to 2d array and transpose\n", "test= X_test.iloc[50].values.reshape(-1, 1).T\n", "# predict with score\n", "proba = model.predict_proba(test)\n", "classes = model.classes_\n", "# create df with classes and scores\n", "resultdf = pd.DataFrame(data=proba, columns=classes)\n", "\n", "# create df to show results\n", "topPrediction = resultdf.T.sort_values(by=[0], ascending = [False])\n", "topPrediction.head()" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ " precision recall f1-score support\n\n chinese 0.75 0.73 0.74 223\n indian 0.93 0.88 0.90 255\n japanese 0.78 0.78 0.78 253\n korean 0.87 0.86 0.86 236\n thai 0.76 0.84 0.80 232\n\n accuracy 0.82 1199\n macro avg 0.82 0.82 0.82 1199\nweighted avg 0.82 0.82 0.82 1199\n\n" ] } ], "source": [ "y_pred = model.predict(X_test)\r\n", "print(classification_report(y_test,y_pred))" ] } ], "metadata": { "interpreter": { "hash": "70b38d7a306a849643e446cd70466270a13445e5987dfa1344ef2b127438fa4d" }, "kernelspec": { "name": "python3", "display_name": "Python 3.7.0 64-bit ('3.7')" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.0" }, "metadata": { "interpreter": { "hash": "70b38d7a306a849643e446cd70466270a13445e5987dfa1344ef2b127438fa4d" } } }, "nbformat": 4, "nbformat_minor": 4 }