pull/717/head
jnopareboateng 2 years ago
parent cc7e613d90
commit fe8dcd09c8

File diff suppressed because one or more lines are too long

@@ -1,5 +1,663 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Build Classification Models"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>Unnamed: 0</th>\n",
" <th>cuisine</th>\n",
" <th>almond</th>\n",
" <th>angelica</th>\n",
" <th>anise</th>\n",
" <th>anise_seed</th>\n",
" <th>apple</th>\n",
" <th>apple_brandy</th>\n",
" <th>apricot</th>\n",
" <th>armagnac</th>\n",
" <th>...</th>\n",
" <th>whiskey</th>\n",
" <th>white_bread</th>\n",
" <th>white_wine</th>\n",
" <th>whole_grain_wheat_flour</th>\n",
" <th>wine</th>\n",
" <th>wood</th>\n",
" <th>yam</th>\n",
" <th>yeast</th>\n",
" <th>yogurt</th>\n",
" <th>zucchini</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>0</td>\n",
" <td>indian</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>...</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>1</td>\n",
" <td>indian</td>\n",
" <td>1</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>...</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>2</td>\n",
" <td>indian</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>...</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>3</td>\n",
" <td>indian</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>...</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>4</td>\n",
" <td>indian</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>...</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>1</td>\n",
" <td>0</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"<p>5 rows × 382 columns</p>\n",
"</div>"
],
"text/plain": [
" Unnamed: 0 cuisine almond angelica anise anise_seed apple \\\n",
"0 0 indian 0 0 0 0 0 \n",
"1 1 indian 1 0 0 0 0 \n",
"2 2 indian 0 0 0 0 0 \n",
"3 3 indian 0 0 0 0 0 \n",
"4 4 indian 0 0 0 0 0 \n",
"\n",
" apple_brandy apricot armagnac ... whiskey white_bread white_wine \\\n",
"0 0 0 0 ... 0 0 0 \n",
"1 0 0 0 ... 0 0 0 \n",
"2 0 0 0 ... 0 0 0 \n",
"3 0 0 0 ... 0 0 0 \n",
"4 0 0 0 ... 0 0 0 \n",
"\n",
" whole_grain_wheat_flour wine wood yam yeast yogurt zucchini \n",
"0 0 0 0 0 0 0 0 \n",
"1 0 0 0 0 0 0 0 \n",
"2 0 0 0 0 0 0 0 \n",
"3 0 0 0 0 0 0 0 \n",
"4 0 0 0 0 0 1 0 \n",
"\n",
"[5 rows x 382 columns]"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import pandas as pd\n",
"cuisines_df = pd.read_csv(\"../data/cleaned_cuisines.csv\")\n",
"cuisines_df.head()"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"from sklearn.linear_model import LogisticRegression\n",
"from sklearn.model_selection import train_test_split, cross_val_score\n",
"from sklearn.metrics import accuracy_score,precision_score,confusion_matrix,classification_report, precision_recall_curve\n",
"from sklearn.svm import SVC\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"0 indian\n",
"1 indian\n",
"2 indian\n",
"3 indian\n",
"4 indian\n",
"Name: cuisine, dtype: object"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"cuisines_label_df = cuisines_df['cuisine']\n",
"cuisines_label_df.head()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>almond</th>\n",
" <th>angelica</th>\n",
" <th>anise</th>\n",
" <th>anise_seed</th>\n",
" <th>apple</th>\n",
" <th>apple_brandy</th>\n",
" <th>apricot</th>\n",
" <th>armagnac</th>\n",
" <th>artemisia</th>\n",
" <th>artichoke</th>\n",
" <th>...</th>\n",
" <th>whiskey</th>\n",
" <th>white_bread</th>\n",
" <th>white_wine</th>\n",
" <th>whole_grain_wheat_flour</th>\n",
" <th>wine</th>\n",
" <th>wood</th>\n",
" <th>yam</th>\n",
" <th>yeast</th>\n",
" <th>yogurt</th>\n",
" <th>zucchini</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>...</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>1</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>...</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>...</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>...</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>...</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>1</td>\n",
" <td>0</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"<p>5 rows × 380 columns</p>\n",
"</div>"
],
"text/plain": [
" almond angelica anise anise_seed apple apple_brandy apricot \\\n",
"0 0 0 0 0 0 0 0 \n",
"1 1 0 0 0 0 0 0 \n",
"2 0 0 0 0 0 0 0 \n",
"3 0 0 0 0 0 0 0 \n",
"4 0 0 0 0 0 0 0 \n",
"\n",
" armagnac artemisia artichoke ... whiskey white_bread white_wine \\\n",
"0 0 0 0 ... 0 0 0 \n",
"1 0 0 0 ... 0 0 0 \n",
"2 0 0 0 ... 0 0 0 \n",
"3 0 0 0 ... 0 0 0 \n",
"4 0 0 0 ... 0 0 0 \n",
"\n",
" whole_grain_wheat_flour wine wood yam yeast yogurt zucchini \n",
"0 0 0 0 0 0 0 0 \n",
"1 0 0 0 0 0 0 0 \n",
"2 0 0 0 0 0 0 0 \n",
"3 0 0 0 0 0 0 0 \n",
"4 0 0 0 0 0 1 0 \n",
"\n",
"[5 rows x 380 columns]"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"cuisines_feature_df = cuisines_df.drop(['Unnamed: 0', 'cuisine'], axis=1)\n",
"cuisines_feature_df.head()"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"X_train, X_test, y_train, y_test = train_test_split(cuisines_feature_df, cuisines_label_df, test_size=0.3)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Accuracy is 0.7964970809007507\n"
]
}
],
"source": [
"lr = LogisticRegression(multi_class='ovr',solver='liblinear')\n",
"model = lr.fit(X_train, np.ravel(y_train))\n",
"\n",
"accuracy = model.score(X_test, y_test)\n",
"print (\"Accuracy is {}\".format(accuracy))"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [],
"source": [
"# lr = LogisticRegression(multi_class='ovr',solver='lbfgs')\n",
"# model = lr.fit(X_train, np.ravel(y_train))\n",
"\n",
"# # accuracy = model.score(X_test, y_test)\n",
"# print (\"Accuracy is {}\".format(accuracy))"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"ingredients: Index(['black_pepper', 'egg', 'scallion', 'soy_sauce'], dtype='object')\n",
"cuisine: korean\n"
]
}
],
"source": [
"print(f'ingredients: {X_test.iloc[50][X_test.iloc[50]!=0].keys()}')\n",
"print(f'cuisine: {y_test.iloc[50]}')"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"d:\\DEV WORK\\Data Science Library\\ML-For-Beginners\\.venv\\lib\\site-packages\\sklearn\\base.py:464: UserWarning: X does not have valid feature names, but LogisticRegression was fitted with feature names\n",
" warnings.warn(\n"
]
},
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>0</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>chinese</th>\n",
" <td>0.412795</td>\n",
" </tr>\n",
" <tr>\n",
" <th>korean</th>\n",
" <td>0.362903</td>\n",
" </tr>\n",
" <tr>\n",
" <th>japanese</th>\n",
" <td>0.171653</td>\n",
" </tr>\n",
" <tr>\n",
" <th>thai</th>\n",
" <td>0.051627</td>\n",
" </tr>\n",
" <tr>\n",
" <th>indian</th>\n",
" <td>0.001022</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" 0\n",
"chinese 0.412795\n",
"korean 0.362903\n",
"japanese 0.171653\n",
"thai 0.051627\n",
"indian 0.001022"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"test= X_test.iloc[50].values.reshape(-1, 1).T\n",
"proba = model.predict_proba(test)\n",
"classes = model.classes_\n",
"resultdf = pd.DataFrame(data=proba, columns=classes)\n",
"\n",
"topPrediction = resultdf.T.sort_values(by=[0], ascending = [False])\n",
"topPrediction.head()"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" precision recall f1-score support\n",
"\n",
" chinese 0.76 0.66 0.70 252\n",
" indian 0.91 0.88 0.90 232\n",
" japanese 0.76 0.77 0.77 245\n",
" korean 0.83 0.83 0.83 233\n",
" thai 0.74 0.86 0.79 237\n",
"\n",
" accuracy 0.80 1199\n",
" macro avg 0.80 0.80 0.80 1199\n",
"weighted avg 0.80 0.80 0.80 1199\n",
"\n"
]
}
],
"source": [
"y_pred = model.predict(X_test)\n",
"print(classification_report(y_test,y_pred))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
@@ -10,19 +668,10 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": 3
"version": "3.10.11"
},
"orig_nbformat": 2
},
"nbformat": 4,
"nbformat_minor": 2,
"cells": [
{
"source": [
"# Build Classification Models"
],
"cell_type": "markdown",
"metadata": {}
}
]
}
"nbformat_minor": 2
}

@@ -9,12 +9,194 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 21,
"metadata": {},
"outputs": [],
"source": [
"from sklearn.neighbors import KNeighborsClassifier\n",
"from sklearn.linear_model import LogisticRegression\n",
"from sklearn.svm import SVC\n",
"from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\n",
"from sklearn.model_selection import train_test_split, cross_val_score\n",
"from sklearn.metrics import accuracy_score,precision_score,confusion_matrix,classification_report, precision_recall_curve\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {},
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>Unnamed: 0</th>\n",
" <th>cuisine</th>\n",
" <th>almond</th>\n",
" <th>angelica</th>\n",
" <th>anise</th>\n",
" <th>anise_seed</th>\n",
" <th>apple</th>\n",
" <th>apple_brandy</th>\n",
" <th>apricot</th>\n",
" <th>armagnac</th>\n",
" <th>...</th>\n",
" <th>whiskey</th>\n",
" <th>white_bread</th>\n",
" <th>white_wine</th>\n",
" <th>whole_grain_wheat_flour</th>\n",
" <th>wine</th>\n",
" <th>wood</th>\n",
" <th>yam</th>\n",
" <th>yeast</th>\n",
" <th>yogurt</th>\n",
" <th>zucchini</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>0</td>\n",
" <td>indian</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>...</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>1</td>\n",
" <td>indian</td>\n",
" <td>1</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>...</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>2</td>\n",
" <td>indian</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>...</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>3</td>\n",
" <td>indian</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>...</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>4</td>\n",
" <td>indian</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>...</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>1</td>\n",
" <td>0</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"<p>5 rows × 382 columns</p>\n",
"</div>"
],
"text/plain": [
" Unnamed: 0 cuisine almond angelica anise anise_seed apple \\\n",
"0 0 indian 0 0 0 0 0 \n",
@@ -38,11 +220,11 @@
"4 0 0 0 0 0 1 0 \n",
"\n",
"[5 rows x 382 columns]"
],
"text/html": "<div>\n<style scoped>\n .dataframe tbody tr th:only-of-type {\n vertical-align: middle;\n }\n\n .dataframe tbody tr th {\n vertical-align: top;\n }\n\n .dataframe thead th {\n text-align: right;\n }\n</style>\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th>Unnamed: 0</th>\n <th>cuisine</th>\n <th>almond</th>\n <th>angelica</th>\n <th>anise</th>\n <th>anise_seed</th>\n <th>apple</th>\n <th>apple_brandy</th>\n <th>apricot</th>\n <th>armagnac</th>\n <th>...</th>\n <th>whiskey</th>\n <th>white_bread</th>\n <th>white_wine</th>\n <th>whole_grain_wheat_flour</th>\n <th>wine</th>\n <th>wood</th>\n <th>yam</th>\n <th>yeast</th>\n <th>yogurt</th>\n <th>zucchini</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>0</th>\n <td>0</td>\n <td>indian</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>...</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n </tr>\n <tr>\n <th>1</th>\n <td>1</td>\n <td>indian</td>\n <td>1</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>...</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n </tr>\n <tr>\n <th>2</th>\n <td>2</td>\n <td>indian</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>...</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n </tr>\n <tr>\n <th>3</th>\n <td>3</td>\n <td>indian</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>...</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n </tr>\n <tr>\n <th>4</th>\n <td>4</td>\n <td>indian</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>...</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>1</td>\n <td>0</td>\n </tr>\n </tbody>\n</table>\n<p>5 rows × 382 columns</p>\n</div>"
]
},
"execution_count": 22,
"metadata": {},
"execution_count": 9
"output_type": "execute_result"
}
],
"source": [
@@ -53,11 +235,10 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 23,
"metadata": {},
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"0 indian\n",
@@ -68,8 +249,9 @@
"Name: cuisine, dtype: object"
]
},
"execution_count": 23,
"metadata": {},
"execution_count": 10
"output_type": "execute_result"
}
],
"source": [
@@ -79,12 +261,179 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 24,
"metadata": {},
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>almond</th>\n",
" <th>angelica</th>\n",
" <th>anise</th>\n",
" <th>anise_seed</th>\n",
" <th>apple</th>\n",
" <th>apple_brandy</th>\n",
" <th>apricot</th>\n",
" <th>armagnac</th>\n",
" <th>artemisia</th>\n",
" <th>artichoke</th>\n",
" <th>...</th>\n",
" <th>whiskey</th>\n",
" <th>white_bread</th>\n",
" <th>white_wine</th>\n",
" <th>whole_grain_wheat_flour</th>\n",
" <th>wine</th>\n",
" <th>wood</th>\n",
" <th>yam</th>\n",
" <th>yeast</th>\n",
" <th>yogurt</th>\n",
" <th>zucchini</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>...</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>1</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>...</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>...</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>...</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>...</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>1</td>\n",
" <td>0</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"<p>5 rows × 380 columns</p>\n",
"</div>"
],
"text/plain": [
" almond angelica anise anise_seed apple apple_brandy apricot \\\n",
"0 0 0 0 0 0 0 0 \n",
@@ -108,17 +457,100 @@
"4 0 0 0 0 0 1 0 \n",
"\n",
"[5 rows x 380 columns]"
],
"text/html": "<div>\n<style scoped>\n .dataframe tbody tr th:only-of-type {\n vertical-align: middle;\n }\n\n .dataframe tbody tr th {\n vertical-align: top;\n }\n\n .dataframe thead th {\n text-align: right;\n }\n</style>\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th>almond</th>\n <th>angelica</th>\n <th>anise</th>\n <th>anise_seed</th>\n <th>apple</th>\n <th>apple_brandy</th>\n <th>apricot</th>\n <th>armagnac</th>\n <th>artemisia</th>\n <th>artichoke</th>\n <th>...</th>\n <th>whiskey</th>\n <th>white_bread</th>\n <th>white_wine</th>\n <th>whole_grain_wheat_flour</th>\n <th>wine</th>\n <th>wood</th>\n <th>yam</th>\n <th>yeast</th>\n <th>yogurt</th>\n <th>zucchini</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>0</th>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>...</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n </tr>\n <tr>\n <th>1</th>\n <td>1</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>...</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n </tr>\n <tr>\n <th>2</th>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>...</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n </tr>\n <tr>\n <th>3</th>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>...</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n </tr>\n <tr>\n <th>4</th>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>...</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>1</td>\n <td>0</td>\n </tr>\n </tbody>\n</table>\n<p>5 rows × 380 columns</p>\n</div>"
]
},
"execution_count": 24,
"metadata": {},
"execution_count": 11
"output_type": "execute_result"
}
],
"source": [
"cuisines_feature_df = cuisines_df.drop(['Unnamed: 0', 'cuisine'], axis=1)\n",
"cuisines_feature_df.head()"
]
},
{
"cell_type": "code",
"execution_count": 25,
"metadata": {},
"outputs": [],
"source": [
"X_train, X_test, y_train, y_test = train_test_split(cuisines_feature_df, cuisines_label_df, test_size=0.3)"
]
},
{
"cell_type": "code",
"execution_count": 26,
"metadata": {},
"outputs": [],
"source": [
"C = 10\n",
"# Create different classifiers.\n",
"classifiers = {\n",
" 'Linear SVC': SVC(kernel='linear', C=C, probability=True,random_state=0),\n",
" 'KNN classifier': KNeighborsClassifier(C),\n",
" 'SVC': SVC()\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": 27,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Accuracy (train) for Linear SVC: 77.8% \n",
" precision recall f1-score support\n",
"\n",
" chinese 0.68 0.75 0.71 260\n",
" indian 0.89 0.86 0.87 258\n",
" japanese 0.77 0.69 0.73 219\n",
" korean 0.85 0.77 0.81 221\n",
" thai 0.72 0.81 0.76 241\n",
"\n",
" accuracy 0.78 1199\n",
" macro avg 0.78 0.78 0.78 1199\n",
"weighted avg 0.78 0.78 0.78 1199\n",
"\n"
]
},
{
"ename": "AttributeError",
"evalue": "'Flags' object has no attribute 'c_contiguous'",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mAttributeError\u001b[0m Traceback (most recent call last)",
"\u001b[1;32md:\\DEV WORK\\Data Science Library\\ML-For-Beginners\\4-Classification\\3-Classifiers-2\\notebook.ipynb Cell 8\u001b[0m line \u001b[0;36m6\n\u001b[0;32m <a href='vscode-notebook-cell:/d%3A/DEV%20WORK/Data%20Science%20Library/ML-For-Beginners/4-Classification/3-Classifiers-2/notebook.ipynb#X11sZmlsZQ%3D%3D?line=2'>3</a>\u001b[0m \u001b[39mfor\u001b[39;00m index, (name, classifier) \u001b[39min\u001b[39;00m \u001b[39menumerate\u001b[39m(classifiers\u001b[39m.\u001b[39mitems()):\n\u001b[0;32m <a href='vscode-notebook-cell:/d%3A/DEV%20WORK/Data%20Science%20Library/ML-For-Beginners/4-Classification/3-Classifiers-2/notebook.ipynb#X11sZmlsZQ%3D%3D?line=3'>4</a>\u001b[0m classifier\u001b[39m.\u001b[39mfit(X_train, np\u001b[39m.\u001b[39mravel(y_train))\n\u001b[1;32m----> <a href='vscode-notebook-cell:/d%3A/DEV%20WORK/Data%20Science%20Library/ML-For-Beginners/4-Classification/3-Classifiers-2/notebook.ipynb#X11sZmlsZQ%3D%3D?line=5'>6</a>\u001b[0m y_pred \u001b[39m=\u001b[39m classifier\u001b[39m.\u001b[39;49mpredict(X_test)\n\u001b[0;32m <a href='vscode-notebook-cell:/d%3A/DEV%20WORK/Data%20Science%20Library/ML-For-Beginners/4-Classification/3-Classifiers-2/notebook.ipynb#X11sZmlsZQ%3D%3D?line=6'>7</a>\u001b[0m accuracy \u001b[39m=\u001b[39m accuracy_score(y_test, y_pred)\n\u001b[0;32m <a href='vscode-notebook-cell:/d%3A/DEV%20WORK/Data%20Science%20Library/ML-For-Beginners/4-Classification/3-Classifiers-2/notebook.ipynb#X11sZmlsZQ%3D%3D?line=7'>8</a>\u001b[0m \u001b[39mprint\u001b[39m(\u001b[39m\"\u001b[39m\u001b[39mAccuracy (train) for \u001b[39m\u001b[39m%s\u001b[39;00m\u001b[39m: \u001b[39m\u001b[39m%0.1f\u001b[39;00m\u001b[39m%%\u001b[39;00m\u001b[39m \u001b[39m\u001b[39m\"\u001b[39m \u001b[39m%\u001b[39m (name, accuracy \u001b[39m*\u001b[39m \u001b[39m100\u001b[39m))\n",
"File \u001b[1;32md:\\DEV WORK\\Data Science Library\\ML-For-Beginners\\.venv\\lib\\site-packages\\sklearn\\neighbors\\_classification.py:246\u001b[0m, in \u001b[0;36mKNeighborsClassifier.predict\u001b[1;34m(self, X)\u001b[0m\n\u001b[0;32m 244\u001b[0m check_is_fitted(\u001b[39mself\u001b[39m, \u001b[39m\"\u001b[39m\u001b[39m_fit_method\u001b[39m\u001b[39m\"\u001b[39m)\n\u001b[0;32m 245\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mweights \u001b[39m==\u001b[39m \u001b[39m\"\u001b[39m\u001b[39muniform\u001b[39m\u001b[39m\"\u001b[39m:\n\u001b[1;32m--> 246\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_fit_method \u001b[39m==\u001b[39m \u001b[39m\"\u001b[39m\u001b[39mbrute\u001b[39m\u001b[39m\"\u001b[39m \u001b[39mand\u001b[39;00m ArgKminClassMode\u001b[39m.\u001b[39;49mis_usable_for(\n\u001b[0;32m 247\u001b[0m X, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_fit_X, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mmetric\n\u001b[0;32m 248\u001b[0m ):\n\u001b[0;32m 249\u001b[0m probabilities \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mpredict_proba(X)\n\u001b[0;32m 250\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39moutputs_2d_:\n",
"File \u001b[1;32md:\\DEV WORK\\Data Science Library\\ML-For-Beginners\\.venv\\lib\\site-packages\\sklearn\\metrics\\_pairwise_distances_reduction\\_dispatcher.py:471\u001b[0m, in \u001b[0;36mArgKminClassMode.is_usable_for\u001b[1;34m(cls, X, Y, metric)\u001b[0m\n\u001b[0;32m 448\u001b[0m \u001b[39m@classmethod\u001b[39m\n\u001b[0;32m 449\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mis_usable_for\u001b[39m(\u001b[39mcls\u001b[39m, X, Y, metric) \u001b[39m-\u001b[39m\u001b[39m>\u001b[39m \u001b[39mbool\u001b[39m:\n\u001b[0;32m 450\u001b[0m \u001b[39m \u001b[39m\u001b[39m\"\"\"Return True if the dispatcher can be used for the given parameters.\u001b[39;00m\n\u001b[0;32m 451\u001b[0m \n\u001b[0;32m 452\u001b[0m \u001b[39m Parameters\u001b[39;00m\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 468\u001b[0m \u001b[39m True if the PairwiseDistancesReduction can be used, else False.\u001b[39;00m\n\u001b[0;32m 469\u001b[0m \u001b[39m \"\"\"\u001b[39;00m\n\u001b[0;32m 470\u001b[0m \u001b[39mreturn\u001b[39;00m (\n\u001b[1;32m--> 471\u001b[0m ArgKmin\u001b[39m.\u001b[39;49mis_usable_for(X, Y, metric)\n\u001b[0;32m 472\u001b[0m \u001b[39m# TODO: Support CSR matrices.\u001b[39;00m\n\u001b[0;32m 473\u001b[0m \u001b[39mand\u001b[39;00m \u001b[39mnot\u001b[39;00m issparse(X)\n\u001b[0;32m 474\u001b[0m \u001b[39mand\u001b[39;00m \u001b[39mnot\u001b[39;00m issparse(Y)\n\u001b[0;32m 475\u001b[0m \u001b[39m# TODO: implement Euclidean specialization with GEMM.\u001b[39;00m\n\u001b[0;32m 476\u001b[0m \u001b[39mand\u001b[39;00m metric \u001b[39mnot\u001b[39;00m \u001b[39min\u001b[39;00m (\u001b[39m\"\u001b[39m\u001b[39meuclidean\u001b[39m\u001b[39m\"\u001b[39m, \u001b[39m\"\u001b[39m\u001b[39msqeuclidean\u001b[39m\u001b[39m\"\u001b[39m)\n\u001b[0;32m 477\u001b[0m )\n",
"File \u001b[1;32md:\\DEV WORK\\Data Science Library\\ML-For-Beginners\\.venv\\lib\\site-packages\\sklearn\\metrics\\_pairwise_distances_reduction\\_dispatcher.py:115\u001b[0m, in \u001b[0;36mBaseDistancesReductionDispatcher.is_usable_for\u001b[1;34m(cls, X, Y, metric)\u001b[0m\n\u001b[0;32m 101\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mis_valid_sparse_matrix\u001b[39m(X):\n\u001b[0;32m 102\u001b[0m \u001b[39mreturn\u001b[39;00m (\n\u001b[0;32m 103\u001b[0m isspmatrix_csr(X)\n\u001b[0;32m 104\u001b[0m \u001b[39mand\u001b[39;00m\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 110\u001b[0m X\u001b[39m.\u001b[39mindices\u001b[39m.\u001b[39mdtype \u001b[39m==\u001b[39m X\u001b[39m.\u001b[39mindptr\u001b[39m.\u001b[39mdtype \u001b[39m==\u001b[39m np\u001b[39m.\u001b[39mint32\n\u001b[0;32m 111\u001b[0m )\n\u001b[0;32m 113\u001b[0m is_usable \u001b[39m=\u001b[39m (\n\u001b[0;32m 114\u001b[0m get_config()\u001b[39m.\u001b[39mget(\u001b[39m\"\u001b[39m\u001b[39menable_cython_pairwise_dist\u001b[39m\u001b[39m\"\u001b[39m, \u001b[39mTrue\u001b[39;00m)\n\u001b[1;32m--> 115\u001b[0m \u001b[39mand\u001b[39;00m (is_numpy_c_ordered(X) \u001b[39mor\u001b[39;00m is_valid_sparse_matrix(X))\n\u001b[0;32m 116\u001b[0m \u001b[39mand\u001b[39;00m (is_numpy_c_ordered(Y) \u001b[39mor\u001b[39;00m is_valid_sparse_matrix(Y))\n\u001b[0;32m 117\u001b[0m \u001b[39mand\u001b[39;00m X\u001b[39m.\u001b[39mdtype \u001b[39m==\u001b[39m Y\u001b[39m.\u001b[39mdtype\n\u001b[0;32m 118\u001b[0m \u001b[39mand\u001b[39;00m X\u001b[39m.\u001b[39mdtype \u001b[39min\u001b[39;00m (np\u001b[39m.\u001b[39mfloat32, np\u001b[39m.\u001b[39mfloat64)\n\u001b[0;32m 119\u001b[0m \u001b[39mand\u001b[39;00m metric \u001b[39min\u001b[39;00m \u001b[39mcls\u001b[39m\u001b[39m.\u001b[39mvalid_metrics()\n\u001b[0;32m 120\u001b[0m )\n\u001b[0;32m 122\u001b[0m \u001b[39mreturn\u001b[39;00m is_usable\n",
"File \u001b[1;32md:\\DEV WORK\\Data Science Library\\ML-For-Beginners\\.venv\\lib\\site-packages\\sklearn\\metrics\\_pairwise_distances_reduction\\_dispatcher.py:99\u001b[0m, in \u001b[0;36mBaseDistancesReductionDispatcher.is_usable_for.<locals>.is_numpy_c_ordered\u001b[1;34m(X)\u001b[0m\n\u001b[0;32m 98\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mis_numpy_c_ordered\u001b[39m(X):\n\u001b[1;32m---> 99\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mhasattr\u001b[39m(X, \u001b[39m\"\u001b[39m\u001b[39mflags\u001b[39m\u001b[39m\"\u001b[39m) \u001b[39mand\u001b[39;00m X\u001b[39m.\u001b[39;49mflags\u001b[39m.\u001b[39;49mc_contiguous\n",
"\u001b[1;31mAttributeError\u001b[0m: 'Flags' object has no attribute 'c_contiguous'"
]
}
],
"source": [
"n_classifiers = len(classifiers)\n",
"\n",
"for index, (name, classifier) in enumerate(classifiers.items()):\n",
" classifier.fit(X_train, np.ravel(y_train))\n",
"\n",
" y_pred = classifier.predict(X_test)\n",
" accuracy = accuracy_score(y_test, y_pred)\n",
" print(\"Accuracy (train) for %s: %0.1f%% \" % (name, accuracy * 100))\n",
" print(classification_report(y_test,y_pred))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
@@ -126,8 +558,8 @@
"hash": "70b38d7a306a849643e446cd70466270a13445e5987dfa1344ef2b127438fa4d"
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3.7.0 64-bit ('3.7')"
"display_name": "Python 3.7.0 64-bit ('3.7')",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
@@ -139,7 +571,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.0"
"version": "3.10.11"
},
"metadata": {
"interpreter": {
@@ -149,4 +581,4 @@
},
"nbformat": 4,
"nbformat_minor": 4
}
}

@@ -1,11 +1,11 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Build More Classification Models"
],
"cell_type": "markdown",
"metadata": {}
]
},
{
"cell_type": "code",
@@ -13,8 +13,175 @@
"metadata": {},
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>Unnamed: 0</th>\n",
" <th>cuisine</th>\n",
" <th>almond</th>\n",
" <th>angelica</th>\n",
" <th>anise</th>\n",
" <th>anise_seed</th>\n",
" <th>apple</th>\n",
" <th>apple_brandy</th>\n",
" <th>apricot</th>\n",
" <th>armagnac</th>\n",
" <th>...</th>\n",
" <th>whiskey</th>\n",
" <th>white_bread</th>\n",
" <th>white_wine</th>\n",
" <th>whole_grain_wheat_flour</th>\n",
" <th>wine</th>\n",
" <th>wood</th>\n",
" <th>yam</th>\n",
" <th>yeast</th>\n",
" <th>yogurt</th>\n",
" <th>zucchini</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>0</td>\n",
" <td>indian</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>...</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>1</td>\n",
" <td>indian</td>\n",
" <td>1</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>...</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>2</td>\n",
" <td>indian</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>...</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>3</td>\n",
" <td>indian</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>...</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>4</td>\n",
" <td>indian</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>...</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>1</td>\n",
" <td>0</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"<p>5 rows × 382 columns</p>\n",
"</div>"
],
"text/plain": [
" Unnamed: 0 cuisine almond angelica anise anise_seed apple \\\n",
"0 0 indian 0 0 0 0 0 \n",
@@ -38,11 +205,11 @@
"4 0 0 0 0 0 1 0 \n",
"\n",
"[5 rows x 382 columns]"
],
"text/html": "<div>\n<style scoped>\n .dataframe tbody tr th:only-of-type {\n vertical-align: middle;\n }\n\n .dataframe tbody tr th {\n vertical-align: top;\n }\n\n .dataframe thead th {\n text-align: right;\n }\n</style>\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th>Unnamed: 0</th>\n <th>cuisine</th>\n <th>almond</th>\n <th>angelica</th>\n <th>anise</th>\n <th>anise_seed</th>\n <th>apple</th>\n <th>apple_brandy</th>\n <th>apricot</th>\n <th>armagnac</th>\n <th>...</th>\n <th>whiskey</th>\n <th>white_bread</th>\n <th>white_wine</th>\n <th>whole_grain_wheat_flour</th>\n <th>wine</th>\n <th>wood</th>\n <th>yam</th>\n <th>yeast</th>\n <th>yogurt</th>\n <th>zucchini</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>0</th>\n <td>0</td>\n <td>indian</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>...</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n </tr>\n <tr>\n <th>1</th>\n <td>1</td>\n <td>indian</td>\n <td>1</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>...</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n </tr>\n <tr>\n <th>2</th>\n <td>2</td>\n <td>indian</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>...</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n </tr>\n <tr>\n <th>3</th>\n <td>3</td>\n <td>indian</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>...</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n </tr>\n <tr>\n <th>4</th>\n <td>4</td>\n <td>indian</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>...</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>1</td>\n <td>0</td>\n </tr>\n </tbody>\n</table>\n<p>5 rows × 382 columns</p>\n</div>"
]
},
"execution_count": 1,
"metadata": {},
"execution_count": 1
"output_type": "execute_result"
}
],
"source": [
@@ -57,7 +224,6 @@
"metadata": {},
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"0 indian\n",
@@ -68,8 +234,9 @@
"Name: cuisine, dtype: object"
]
},
"execution_count": 2,
"metadata": {},
"execution_count": 2
"output_type": "execute_result"
}
],
"source": [
@@ -83,8 +250,175 @@
"metadata": {},
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>almond</th>\n",
" <th>angelica</th>\n",
" <th>anise</th>\n",
" <th>anise_seed</th>\n",
" <th>apple</th>\n",
" <th>apple_brandy</th>\n",
" <th>apricot</th>\n",
" <th>armagnac</th>\n",
" <th>artemisia</th>\n",
" <th>artichoke</th>\n",
" <th>...</th>\n",
" <th>whiskey</th>\n",
" <th>white_bread</th>\n",
" <th>white_wine</th>\n",
" <th>whole_grain_wheat_flour</th>\n",
" <th>wine</th>\n",
" <th>wood</th>\n",
" <th>yam</th>\n",
" <th>yeast</th>\n",
" <th>yogurt</th>\n",
" <th>zucchini</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>...</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>1</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>...</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>...</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>...</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>...</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>0</td>\n",
" <td>1</td>\n",
" <td>0</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"<p>5 rows × 380 columns</p>\n",
"</div>"
],
"text/plain": [
" almond angelica anise anise_seed apple apple_brandy apricot \\\n",
"0 0 0 0 0 0 0 0 \n",
@@ -108,11 +442,11 @@
"4 0 0 0 0 0 1 0 \n",
"\n",
"[5 rows x 380 columns]"
],
"text/html": "<div>\n<style scoped>\n .dataframe tbody tr th:only-of-type {\n vertical-align: middle;\n }\n\n .dataframe tbody tr th {\n vertical-align: top;\n }\n\n .dataframe thead th {\n text-align: right;\n }\n</style>\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th>almond</th>\n <th>angelica</th>\n <th>anise</th>\n <th>anise_seed</th>\n <th>apple</th>\n <th>apple_brandy</th>\n <th>apricot</th>\n <th>armagnac</th>\n <th>artemisia</th>\n <th>artichoke</th>\n <th>...</th>\n <th>whiskey</th>\n <th>white_bread</th>\n <th>white_wine</th>\n <th>whole_grain_wheat_flour</th>\n <th>wine</th>\n <th>wood</th>\n <th>yam</th>\n <th>yeast</th>\n <th>yogurt</th>\n <th>zucchini</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>0</th>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>...</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n </tr>\n <tr>\n <th>1</th>\n <td>1</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>...</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n </tr>\n <tr>\n <th>2</th>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>...</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n </tr>\n <tr>\n <th>3</th>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>...</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n </tr>\n <tr>\n <th>4</th>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>...</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>0</td>\n <td>1</td>\n <td>0</td>\n </tr>\n </tbody>\n</table>\n<p>5 rows × 380 columns</p>\n</div>"
]
},
"execution_count": 3,
"metadata": {},
"execution_count": 3
"output_type": "execute_result"
}
],
"source": [
@@ -176,8 +510,8 @@
"metadata": {},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"output_type": "stream",
"text": [
"Accuracy (train) for Linear SVC: 76.4% \n",
" precision recall f1-score support\n",
@@ -265,8 +599,8 @@
"hash": "70b38d7a306a849643e446cd70466270a13445e5987dfa1344ef2b127438fa4d"
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3.7.0 64-bit ('3.7')"
"display_name": "Python 3.7.0 64-bit ('3.7')",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
@@ -278,7 +612,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.0"
"version": "3.10.11"
},
"metadata": {
"interpreter": {
@@ -288,4 +622,4 @@
},
"nbformat": 4,
"nbformat_minor": 4
}
}

File diff suppressed because it is too large

@@ -0,0 +1,393 @@
# Regression - Experimenting with additional models
In the previous notebook, we used simple regression models to look at the relationship between features of a bike rentals dataset. In this notebook, we'll experiment with more complex models to improve our regression performance.
Let's start by loading the bicycle sharing data as a **Pandas** DataFrame and viewing the first few rows. We'll also split our data into training and test datasets.
```python
# Import modules we'll need for this notebook
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# load the training dataset
!wget https://raw.githubusercontent.com/MicrosoftDocs/mslearn-introduction-to-machine-learning/main/Data/ml-basics/daily-bike-share.csv
bike_data = pd.read_csv('daily-bike-share.csv')
bike_data['day'] = pd.DatetimeIndex(bike_data['dteday']).day
numeric_features = ['temp', 'atemp', 'hum', 'windspeed']
categorical_features = ['season','mnth','holiday','weekday','workingday','weathersit', 'day']
bike_data[numeric_features + ['rentals']].describe()
print(bike_data.head())
# Separate features and labels
# After separating the dataset, we now have numpy arrays named **X** containing the features, and **y** containing the labels.
X, y = bike_data[['season','mnth', 'holiday','weekday','workingday','weathersit','temp', 'atemp', 'hum', 'windspeed']].values, bike_data['rentals'].values
# Split data 70%-30% into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)
print ('Training Set: %d rows\nTest Set: %d rows' % (X_train.shape[0], X_test.shape[0]))
```
Now we have the following four datasets:
- **X_train**: The feature values we'll use to train the model
- **y_train**: The corresponding labels we'll use to train the model
- **X_test**: The feature values we'll use to validate the model
- **y_test**: The corresponding labels we'll use to validate the model
Now we're ready to train a model by fitting a suitable regression algorithm to the training data.
## Experiment with Algorithms
The linear-regression algorithm we used last time to train the model has some predictive capability, but there are many kinds of regression algorithm we could try, including:
- **Linear algorithms**: Not just the Linear Regression algorithm we used above (which is technically an _Ordinary Least Squares_ algorithm), but other variants such as _Lasso_ and _Ridge_.
- **Tree-based algorithms**: Algorithms that build a decision tree to reach a prediction.
- **Ensemble algorithms**: Algorithms that combine the outputs of multiple base algorithms to improve generalizability.
> **Note**: For a full list of Scikit-Learn estimators that encapsulate algorithms for supervised machine learning, see the [Scikit-Learn documentation](https://scikit-learn.org/stable/supervised_learning.html). There are many algorithms from which to choose, but for most real-world scenarios, the [Scikit-Learn estimator cheat sheet](https://scikit-learn.org/stable/tutorial/machine_learning_map/index.html) can help you find a suitable starting point.
### Try Another Linear Algorithm
Let's try training our regression model by using a **Lasso** algorithm. We can do this by just changing the estimator in the training code.
```python
from sklearn.linear_model import Lasso
# Fit a lasso model on the training set
model = Lasso().fit(X_train, y_train)
print (model, "\n")
# Evaluate the model using the test data
predictions = model.predict(X_test)
mse = mean_squared_error(y_test, predictions)
print("MSE:", mse)
rmse = np.sqrt(mse)
print("RMSE:", rmse)
r2 = r2_score(y_test, predictions)
print("R2:", r2)
# Plot predicted vs actual
plt.scatter(y_test, predictions)
plt.xlabel('Actual Labels')
plt.ylabel('Predicted Labels')
plt.title('Daily Bike Share Predictions')
# overlay the regression line
z = np.polyfit(y_test, predictions, 1)
p = np.poly1d(z)
plt.plot(y_test,p(y_test), color='magenta')
plt.show()
```
### Try a Decision Tree Algorithm
As an alternative to a linear model, there's a category of algorithms for machine learning that uses a tree-based approach in which the features in the dataset are examined in a series of evaluations, each of which results in a *branch* in a *decision tree* based on the feature value. At the end of each series of branches are leaf-nodes with the predicted label value based on the feature values.
It's easiest to see how this works with an example. Let's train a Decision Tree regression model using the bike rental data. After training the model, the following code will print the model definition and a text representation of the tree it uses to predict label values.
```python
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import export_text
# Train the model
model = DecisionTreeRegressor().fit(X_train, y_train)
print (model, "\n")
# Visualize the model tree
tree = export_text(model)
print(tree)
```
So now we have a tree-based model, but is it any good? Let's evaluate it with the test data.
```python
# Evaluate the model using the test data
predictions = model.predict(X_test)
mse = mean_squared_error(y_test, predictions)
print("MSE:", mse)
rmse = np.sqrt(mse)
print("RMSE:", rmse)
r2 = r2_score(y_test, predictions)
print("R2:", r2)
# Plot predicted vs actual
plt.scatter(y_test, predictions)
plt.xlabel('Actual Labels')
plt.ylabel('Predicted Labels')
plt.title('Daily Bike Share Predictions')
# overlay the regression line
z = np.polyfit(y_test, predictions, 1)
p = np.poly1d(z)
plt.plot(y_test,p(y_test), color='magenta')
plt.show()
```
The tree-based model doesn't seem to have improved over the linear model, so what else could we try?
### Try an Ensemble Algorithm
Ensemble algorithms work by combining multiple base estimators to produce an optimal model, either by applying an aggregate function to a collection of base models (sometimes referred to as _bagging_) or by building a sequence of models that build on one another to improve predictive performance (referred to as _boosting_).
For example, let's try a Random Forest model, which applies an averaging function to multiple Decision Tree models for a better overall model.
```python
from sklearn.ensemble import RandomForestRegressor
# Train the model
model = RandomForestRegressor().fit(X_train, y_train)
print (model, "\n")
# Evaluate the model using the test data
predictions = model.predict(X_test)
mse = mean_squared_error(y_test, predictions)
print("MSE:", mse)
rmse = np.sqrt(mse)
print("RMSE:", rmse)
r2 = r2_score(y_test, predictions)
print("R2:", r2)
# Plot predicted vs actual
plt.scatter(y_test, predictions)
plt.xlabel('Actual Labels')
plt.ylabel('Predicted Labels')
plt.title('Daily Bike Share Predictions')
# overlay the regression line
z = np.polyfit(y_test, predictions, 1)
p = np.poly1d(z)
plt.plot(y_test,p(y_test), color='magenta')
plt.show()
```
For good measure, let's also try a *boosting* ensemble algorithm. We'll use a Gradient Boosting estimator, which like a Random Forest algorithm builds multiple trees; but instead of building them all independently and taking the average result, each tree is built on the outputs of the previous one in an attempt to incrementally reduce the *loss* (error) in the model.
```python
# Train the model
from sklearn.ensemble import GradientBoostingRegressor
# Fit a Gradient Boosting model on the training set
model = GradientBoostingRegressor().fit(X_train, y_train)
print (model, "\n")
# Evaluate the model using the test data
predictions = model.predict(X_test)
mse = mean_squared_error(y_test, predictions)
print("MSE:", mse)
rmse = np.sqrt(mse)
print("RMSE:", rmse)
r2 = r2_score(y_test, predictions)
print("R2:", r2)
# Plot predicted vs actual
plt.scatter(y_test, predictions)
plt.xlabel('Actual Labels')
plt.ylabel('Predicted Labels')
plt.title('Daily Bike Share Predictions')
# overlay the regression line
z = np.polyfit(y_test, predictions, 1)
p = np.poly1d(z)
plt.plot(y_test,p(y_test), color='magenta')
plt.show()
```
## Summary
Here, we've tried a number of new regression algorithms to improve performance. In our next notebook, we'll look at *tuning* these algorithms to improve performance.
## Further Reading
To learn more about Scikit-Learn, see the [Scikit-Learn documentation](https://scikit-learn.org/stable/modules/model_evaluation.html#regression-metrics).
### Lasso model
In statistics and machine learning, Lasso (Least Absolute Shrinkage and Selection Operator; also written lasso or LASSO) is a regression analysis method that performs both **variable selection** and **regularization** in order to enhance the prediction accuracy and interpretability of the resulting statistical model.
Lasso was originally formulated for linear regression models. This simple case reveals a substantial amount about the estimator, including [its relationship to ridge regression and best subset selection, and the connections between lasso coefficient estimates and so-called soft thresholding](https://en.wikipedia.org/wiki/Lasso_%28statistics%29).
The optimization objective for Lasso is:
$$\frac{1}{2n_{samples}} \lVert y - Xw \rVert_2^2 + \alpha \lVert w \rVert_1$$
where `n_samples` is the number of samples, `y` is the target variable, `X` is the input data, `w` is the weight vector, and `alpha` is a constant that multiplies the L1 term.
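As an aside on the "soft thresholding" connection mentioned above: under the simplifying assumption of an orthonormal design matrix ($X^TX = I$), and with the objective written without the $\frac{1}{2n_{samples}}$ scaling, each lasso coefficient is simply the soft-thresholded ordinary least-squares estimate:

$$\hat{w}_j^{\text{lasso}} = \operatorname{sign}\!\left(\hat{w}_j^{\text{OLS}}\right)\max\!\left(\left|\hat{w}_j^{\text{OLS}}\right| - \alpha,\; 0\right)$$

so any coefficient whose least-squares magnitude falls below $\alpha$ is set exactly to zero, which is the source of Lasso's built-in feature selection.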
Lasso's ability to perform subset selection relies on the form of the constraint and has a variety of interpretations, including in terms of geometry, Bayesian statistics, and convex analysis. The LASSO method regularizes model parameters by shrinking the regression coefficients, reducing some of them to zero. The feature selection phase occurs after the shrinkage, where every non-zero value is selected to be used in the model.
This method is significant in the minimization of prediction errors that are common in statistical models. It's used over standard regression methods for a more accurate prediction. This model uses shrinkage, where data values are shrunk towards a central point, such as the mean. The lasso procedure encourages simple, sparse models (i.e., models with fewer parameters).
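To make this shrinkage-to-zero behaviour concrete, here is a small self-contained sketch (synthetic data; `alpha=1.0` is an illustrative choice, not a tuned value) showing Lasso zeroing out the coefficients of uninformative features:
```python
import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Lasso

# Synthetic data: 10 features, but only 3 actually influence the target
X_demo, y_demo = make_regression(n_samples=100, n_features=10,
                                 n_informative=3, noise=5.0, random_state=0)

# alpha controls the strength of the L1 penalty (illustrative value)
lasso = Lasso(alpha=1.0).fit(X_demo, y_demo)
print("Coefficients:", np.round(lasso.coef_, 2))
print("Features kept:", int(np.sum(lasso.coef_ != 0)), "of", X_demo.shape[1])
```
The non-zero coefficients are the "players picked for the team" in the analogy that follows.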
### Explanation
Let's imagine you're playing a game of soccer with your friends. Now, each of your friends has different skills. Some are good at scoring goals, some are good at defending, and some are good at passing the ball.
Now, you're the team captain and you want to pick the best team. But you can only pick a few friends to be on your team. How do you decide?
You could just pick randomly, but that might not give you the best team. Instead, you want a way to pick the best players based on their skills.
This is where Lasso comes in. In our analogy, Lasso is like a smart team captain. It looks at all your friends' skills (these are like the features in a dataset), and picks the ones that are most important for winning the game (these are the variables in our model).
Just like a good captain won't pick a friend who can't run fast or kick well, Lasso also shrinks the importance of less useful features down to zero - effectively leaving them out of the model.
So, Lasso helps us make better decisions by focusing on what's really important and ignoring what's not. And just like picking the right team can help you win your soccer game, using Lasso can help make better predictions with data!
### Ingredient popularity helper
A helper for the cuisine dataset: it totals how often each ingredient appears in a cuisine's recipes and plots the top 10.
```python
def create_ingredient_df(df):
    # Sum each ingredient's occurrences across all recipes for one cuisine
    ingredient_df = df.T.drop(['cuisine', 'Unnamed: 0']).sum(axis=1).to_frame('value')
    ingredient_df = ingredient_df[(ingredient_df.T != 0).any()]
    ingredient_df = ingredient_df.sort_values(by='value', ascending=False,
                                              inplace=False)
    return ingredient_df

country_dfs = {
    'thai': thai_df,
    'japanese': japanese_df,
    'chinese': chinese_df,
    'indian': indian_df,
    'korean': korean_df
}

# Create a dictionary to store the ingredient dataframes for each country
country_ingredient_dfs = {}
for country, df in country_dfs.items():
    country_ingredient_dfs[country] = create_ingredient_df(df)

# Plot a bar chart of the top 10 ingredients for each country
for country, df in country_ingredient_dfs.items():
    df.head(10).plot.barh(title=f'{country.title()} Ingredient Popularity')
```
### MS Learn: Exercise - Experiment with more powerful regression models
Notes from [Unit 5 of the *Train and evaluate regression models* module](https://learn.microsoft.com/en-us/training/modules/train-evaluate-regression-models/5-exercise-powerful-models) on Microsoft Learn.
```python
# Import modules we'll need for this notebook
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# load the training dataset
!wget https://raw.githubusercontent.com/MicrosoftDocs/mslearn-introduction-to-machine-learning/main/Data/ml-basics/daily-bike-share.csv
bike_data = pd.read_csv('daily-bike-share.csv')
bike_data['day'] = pd.DatetimeIndex(bike_data['dteday']).day
numeric_features = ['temp', 'atemp', 'hum', 'windspeed']
categorical_features = ['season','mnth','holiday','weekday','workingday','weathersit', 'day']
bike_data[numeric_features + ['rentals']].describe()
print(bike_data.head())
# Separate features and labels
# After separating the dataset, we now have numpy arrays named **X** containing the features, and **y** containing the labels.
X, y = bike_data[['season','mnth', 'holiday','weekday','workingday','weathersit','temp', 'atemp', 'hum', 'windspeed']].values, bike_data['rentals'].values
# Split data 70%-30% into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)
print ('Training Set: %d rows\nTest Set: %d rows' % (X_train.shape[0], X_test.shape[0]))
```
```
Matplotlib is building the font cache using fc-list. This may take a moment.
--2023-10-14 18:18:28--  https://raw.githubusercontent.com/MicrosoftDocs/mslearn-introduction-to-machine-learning/main/Data/ml-basics/daily-bike-share.csv
daily-bike-share.csv saved [48800/48800]

   instant    dteday  season  yr  mnth  holiday  weekday  workingday  weathersit      temp     atemp       hum  windspeed  rentals  day
0        1  1/1/2011       1   0     1        0        6           0           2  0.344167  0.363625  0.805833   0.160446      331    1
1        2  1/2/2011       1   0     1        0        0           0           2  0.363478  0.353739  0.696087   0.248539      131    2
2        3  1/3/2011       1   0     1        0        1           1           1  0.196364  0.189405  0.437273   0.248309      120    3
3        4  1/4/2011       1   0     1        0        2           1           1  0.200000  0.212122  0.590435   0.160296      108    4
4        5  1/5/2011       1   0     1        0        3           1           1  0.226957  0.229270  0.436957   0.186900       82    5

Training Set: 511 rows
Test Set: 220 rows
```
```python
from sklearn.linear_model import Lasso
# Fit a lasso model on the training set
model = Lasso().fit(X_train, y_train)
print (model, "\n")
# Evaluate the model using the test data
predictions = model.predict(X_test)
mse = mean_squared_error(y_test, predictions)
print("MSE:", mse)
rmse = np.sqrt(mse)
print("RMSE:", rmse)
r2 = r2_score(y_test, predictions)
print("R2:", r2)
# Plot predicted vs actual
plt.scatter(y_test, predictions)
plt.xlabel('Actual Labels')
plt.ylabel('Predicted Labels')
plt.title('Daily Bike Share Predictions')
# overlay the regression line
z = np.polyfit(y_test, predictions, 1)
p = np.poly1d(z)
plt.plot(y_test,p(y_test), color='magenta')
plt.show()
```
__learn-notebooks-bc2fd8aa-5b1f-44ce-a745-cac114299f27
Compute connected
__Viewing
Kernel idle
azureml_py38
---
## Next unit: Improve models with hyperparameters
[Continue](https://learn.microsoft.com/en-us/training/modules/train-evaluate-regression-models/6-improve-models/)
Need help? See our [troubleshooting guide](https://learn.microsoft.com/en-us/training/support/troubleshooting?uid=learn.wwl.train-evaluate-regression-models.exercise-powerful-models&documentId=d189d9e6-568a-fe91-e351-b0a8234848c3&versionIndependentDocumentId=3c48d34b-e2b6-e3c8-c86c-a6284baa2b0d&contentPath=%2FMicrosoftDocs%2Flearn-pr%2Fblob%2Flive%2Flearn-pr%2Fmachine-learning%2Ftrain-evaluate-regression-models%2F5-exercise-powerful-models.yml&url=https%3A%2F%2Flearn.microsoft.com%2Fen-us%2Ftraining%2Fmodules%2Ftrain-evaluate-regression-models%2F5-exercise-powerful-models&author=jasdeb) or provide specific feedback by [reporting an issue](https://learn.microsoft.com/en-us/training/support/troubleshooting?uid=learn.wwl.train-evaluate-regression-models.exercise-powerful-models&documentId=d189d9e6-568a-fe91-e351-b0a8234848c3&versionIndependentDocumentId=3c48d34b-e2b6-e3c8-c86c-a6284baa2b0d&contentPath=%2FMicrosoftDocs%2Flearn-pr%2Fblob%2Flive%2Flearn-pr%2Fmachine-learning%2Ftrain-evaluate-regression-models%2F5-exercise-powerful-models.yml&url=https%3A%2F%2Flearn.microsoft.com%2Fen-us%2Ftraining%2Fmodules%2Ftrain-evaluate-regression-models%2F5-exercise-powerful-models&author=jasdeb#report-feedback).
How are we doing?
TerriblePoorFairGoodGreat
[English (United States)](https://learn.microsoft.com/en-us/locale?target=https%3A%2F%2Flearn.microsoft.com%2Fen-us%2Ftraining%2Fmodules%2Ftrain-evaluate-regression-models%2F5-exercise-powerful-models)
Theme
- [Previous Versions](https://learn.microsoft.com/en-us/previous-versions/)
- [Blog](https://techcommunity.microsoft.com/t5/microsoft-learn-blog/bg-p/MicrosoftLearnBlog)
- [Contribute](https://learn.microsoft.com/en-us/contribute/)
- [Privacy](https://go.microsoft.com/fwlink/?LinkId=521839)
- [Terms of Use](https://learn.microsoft.com/en-us/legal/termsofuse)
- [Trademarks](https://www.microsoft.com/legal/intellectualproperty/Trademarks/)
- © Microsoft 2023
Loading…
Cancel
Save