diff --git a/竞赛优胜技巧/.ipynb_checkpoints/AutoAI Tools-checkpoint.ipynb b/竞赛优胜技巧/.ipynb_checkpoints/AutoAI Tools-checkpoint.ipynb index 3744e19..b42ab74 100644 --- a/竞赛优胜技巧/.ipynb_checkpoints/AutoAI Tools-checkpoint.ipynb +++ b/竞赛优胜技巧/.ipynb_checkpoints/AutoAI Tools-checkpoint.ipynb @@ -28,7 +28,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 1, "id": "f5e0e977", "metadata": {}, "outputs": [], @@ -181,85 +181,160 @@ "name": "stdout", "output_type": "stream", "text": [ - "训练集: (13932, 8)\n", - "验证集: (4644, 8)\n", + "训练集: (18576, 8)\n", "测试集: (2064, 8)\n" ] } ], "source": [ "# 切分训练和测试集\n", - "train_valid_x, test_x, train_valid_y, test_y = train_test_split(X, y,random_state=42,test_size=0.1)\n", + "train_x, test_x, train_y, test_y = train_test_split(X, y,random_state=42,test_size=0.1)\n", + "train_y = pd.DataFrame(train_y,columns=['result'])\n", + "test_y = pd.DataFrame(test_y,columns=['result'])\n", "\n", - "# 切分训练和验证集\n", - "train_x, valid_x, train_y, valid_y = train_test_split(train_valid_x, train_valid_y,random_state=42)\n", "print('训练集:',train_x.shape)\n", - "print('验证集:',valid_x.shape)\n", "print('测试集:', test_x.shape)" ] }, + { + "cell_type": "code", + "execution_count": 6, + "id": "009efc5d", + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.model_selection import KFold\n", + "skf = KFold(n_splits=5, shuffle=True, random_state=42)" + ] + }, { "cell_type": "markdown", "id": "e87a2121", "metadata": {}, "source": [ - "### 使用LGB作为模型,不使用optuna调参" + "### 使用LGB作为模型并CV,不使用optuna调参" ] }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 7, "id": "5cad8967", - "metadata": {}, + "metadata": { + "scrolled": true + }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.001822 seconds.\n", + "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000866 seconds.\n", "You can set `force_col_wise=true` to remove the overhead.\n", "[LightGBM] [Info] Total Bins 1837\n", - "[LightGBM] [Info] Number of data points in the train set: 13932, number of used features: 8\n", - "[LightGBM] [Info] Start training from score 2.072422\n", + "[LightGBM] [Info] Number of data points in the train set: 14860, number of used features: 8\n", + "[LightGBM] [Info] Start training from score 2.073247\n", "Training until validation scores don't improve for 20 rounds\n", "[50]\tvalid_0's rmse: 0.492877\n", - "[100]\tvalid_0's rmse: 0.471803\n", - "[150]\tvalid_0's rmse: 0.46445\n", - "[200]\tvalid_0's rmse: 0.459716\n", - "[250]\tvalid_0's rmse: 0.456658\n", - "[300]\tvalid_0's rmse: 0.454223\n", - "[350]\tvalid_0's rmse: 0.452644\n", + "[100]\tvalid_0's rmse: 0.469235\n", + "[150]\tvalid_0's rmse: 0.460357\n", + "[200]\tvalid_0's rmse: 0.455302\n", + "[250]\tvalid_0's rmse: 0.452674\n", + "[300]\tvalid_0's rmse: 0.451179\n", + "Early stopping, best iteration is:\n", + "[313]\tvalid_0's rmse: 0.450303\n", + "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000494 seconds.\n", + "You can set `force_col_wise=true` to remove the overhead.\n", + "[LightGBM] [Info] Total Bins 1838\n", + "[LightGBM] [Info] Number of data points in the train set: 14861, number of used features: 8\n", + "[LightGBM] [Info] Start training from score 2.070396\n", + "Training until validation scores don't improve for 20 rounds\n", + "[50]\tvalid_0's rmse: 0.496484\n", + "[100]\tvalid_0's rmse: 0.476225\n", + 
"[150]\tvalid_0's rmse: 0.469582\n", + "[200]\tvalid_0's rmse: 0.464643\n", + "[250]\tvalid_0's rmse: 0.463244\n", + "[300]\tvalid_0's rmse: 0.461886\n", + "[350]\tvalid_0's rmse: 0.460305\n", + "[400]\tvalid_0's rmse: 0.4596\n", + "[450]\tvalid_0's rmse: 0.458645\n", + "Early stopping, best iteration is:\n", + "[435]\tvalid_0's rmse: 0.458368\n", + "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000800 seconds.\n", + "You can set `force_col_wise=true` to remove the overhead.\n", + "[LightGBM] [Info] Total Bins 1837\n", + "[LightGBM] [Info] Number of data points in the train set: 14861, number of used features: 8\n", + "[LightGBM] [Info] Start training from score 2.069154\n", + "Training until validation scores don't improve for 20 rounds\n", + "[50]\tvalid_0's rmse: 0.48676\n", + "[100]\tvalid_0's rmse: 0.466911\n", + "[150]\tvalid_0's rmse: 0.459693\n", + "[200]\tvalid_0's rmse: 0.457631\n", + "[250]\tvalid_0's rmse: 0.452574\n", + "Early stopping, best iteration is:\n", + "[264]\tvalid_0's rmse: 0.451947\n", + "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000779 seconds.\n", + "You can set `force_col_wise=true` to remove the overhead.\n", + "[LightGBM] [Info] Total Bins 1838\n", + "[LightGBM] [Info] Number of data points in the train set: 14861, number of used features: 8\n", + "[LightGBM] [Info] Start training from score 2.063366\n", + "Training until validation scores don't improve for 20 rounds\n", + "[50]\tvalid_0's rmse: 0.492482\n", + "[100]\tvalid_0's rmse: 0.468929\n", + "[150]\tvalid_0's rmse: 0.459713\n", + "[200]\tvalid_0's rmse: 0.456203\n", + "[250]\tvalid_0's rmse: 0.454063\n", + "[300]\tvalid_0's rmse: 0.451901\n", + "Early stopping, best iteration is:\n", + "[322]\tvalid_0's rmse: 0.450961\n", + "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000694 seconds.\n", + "You can set `force_col_wise=true` to remove the overhead.\n", + "[LightGBM] [Info] Total Bins 1838\n", + "[LightGBM] [Info] Number of data points in the train set: 14861, number of used features: 8\n", + "[LightGBM] [Info] Start training from score 2.066924\n", + "Training until validation scores don't improve for 20 rounds\n", + "[50]\tvalid_0's rmse: 0.488385\n", + "[100]\tvalid_0's rmse: 0.464851\n", + "[150]\tvalid_0's rmse: 0.455183\n", + "[200]\tvalid_0's rmse: 0.450506\n", + "[250]\tvalid_0's rmse: 0.4461\n", + "[300]\tvalid_0's rmse: 0.44438\n", + "[350]\tvalid_0's rmse: 0.441808\n", + "[400]\tvalid_0's rmse: 0.44079\n", "Early stopping, best iteration is:\n", - "[342]\tvalid_0's rmse: 0.452522\n" + "[411]\tvalid_0's rmse: 0.440543\n", + "Wall time: 3.44 s\n" ] } ], "source": [ + "%%time\n", + "test_predict = np.zeros(shape=[test_x.shape[0], 5],dtype=float)\n", "params = {'boosting_type': 'gbdt',\n", " 'objective': 'regression',\n", " \"metric\": 'rmse'}\n", - "dtrain = lgb.Dataset(train_x, label=train_y)\n", - "dvalid = lgb.Dataset(valid_x, label=valid_y)\n", - "model = lgb.train(params=params, train_set=dtrain,valid_sets=[dvalid],\n", - " verbose_eval=50,\n", - " early_stopping_rounds=20,\n", - " num_boost_round=5000)\n", - "predict = model.predict(test_x)" + "for i, (trn_idx, val_idx) in enumerate(skf.split(train_x, train_y)):\n", + " dtrain = lgb.Dataset(train_x.iloc[trn_idx], label=train_y.iloc[trn_idx])\n", + " dvalid = lgb.Dataset(train_x.iloc[val_idx], label=train_y.iloc[val_idx])\n", + " model = lgb.train(params=params, train_set=dtrain,valid_sets=[dvalid],\n", 
+ " verbose_eval=50,\n", + " early_stopping_rounds=20,\n", + " num_boost_round=5000)\n", + " test_predict[:,i] = model.predict(test_x)\n", + "predict = np.mean(test_predict,axis=1)" ] }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 8, "id": "886bfdad", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "0.4531666044672748" + "0.4346521330333544" ] }, - "execution_count": 14, + "execution_count": 8, "metadata": {}, "output_type": "execute_result" } @@ -274,80 +349,24 @@ "id": "c271442c", "metadata": {}, "source": [ - "### 使用LGB作为模型,使用optuna调参" + "### 使用LGB作为模型并CV,使用optuna调参" ] }, { "cell_type": "code", - "execution_count": 41, + "execution_count": 9, "id": "9a87f9db", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Looking in indexes: https://pypi.tuna.tsinghua.edu.cn/simple\n", - "Collecting optuna\n", - " Downloading https://pypi.tuna.tsinghua.edu.cn/packages/2a/b4/061c8721b5da1614794d1b66fcb212eee156efd5284f66854d02f295b0be/optuna-2.9.1-py3-none-any.whl (302 kB)\n", - "Requirement already satisfied: tqdm in d:\\programdata\\anaconda3\\lib\\site-packages (from optuna) (4.59.0)\n", - "Collecting cmaes>=0.8.2\n", - " Downloading https://pypi.tuna.tsinghua.edu.cn/packages/01/1f/43b01223a0366171f474320c6e966c39a11587287f098a5f09809b45e05f/cmaes-0.8.2-py3-none-any.whl (15 kB)\n", - "Requirement already satisfied: scipy!=1.4.0 in d:\\programdata\\anaconda3\\lib\\site-packages (from optuna) (1.4.1)\n", - "Collecting cliff\n", - " Downloading https://pypi.tuna.tsinghua.edu.cn/packages/18/f7/2a98b032a43b2925ea32bc13a8feb6cf9416e7d2b2c0f6d2ce14636a03b1/cliff-3.9.0-py3-none-any.whl (80 kB)\n", - "Collecting alembic\n", - " Downloading https://pypi.tuna.tsinghua.edu.cn/packages/7a/5a/be479a2c379e6b3c57dc56ea3b139ad4d46c2d244a0035ac4d7475116076/alembic-1.7.1-py3-none-any.whl (208 kB)\n", - "Requirement already satisfied: packaging>=20.0 in d:\\programdata\\anaconda3\\lib\\site-packages (from optuna) (20.9)\n", - "Requirement already satisfied: numpy in d:\\programdata\\anaconda3\\lib\\site-packages (from optuna) (1.18.5)\n", - "Collecting colorlog\n", - " Downloading https://pypi.tuna.tsinghua.edu.cn/packages/2d/93/4b0bb101e54206e92feb3c986c274902212b2ed8c55423e6e7f6d8b693ca/colorlog-6.4.1-py2.py3-none-any.whl (11 kB)\n", - "Requirement already satisfied: PyYAML in d:\\programdata\\anaconda3\\lib\\site-packages (from optuna) (5.4.1)\n", - "Requirement already satisfied: sqlalchemy>=1.1.0 in d:\\programdata\\anaconda3\\lib\\site-packages (from optuna) (1.4.7)\n", - "Requirement already satisfied: pyparsing>=2.0.2 in d:\\programdata\\anaconda3\\lib\\site-packages (from packaging>=20.0->optuna) (2.4.7)\n", - "Requirement already satisfied: greenlet!=0.4.17 in d:\\programdata\\anaconda3\\lib\\site-packages (from sqlalchemy>=1.1.0->optuna) (1.0.0)\n", - "Collecting importlib-resources\n", - " Downloading https://pypi.tuna.tsinghua.edu.cn/packages/f2/6c/2f3b930513bb971172ffceb63cf4e910944e57451724e69b1dec97cfefa6/importlib_resources-5.2.2-py3-none-any.whl (27 kB)\n", - "Collecting Mako\n", - " Downloading https://pypi.tuna.tsinghua.edu.cn/packages/75/69/c3ab0db9234fa5681a85a1c55203763a62902d56ad76b6d9b9bfa2c83694/Mako-1.1.5-py2.py3-none-any.whl (75 kB)\n", - "Collecting PrettyTable>=0.7.2\n", - " Downloading https://pypi.tuna.tsinghua.edu.cn/packages/bd/b5/c09f8d237e060a9e7b5d2d1577c2a6bc49fa298a7b4aefd52146f2b9a62e/prettytable-2.2.0-py3-none-any.whl (23 kB)\n", - "Collecting pbr!=2.1.0,>=2.0.0\n", - " Downloading 
https://pypi.tuna.tsinghua.edu.cn/packages/18/e0/1d4702dd81121d04a477c272d47ee5b6bc970d1a0990b11befa275c55cf2/pbr-5.6.0-py2.py3-none-any.whl (111 kB)\n", - "Collecting cmd2>=1.0.0\n", - " Downloading https://pypi.tuna.tsinghua.edu.cn/packages/44/ca/d407811641ec1d8bd8a38ee3165d73aa44776d7700436bd4d4a6606f2736/cmd2-2.1.2-py3-none-any.whl (141 kB)\n", - "Collecting stevedore>=2.0.1\n", - " Downloading https://pypi.tuna.tsinghua.edu.cn/packages/b2/c5/036a9a6e220ea7406a36130e80cca33a3e6b98b5328cfdba4b46b2ed0786/stevedore-3.4.0-py3-none-any.whl (49 kB)\n", - "Collecting autopage>=0.4.0\n", - " Downloading https://pypi.tuna.tsinghua.edu.cn/packages/18/a7/901e943318925f8ca3f7963616660065b3cf4e143b0327f88076ba5c4e22/autopage-0.4.0-py3-none-any.whl (20 kB)\n", - "Requirement already satisfied: colorama>=0.3.7 in d:\\programdata\\anaconda3\\lib\\site-packages (from cmd2>=1.0.0->cliff->optuna) (0.4.4)\n", - "Collecting pyreadline3\n", - " Downloading https://pypi.tuna.tsinghua.edu.cn/packages/89/29/10fbb29d957dbcee77a0832eabb9953da80d6bb9514f7ca1b3d82f50219f/pyreadline3-3.3-py3-none-any.whl (95 kB)\n", - "Requirement already satisfied: attrs>=16.3.0 in d:\\programdata\\anaconda3\\lib\\site-packages (from cmd2>=1.0.0->cliff->optuna) (20.3.0)\n", - "Collecting pyperclip>=1.6\n", - " Downloading https://pypi.tuna.tsinghua.edu.cn/packages/a7/2c/4c64579f847bd5d539803c8b909e54ba087a79d01bb3aba433a95879a6c5/pyperclip-1.8.2.tar.gz (20 kB)\n", - "Requirement already satisfied: wcwidth>=0.1.7 in d:\\programdata\\anaconda3\\lib\\site-packages (from cmd2>=1.0.0->cliff->optuna) (0.2.5)\n", - "Requirement already satisfied: zipp>=3.1.0 in d:\\programdata\\anaconda3\\lib\\site-packages (from importlib-resources->alembic->optuna) (3.4.1)\n", - "Requirement already satisfied: MarkupSafe>=0.9.2 in d:\\programdata\\anaconda3\\lib\\site-packages (from Mako->alembic->optuna) (1.1.1)\n", - "Building wheels for collected packages: pyperclip\n", - " Building wheel for pyperclip (setup.py): started\n", - " Building wheel for pyperclip (setup.py): finished with status 'done'\n", - " Created wheel for pyperclip: filename=pyperclip-1.8.2-py3-none-any.whl size=11107 sha256=96b5a96e64d8d6ae264ae796623c0148d1ca71677462878fc1f07c74e1e794ff\n", - " Stored in directory: c:\\users\\administrator\\appdata\\local\\pip\\cache\\wheels\\30\\c0\\21\\bc13df81c8b032076577671a8ef05db4e168a335e07e64d9a7\n", - "Successfully built pyperclip\n", - "Installing collected packages: pyreadline3, pyperclip, pbr, stevedore, PrettyTable, Mako, importlib-resources, cmd2, autopage, colorlog, cmaes, cliff, alembic, optuna\n", - "Successfully installed Mako-1.1.5 PrettyTable-2.2.0 alembic-1.7.1 autopage-0.4.0 cliff-3.9.0 cmaes-0.8.2 cmd2-2.1.2 colorlog-6.4.1 importlib-resources-5.2.2 optuna-2.9.1 pbr-5.6.0 pyperclip-1.8.2 pyreadline3-3.3 stevedore-3.4.0\n", - "Note: you may need to restart the kernel to use updated packages.\n" - ] - } - ], + "metadata": { + "scrolled": true + }, + "outputs": [], "source": [ - "pip install optuna" + "# pip install optuna" ] }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 10, "id": "d8a0279a", "metadata": {}, "outputs": [], @@ -357,12 +376,12 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 11, "id": "7433fdcb", "metadata": {}, "outputs": [], "source": [ - "def objective(trial,train_x, valid_x, train_y, valid_y):\n", + "def objective(trial,train_x, train_y, valid_x, valid_y):\n", " dtrain = lgb.Dataset(train_x, label=train_y)\n", " dvalid = lgb.Dataset(valid_x, 
label=valid_y)\n", "\n", @@ -395,7 +414,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 12, "id": "b9018adb", "metadata": { "scrolled": true @@ -405,119 +424,119 @@ "name": "stderr", "output_type": "stream", "text": [ - "\u001b[32m[I 2021-09-07 09:26:30,952]\u001b[0m A new study created in memory with name: no-name-f2147511-069d-495f-90ec-5990dc3c3716\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:31,284]\u001b[0m Trial 0 finished with value: 0.5579165649228477 and parameters: {'lambda_l1': 1.6814939560853405e-06, 'lambda_l2': 8.772634980486007, 'num_leaves': 79, 'feature_fraction': 0.4415746779386105, 'bagging_fraction': 0.5306839081914155, 'bagging_freq': 5, 'min_child_samples': 13}. Best is trial 0 with value: 0.5579165649228477.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:31,523]\u001b[0m Trial 1 finished with value: 0.5577111827415846 and parameters: {'lambda_l1': 0.00013684423280171766, 'lambda_l2': 0.010116712675880523, 'num_leaves': 42, 'feature_fraction': 0.4758659908396936, 'bagging_fraction': 0.8916447942940564, 'bagging_freq': 7, 'min_child_samples': 32}. Best is trial 1 with value: 0.5577111827415846.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:31,986]\u001b[0m Trial 2 finished with value: 0.5382832732103667 and parameters: {'lambda_l1': 1.849141022226266, 'lambda_l2': 9.297088403542385e-06, 'num_leaves': 94, 'feature_fraction': 0.6894755218120849, 'bagging_fraction': 0.9503182129837274, 'bagging_freq': 2, 'min_child_samples': 98}. Best is trial 2 with value: 0.5382832732103667.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:32,156]\u001b[0m Trial 3 finished with value: 0.5786765226592324 and parameters: {'lambda_l1': 4.448151837364203e-05, 'lambda_l2': 1.8955982808109254e-08, 'num_leaves': 19, 'feature_fraction': 0.493099451522876, 'bagging_fraction': 0.6742062281671596, 'bagging_freq': 5, 'min_child_samples': 35}. Best is trial 2 with value: 0.5382832732103667.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:32,266]\u001b[0m Trial 4 finished with value: 0.6139367553603606 and parameters: {'lambda_l1': 0.0002791229907486381, 'lambda_l2': 5.269713608638115, 'num_leaves': 5, 'feature_fraction': 0.8303631732661438, 'bagging_fraction': 0.942024874372832, 'bagging_freq': 4, 'min_child_samples': 61}. Best is trial 2 with value: 0.5382832732103667.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:32,826]\u001b[0m Trial 5 finished with value: 0.5453617455111696 and parameters: {'lambda_l1': 0.06860061560766911, 'lambda_l2': 0.2723830502667369, 'num_leaves': 98, 'feature_fraction': 0.7105587363977491, 'bagging_fraction': 0.5899933873219961, 'bagging_freq': 4, 'min_child_samples': 71}. Best is trial 2 with value: 0.5382832732103667.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:32,880]\u001b[0m Trial 6 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:33,189]\u001b[0m Trial 7 finished with value: 0.5588682864793495 and parameters: {'lambda_l1': 0.4689515611654304, 'lambda_l2': 1.5494838308859912e-07, 'num_leaves': 65, 'feature_fraction': 0.5583496504073044, 'bagging_fraction': 0.6569918432147945, 'bagging_freq': 5, 'min_child_samples': 58}. Best is trial 2 with value: 0.5382832732103667.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:33,462]\u001b[0m Trial 8 finished with value: 0.5447098867148504 and parameters: {'lambda_l1': 0.0010101561223389774, 'lambda_l2': 0.01515390989225731, 'num_leaves': 47, 'feature_fraction': 0.6245633996226619, 'bagging_fraction': 0.419624870667929, 'bagging_freq': 3, 'min_child_samples': 48}. 
Best is trial 2 with value: 0.5382832732103667.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:33,693]\u001b[0m Trial 9 pruned. Trial was pruned at iteration 80.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:34,181]\u001b[0m Trial 10 finished with value: 0.541110883425217 and parameters: {'lambda_l1': 1.4756346287368392e-08, 'lambda_l2': 1.3544296321997672e-05, 'num_leaves': 189, 'feature_fraction': 0.9751158697609862, 'bagging_fraction': 0.8348333900598492, 'bagging_freq': 1, 'min_child_samples': 100}. Best is trial 2 with value: 0.5382832732103667.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:34,761]\u001b[0m Trial 11 finished with value: 0.5430244062824532 and parameters: {'lambda_l1': 1.2282790653635484e-08, 'lambda_l2': 8.373619608433318e-06, 'num_leaves': 196, 'feature_fraction': 0.9236444858767681, 'bagging_fraction': 0.8122951589315216, 'bagging_freq': 1, 'min_child_samples': 100}. Best is trial 2 with value: 0.5382832732103667.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:35,237]\u001b[0m Trial 12 finished with value: 0.5479941202014348 and parameters: {'lambda_l1': 8.66531632367352, 'lambda_l2': 2.7356786615324263e-05, 'num_leaves': 182, 'feature_fraction': 0.7257944800491923, 'bagging_fraction': 0.8151059105693412, 'bagging_freq': 1, 'min_child_samples': 98}. Best is trial 2 with value: 0.5382832732103667.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:36,132]\u001b[0m Trial 13 finished with value: 0.5426396090105863 and parameters: {'lambda_l1': 1.5311804262482413e-08, 'lambda_l2': 8.31070641463058e-06, 'num_leaves': 241, 'feature_fraction': 0.9996713520233095, 'bagging_fraction': 0.9659578077631608, 'bagging_freq': 2, 'min_child_samples': 84}. Best is trial 2 with value: 0.5382832732103667.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:36,961]\u001b[0m Trial 14 finished with value: 0.5367665957770634 and parameters: {'lambda_l1': 0.014095420115571123, 'lambda_l2': 0.0002563644914171101, 'num_leaves': 149, 'feature_fraction': 0.7883099949736571, 'bagging_fraction': 0.8086023984951658, 'bagging_freq': 2, 'min_child_samples': 85}. Best is trial 14 with value: 0.5367665957770634.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:37,824]\u001b[0m Trial 15 finished with value: 0.5348886658006932 and parameters: {'lambda_l1': 0.020071941122612545, 'lambda_l2': 0.0003014852059198554, 'num_leaves': 146, 'feature_fraction': 0.7936923799556383, 'bagging_fraction': 0.756430674635471, 'bagging_freq': 3, 'min_child_samples': 83}. Best is trial 15 with value: 0.5348886658006932.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:38,622]\u001b[0m Trial 16 finished with value: 0.5372333761766732 and parameters: {'lambda_l1': 0.015173696268463625, 'lambda_l2': 0.0016757129971514612, 'num_leaves': 138, 'feature_fraction': 0.8107104336197675, 'bagging_fraction': 0.7376130779961089, 'bagging_freq': 3, 'min_child_samples': 78}. Best is trial 15 with value: 0.5348886658006932.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:39,451]\u001b[0m Trial 17 finished with value: 0.5376252190781058 and parameters: {'lambda_l1': 0.007357005023837469, 'lambda_l2': 0.000250874148036676, 'num_leaves': 147, 'feature_fraction': 0.7829205508785075, 'bagging_fraction': 0.7478270069878126, 'bagging_freq': 3, 'min_child_samples': 67}. Best is trial 15 with value: 0.5348886658006932.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:39,567]\u001b[0m Trial 18 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:40,910]\u001b[0m Trial 19 finished with value: 0.5315793527934071 and parameters: {'lambda_l1': 0.013209811084277438, 'lambda_l2': 0.06726287443306718, 'num_leaves': 231, 'feature_fraction': 0.637876115312563, 'bagging_fraction': 0.8716817438233221, 'bagging_freq': 3, 'min_child_samples': 44}. Best is trial 19 with value: 0.5315793527934071.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:42,324]\u001b[0m Trial 20 finished with value: 0.5262550702976 and parameters: {'lambda_l1': 0.003113216663103784, 'lambda_l2': 0.24783337352027449, 'num_leaves': 242, 'feature_fraction': 0.6137607392321572, 'bagging_fraction': 0.8936473883645825, 'bagging_freq': 3, 'min_child_samples': 44}. Best is trial 20 with value: 0.5262550702976.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:43,182]\u001b[0m Trial 21 finished with value: 0.5297204439253479 and parameters: {'lambda_l1': 0.003978575421253158, 'lambda_l2': 0.11658574784670588, 'num_leaves': 254, 'feature_fraction': 0.6168955771891016, 'bagging_fraction': 0.8787433941466242, 'bagging_freq': 3, 'min_child_samples': 43}. Best is trial 20 with value: 0.5262550702976.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:43,310]\u001b[0m Trial 22 pruned. Trial was pruned at iteration 13.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:44,194]\u001b[0m Trial 23 finished with value: 0.5288803542960898 and parameters: {'lambda_l1': 8.341315535148706e-06, 'lambda_l2': 0.24030532904199514, 'num_leaves': 221, 'feature_fraction': 0.5901567080654435, 'bagging_fraction': 0.8875920940685119, 'bagging_freq': 3, 'min_child_samples': 25}. Best is trial 20 with value: 0.5262550702976.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:44,331]\u001b[0m Trial 24 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:45,248]\u001b[0m Trial 25 finished with value: 0.5310533243420329 and parameters: {'lambda_l1': 1.9500743317527624e-07, 'lambda_l2': 0.0502751774074574, 'num_leaves': 214, 'feature_fraction': 0.5734739546088348, 'bagging_fraction': 0.9115976551406307, 'bagging_freq': 2, 'min_child_samples': 24}. Best is trial 20 with value: 0.5262550702976.\u001b[0m\n" + "\u001b[32m[I 2021-09-07 16:27:35,965]\u001b[0m A new study created in memory with name: no-name-cc424a48-83c6-4329-92a3-261d08a5edb9\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:36,779]\u001b[0m Trial 0 finished with value: 0.5178928385298298 and parameters: {'lambda_l1': 0.9464949700025425, 'lambda_l2': 1.4692321446797693e-05, 'num_leaves': 135, 'feature_fraction': 0.6448685329355106, 'bagging_fraction': 0.9048970068857253, 'bagging_freq': 7, 'min_child_samples': 30}. Best is trial 0 with value: 0.5178928385298298.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:37,641]\u001b[0m Trial 1 finished with value: 0.5336017055299863 and parameters: {'lambda_l1': 0.6354399847075458, 'lambda_l2': 4.87700419620481, 'num_leaves': 237, 'feature_fraction': 0.895991820221385, 'bagging_fraction': 0.6704364287317786, 'bagging_freq': 3, 'min_child_samples': 49}. Best is trial 0 with value: 0.5178928385298298.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:38,288]\u001b[0m Trial 2 finished with value: 0.5345721353318472 and parameters: {'lambda_l1': 9.52099217128906e-05, 'lambda_l2': 1.1454590646374039e-07, 'num_leaves': 170, 'feature_fraction': 0.7899599630691868, 'bagging_fraction': 0.5059741439893897, 'bagging_freq': 2, 'min_child_samples': 50}. 
Best is trial 0 with value: 0.5178928385298298.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:38,956]\u001b[0m Trial 3 finished with value: 0.543693048566443 and parameters: {'lambda_l1': 0.1481577233973877, 'lambda_l2': 3.179336503731134e-07, 'num_leaves': 128, 'feature_fraction': 0.9783519466402901, 'bagging_fraction': 0.6889127950806107, 'bagging_freq': 5, 'min_child_samples': 37}. Best is trial 0 with value: 0.5178928385298298.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:39,424]\u001b[0m Trial 4 finished with value: 0.5501760993688642 and parameters: {'lambda_l1': 3.7638689664291936, 'lambda_l2': 0.020384770050374573, 'num_leaves': 175, 'feature_fraction': 0.4840841073088404, 'bagging_fraction': 0.8334033110992205, 'bagging_freq': 5, 'min_child_samples': 35}. Best is trial 0 with value: 0.5178928385298298.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:39,493]\u001b[0m Trial 5 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:39,774]\u001b[0m Trial 6 pruned. Trial was pruned at iteration 33.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:39,828]\u001b[0m Trial 7 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:40,187]\u001b[0m Trial 8 finished with value: 0.5249490156742078 and parameters: {'lambda_l1': 8.186728837628555e-05, 'lambda_l2': 2.6479736415938615, 'num_leaves': 72, 'feature_fraction': 0.7599981859922114, 'bagging_fraction': 0.9692259725506919, 'bagging_freq': 5, 'min_child_samples': 38}. Best is trial 0 with value: 0.5178928385298298.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:40,299]\u001b[0m Trial 9 pruned. Trial was pruned at iteration 11.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:40,353]\u001b[0m Trial 10 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:40,398]\u001b[0m Trial 11 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:40,470]\u001b[0m Trial 12 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:41,280]\u001b[0m Trial 13 finished with value: 0.5325142394678348 and parameters: {'lambda_l1': 0.009551573559223583, 'lambda_l2': 0.0003114297759461472, 'num_leaves': 152, 'feature_fraction': 0.8236610445865283, 'bagging_fraction': 0.8389170404125451, 'bagging_freq': 4, 'min_child_samples': 20}. Best is trial 0 with value: 0.5178928385298298.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:41,391]\u001b[0m Trial 14 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:42,015]\u001b[0m Trial 15 finished with value: 0.5314302398651312 and parameters: {'lambda_l1': 0.024227381422510946, 'lambda_l2': 0.33276527010426776, 'num_leaves': 91, 'feature_fraction': 0.7874026441153832, 'bagging_fraction': 0.7899555875604796, 'bagging_freq': 7, 'min_child_samples': 19}. Best is trial 0 with value: 0.5178928385298298.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:42,091]\u001b[0m Trial 16 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:42,154]\u001b[0m Trial 17 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:42,226]\u001b[0m Trial 18 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:42,338]\u001b[0m Trial 19 pruned. Trial was pruned at iteration 14.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:42,384]\u001b[0m Trial 20 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:42,829]\u001b[0m Trial 21 finished with value: 0.5245233417218278 and parameters: {'lambda_l1': 0.07296901453015396, 'lambda_l2': 0.19272462963641587, 'num_leaves': 90, 'feature_fraction': 0.752955332537125, 'bagging_fraction': 0.7944607588474272, 'bagging_freq': 7, 'min_child_samples': 15}. Best is trial 0 with value: 0.5178928385298298.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:42,901]\u001b[0m Trial 22 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:43,287]\u001b[0m Trial 23 pruned. Trial was pruned at iteration 97.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:43,371]\u001b[0m Trial 24 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:43,489]\u001b[0m Trial 25 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:43,572]\u001b[0m Trial 26 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:43,632]\u001b[0m Trial 27 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:44,171]\u001b[0m Trial 28 finished with value: 0.5255422897694476 and parameters: {'lambda_l1': 0.028260092955639447, 'lambda_l2': 0.08413723627697263, 'num_leaves': 74, 'feature_fraction': 0.8042469147694201, 'bagging_fraction': 0.7548217217698655, 'bagging_freq': 3, 'min_child_samples': 42}. Best is trial 0 with value: 0.5178928385298298.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:44,331]\u001b[0m Trial 29 pruned. Trial was pruned at iteration 13.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:44,516]\u001b[0m Trial 30 pruned. Trial was pruned at iteration 16.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:44,637]\u001b[0m Trial 31 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:44,723]\u001b[0m Trial 32 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:45,592]\u001b[0m Trial 33 finished with value: 0.5218146276752049 and parameters: {'lambda_l1': 0.0016497886750146257, 'lambda_l2': 0.21484669274687151, 'num_leaves': 133, 'feature_fraction': 0.6885034648040802, 'bagging_fraction': 0.9281320338482011, 'bagging_freq': 3, 'min_child_samples': 42}. Best is trial 0 with value: 0.5178928385298298.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:45,711]\u001b[0m Trial 34 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:46,669]\u001b[0m Trial 35 finished with value: 0.5205542553680208 and parameters: {'lambda_l1': 4.3493248665689235e-05, 'lambda_l2': 3.680853885692289, 'num_leaves': 169, 'feature_fraction': 0.7104567429350184, 'bagging_fraction': 0.8672533271233644, 'bagging_freq': 1, 'min_child_samples': 23}. Best is trial 0 with value: 0.5178928385298298.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:47,676]\u001b[0m Trial 36 finished with value: 0.5206023202395521 and parameters: {'lambda_l1': 0.0006187306261428898, 'lambda_l2': 6.130428276943865e-08, 'num_leaves': 168, 'feature_fraction': 0.7036284508576037, 'bagging_fraction': 0.868428878180794, 'bagging_freq': 2, 'min_child_samples': 15}. Best is trial 0 with value: 0.5178928385298298.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:47,808]\u001b[0m Trial 37 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:47,937]\u001b[0m Trial 38 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:48,844]\u001b[0m Trial 39 finished with value: 0.5191042263340773 and parameters: {'lambda_l1': 0.0005562321404093257, 'lambda_l2': 2.232696278872472e-08, 'num_leaves': 161, 'feature_fraction': 0.710527336424428, 'bagging_fraction': 0.9309118257124477, 'bagging_freq': 1, 'min_child_samples': 24}. Best is trial 0 with value: 0.5178928385298298.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:49,889]\u001b[0m Trial 40 finished with value: 0.5168505162425636 and parameters: {'lambda_l1': 0.0004441190594738051, 'lambda_l2': 1.0707946566246148e-08, 'num_leaves': 190, 'feature_fraction': 0.7132737394337006, 'bagging_fraction': 0.8296082050311246, 'bagging_freq': 1, 'min_child_samples': 16}. Best is trial 40 with value: 0.5168505162425636.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:50,929]\u001b[0m Trial 41 finished with value: 0.5262832333602245 and parameters: {'lambda_l1': 0.0004892025280466791, 'lambda_l2': 1.89573604152611e-08, 'num_leaves': 189, 'feature_fraction': 0.7134600565845959, 'bagging_fraction': 0.8334328552244646, 'bagging_freq': 1, 'min_child_samples': 16}. Best is trial 40 with value: 0.5168505162425636.\u001b[0m\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "\u001b[32m[I 2021-09-07 09:26:46,126]\u001b[0m Trial 26 finished with value: 0.5310399189972692 and parameters: {'lambda_l1': 5.65218762871428e-06, 'lambda_l2': 1.6944446205708956, 'num_leaves': 256, 'feature_fraction': 0.6693624417062835, 'bagging_fraction': 0.8606541857435407, 'bagging_freq': 4, 'min_child_samples': 25}. Best is trial 20 with value: 0.5262550702976.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:47,541]\u001b[0m Trial 27 finished with value: 0.5289141422774571 and parameters: {'lambda_l1': 3.372780137131952e-07, 'lambda_l2': 0.004231573716537988, 'num_leaves': 222, 'feature_fraction': 0.5834642111209242, 'bagging_fraction': 0.9283224808721449, 'bagging_freq': 3, 'min_child_samples': 33}. Best is trial 20 with value: 0.5262550702976.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:47,734]\u001b[0m Trial 28 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:47,883]\u001b[0m Trial 29 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:48,032]\u001b[0m Trial 30 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:48,233]\u001b[0m Trial 31 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:48,470]\u001b[0m Trial 32 pruned. Trial was pruned at iteration 12.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:50,256]\u001b[0m Trial 33 finished with value: 0.5307511312800164 and parameters: {'lambda_l1': 8.331346294111473e-08, 'lambda_l2': 0.018173663934055492, 'num_leaves': 238, 'feature_fraction': 0.5874989674601676, 'bagging_fraction': 0.8876263960076438, 'bagging_freq': 2, 'min_child_samples': 30}. Best is trial 20 with value: 0.5262550702976.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:52,081]\u001b[0m Trial 34 finished with value: 0.5364692667074208 and parameters: {'lambda_l1': 8.939808862490256e-07, 'lambda_l2': 1.3094120587455667, 'num_leaves': 249, 'feature_fraction': 0.739630943171398, 'bagging_fraction': 0.9999691110926188, 'bagging_freq': 3, 'min_child_samples': 17}. Best is trial 20 with value: 0.5262550702976.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:52,228]\u001b[0m Trial 35 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:52,401]\u001b[0m Trial 36 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:52,527]\u001b[0m Trial 37 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:52,646]\u001b[0m Trial 38 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:52,746]\u001b[0m Trial 39 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:53,973]\u001b[0m Trial 40 finished with value: 0.5256364275490798 and parameters: {'lambda_l1': 0.0710216045724967, 'lambda_l2': 0.10020172057765622, 'num_leaves': 206, 'feature_fraction': 0.6441352660567405, 'bagging_fraction': 0.8538258125595672, 'bagging_freq': 5, 'min_child_samples': 8}. Best is trial 40 with value: 0.5256364275490798.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:54,896]\u001b[0m Trial 41 finished with value: 0.5233364787469251 and parameters: {'lambda_l1': 0.23671365201549266, 'lambda_l2': 0.0749439220683321, 'num_leaves': 210, 'feature_fraction': 0.6436961501856725, 'bagging_fraction': 0.8521968194803821, 'bagging_freq': 5, 'min_child_samples': 5}. Best is trial 41 with value: 0.5233364787469251.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:55,831]\u001b[0m Trial 42 finished with value: 0.5293150200028972 and parameters: {'lambda_l1': 0.6574943326302751, 'lambda_l2': 0.0361443234291568, 'num_leaves': 209, 'feature_fraction': 0.6596272574104582, 'bagging_fraction': 0.850673456207462, 'bagging_freq': 6, 'min_child_samples': 5}. Best is trial 41 with value: 0.5233364787469251.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:56,858]\u001b[0m Trial 43 finished with value: 0.5308606295175099 and parameters: {'lambda_l1': 0.07618036054920922, 'lambda_l2': 0.22544030283002933, 'num_leaves': 226, 'feature_fraction': 0.6984185669610476, 'bagging_fraction': 0.7845150865412134, 'bagging_freq': 5, 'min_child_samples': 10}. Best is trial 41 with value: 0.5233364787469251.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:57,019]\u001b[0m Trial 44 pruned. Trial was pruned at iteration 13.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:57,787]\u001b[0m Trial 45 finished with value: 0.5331475267867624 and parameters: {'lambda_l1': 1.4967349278475324, 'lambda_l2': 0.0008951722845905283, 'num_leaves': 207, 'feature_fraction': 0.7339992851750987, 'bagging_fraction': 0.919959739142468, 'bagging_freq': 5, 'min_child_samples': 26}. Best is trial 41 with value: 0.5233364787469251.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:57,865]\u001b[0m Trial 46 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:57,971]\u001b[0m Trial 47 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:58,069]\u001b[0m Trial 48 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:58,168]\u001b[0m Trial 49 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:58,317]\u001b[0m Trial 50 pruned. Trial was pruned at iteration 30.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:58,445]\u001b[0m Trial 51 pruned. 
Trial was pruned at iteration 13.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:59,558]\u001b[0m Trial 52 finished with value: 0.5287423259473492 and parameters: {'lambda_l1': 0.44209025556975773, 'lambda_l2': 0.02237337201530471, 'num_leaves': 180, 'feature_fraction': 0.6816257587535688, 'bagging_fraction': 0.8645245816570197, 'bagging_freq': 7, 'min_child_samples': 5}. Best is trial 41 with value: 0.5233364787469251.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:00,776]\u001b[0m Trial 53 finished with value: 0.5248984867652216 and parameters: {'lambda_l1': 0.02970872423104034, 'lambda_l2': 0.0197319338622143, 'num_leaves': 177, 'feature_fraction': 0.7150825328146555, 'bagging_fraction': 0.8972252525350597, 'bagging_freq': 7, 'min_child_samples': 12}. Best is trial 41 with value: 0.5233364787469251.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:01,886]\u001b[0m Trial 54 finished with value: 0.5310139507545261 and parameters: {'lambda_l1': 0.03922536031082296, 'lambda_l2': 1.7618527190204045e-06, 'num_leaves': 180, 'feature_fraction': 0.7089331672809696, 'bagging_fraction': 0.8021837464466384, 'bagging_freq': 7, 'min_child_samples': 11}. Best is trial 41 with value: 0.5233364787469251.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:02,043]\u001b[0m Trial 55 pruned. Trial was pruned at iteration 13.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:02,158]\u001b[0m Trial 56 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:02,937]\u001b[0m Trial 57 pruned. Trial was pruned at iteration 78.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:03,912]\u001b[0m Trial 58 finished with value: 0.5310990808119093 and parameters: {'lambda_l1': 0.008908141404368873, 'lambda_l2': 1.0024604099274968e-08, 'num_leaves': 156, 'feature_fraction': 0.7135958388327297, 'bagging_fraction': 0.8918708775644101, 'bagging_freq': 7, 'min_child_samples': 10}. Best is trial 41 with value: 0.5233364787469251.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:05,509]\u001b[0m Trial 59 finished with value: 0.5303075036750562 and parameters: {'lambda_l1': 0.0036415543019748704, 'lambda_l2': 0.060450481568631226, 'num_leaves': 245, 'feature_fraction': 0.6322922933331399, 'bagging_fraction': 0.8119277832780933, 'bagging_freq': 6, 'min_child_samples': 18}. Best is trial 41 with value: 0.5233364787469251.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:05,675]\u001b[0m Trial 60 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:06,717]\u001b[0m Trial 61 finished with value: 0.5266068329070613 and parameters: {'lambda_l1': 0.08187221145217663, 'lambda_l2': 0.12038514531389062, 'num_leaves': 230, 'feature_fraction': 0.5893130926352276, 'bagging_fraction': 0.9218811557192532, 'bagging_freq': 3, 'min_child_samples': 30}. Best is trial 41 with value: 0.5233364787469251.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:06,839]\u001b[0m Trial 62 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:06,957]\u001b[0m Trial 63 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:07,060]\u001b[0m Trial 64 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:07,235]\u001b[0m Trial 65 pruned. Trial was pruned at iteration 13.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:07,371]\u001b[0m Trial 66 pruned. 
Trial was pruned at iteration 13.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:08,252]\u001b[0m Trial 67 finished with value: 0.524615381244906 and parameters: {'lambda_l1': 0.0007372437461333507, 'lambda_l2': 0.07197819258619385, 'num_leaves': 217, 'feature_fraction': 0.7085126717217893, 'bagging_fraction': 0.9783185592011208, 'bagging_freq': 4, 'min_child_samples': 7}. Best is trial 41 with value: 0.5233364787469251.\u001b[0m\n" + "\u001b[32m[I 2021-09-07 16:27:51,023]\u001b[0m Trial 42 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:51,805]\u001b[0m Trial 43 finished with value: 0.516065766031832 and parameters: {'lambda_l1': 0.00021869661941011055, 'lambda_l2': 1.0674672013946999e-08, 'num_leaves': 201, 'feature_fraction': 0.7039965594752932, 'bagging_fraction': 0.8165783106186105, 'bagging_freq': 1, 'min_child_samples': 10}. Best is trial 43 with value: 0.516065766031832.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:51,940]\u001b[0m Trial 44 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:52,044]\u001b[0m Trial 45 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:52,218]\u001b[0m Trial 46 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:53,067]\u001b[0m Trial 47 finished with value: 0.5256815758791472 and parameters: {'lambda_l1': 6.162602167779085e-05, 'lambda_l2': 1.6462819728079273e-07, 'num_leaves': 201, 'feature_fraction': 0.7163097853297453, 'bagging_fraction': 0.9420618119436382, 'bagging_freq': 1, 'min_child_samples': 24}. Best is trial 43 with value: 0.516065766031832.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:53,178]\u001b[0m Trial 48 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:53,762]\u001b[0m Trial 49 finished with value: 0.5213791636070063 and parameters: {'lambda_l1': 2.774750232046531e-07, 'lambda_l2': 2.4404405074646332e-08, 'num_leaves': 154, 'feature_fraction': 0.6882189607163384, 'bagging_fraction': 0.9871043449165129, 'bagging_freq': 1, 'min_child_samples': 9}. Best is trial 43 with value: 0.516065766031832.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:53,875]\u001b[0m Trial 50 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:54,587]\u001b[0m Trial 51 finished with value: 0.5174618905113623 and parameters: {'lambda_l1': 0.000381507052985562, 'lambda_l2': 4.901891994722806e-08, 'num_leaves': 166, 'feature_fraction': 0.7117978094083592, 'bagging_fraction': 0.8956613846537137, 'bagging_freq': 1, 'min_child_samples': 12}. Best is trial 43 with value: 0.516065766031832.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:55,304]\u001b[0m Trial 52 finished with value: 0.5219780933938188 and parameters: {'lambda_l1': 0.0003055316771241415, 'lambda_l2': 1.0975399369852818e-08, 'num_leaves': 175, 'feature_fraction': 0.7709159871191618, 'bagging_fraction': 0.8946659725371712, 'bagging_freq': 1, 'min_child_samples': 12}. Best is trial 43 with value: 0.516065766031832.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:56,481]\u001b[0m Trial 53 finished with value: 0.522019841081264 and parameters: {'lambda_l1': 3.0853077659712017e-06, 'lambda_l2': 3.167435996190143e-08, 'num_leaves': 199, 'feature_fraction': 0.700370780125537, 'bagging_fraction': 0.9659440325481246, 'bagging_freq': 1, 'min_child_samples': 5}. 
Best is trial 43 with value: 0.516065766031832.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:57,422]\u001b[0m Trial 54 finished with value: 0.5210662969810745 and parameters: {'lambda_l1': 7.857450656938453e-05, 'lambda_l2': 4.5159973730477044e-07, 'num_leaves': 153, 'feature_fraction': 0.7263108852936352, 'bagging_fraction': 0.936630215722916, 'bagging_freq': 1, 'min_child_samples': 19}. Best is trial 43 with value: 0.516065766031832.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:57,568]\u001b[0m Trial 55 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:57,678]\u001b[0m Trial 56 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:57,803]\u001b[0m Trial 57 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:57,889]\u001b[0m Trial 58 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:58,284]\u001b[0m Trial 59 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:58,510]\u001b[0m Trial 60 pruned. Trial was pruned at iteration 15.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:59,683]\u001b[0m Trial 61 finished with value: 0.5238780309760502 and parameters: {'lambda_l1': 0.0005622649168938934, 'lambda_l2': 7.041773458860283e-08, 'num_leaves': 165, 'feature_fraction': 0.6971774484774765, 'bagging_fraction': 0.8845735572162268, 'bagging_freq': 2, 'min_child_samples': 16}. Best is trial 43 with value: 0.516065766031832.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:59,867]\u001b[0m Trial 62 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:01,236]\u001b[0m Trial 63 finished with value: 0.5172018048571797 and parameters: {'lambda_l1': 4.4774427854289624e-05, 'lambda_l2': 1.0431434685856767e-07, 'num_leaves': 175, 'feature_fraction': 0.7067550902029666, 'bagging_fraction': 0.7960327498051027, 'bagging_freq': 2, 'min_child_samples': 16}. Best is trial 43 with value: 0.516065766031832.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:01,369]\u001b[0m Trial 64 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:01,513]\u001b[0m Trial 65 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:02,393]\u001b[0m Trial 66 pruned. Trial was pruned at iteration 76.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:02,529]\u001b[0m Trial 67 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:02,721]\u001b[0m Trial 68 pruned. Trial was pruned at iteration 12.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:03,060]\u001b[0m Trial 69 pruned. Trial was pruned at iteration 44.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:03,135]\u001b[0m Trial 70 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:03,486]\u001b[0m Trial 71 pruned. Trial was pruned at iteration 56.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:03,587]\u001b[0m Trial 72 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:04,107]\u001b[0m Trial 73 pruned. Trial was pruned at iteration 68.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:04,203]\u001b[0m Trial 74 pruned. Trial was pruned at iteration 11.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:04,307]\u001b[0m Trial 75 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:04,631]\u001b[0m Trial 76 pruned. 
Trial was pruned at iteration 39.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:04,725]\u001b[0m Trial 77 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:04,831]\u001b[0m Trial 78 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:04,920]\u001b[0m Trial 79 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:04,992]\u001b[0m Trial 80 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:05,099]\u001b[0m Trial 81 pruned. Trial was pruned at iteration 11.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:05,766]\u001b[0m Trial 82 finished with value: 0.5178938881576146 and parameters: {'lambda_l1': 1.2203483524616316e-08, 'lambda_l2': 5.507928324956204e-07, 'num_leaves': 185, 'feature_fraction': 0.7219415365227362, 'bagging_fraction': 0.9438920094595108, 'bagging_freq': 1, 'min_child_samples': 17}. Best is trial 43 with value: 0.516065766031832.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:05,866]\u001b[0m Trial 83 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:05,961]\u001b[0m Trial 84 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:06,778]\u001b[0m Trial 85 finished with value: 0.5122528701381253 and parameters: {'lambda_l1': 6.371822640459341e-07, 'lambda_l2': 3.03967794068636e-08, 'num_leaves': 189, 'feature_fraction': 0.7066521328434867, 'bagging_fraction': 0.8972190570020028, 'bagging_freq': 1, 'min_child_samples': 11}. Best is trial 85 with value: 0.5122528701381253.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:07,709]\u001b[0m Trial 86 finished with value: 0.5128398316765018 and parameters: {'lambda_l1': 1.5059254193489838e-07, 'lambda_l2': 2.6171717314455622e-08, 'num_leaves': 192, 'feature_fraction': 0.711268040370677, 'bagging_fraction': 0.9174432395706226, 'bagging_freq': 1, 'min_child_samples': 11}. Best is trial 85 with value: 0.5122528701381253.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:09,411]\u001b[0m Trial 87 finished with value: 0.5191456815789754 and parameters: {'lambda_l1': 5.687878085793806e-08, 'lambda_l2': 2.6772763929664963e-08, 'num_leaves': 216, 'feature_fraction': 0.7413398142797374, 'bagging_fraction': 0.9993988673163303, 'bagging_freq': 1, 'min_child_samples': 11}. Best is trial 85 with value: 0.5122528701381253.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:09,813]\u001b[0m Trial 88 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:12,526]\u001b[0m Trial 89 finished with value: 0.5102536883069437 and parameters: {'lambda_l1': 1.6446541932580473e-07, 'lambda_l2': 1.0734412329614426e-07, 'num_leaves': 193, 'feature_fraction': 0.8123395009389368, 'bagging_fraction': 0.9184649865458648, 'bagging_freq': 1, 'min_child_samples': 10}. Best is trial 89 with value: 0.5102536883069437.\u001b[0m\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "\u001b[32m[I 2021-09-07 09:27:09,210]\u001b[0m Trial 68 finished with value: 0.524942396458539 and parameters: {'lambda_l1': 0.0011590093830295628, 'lambda_l2': 0.06353809127053359, 'num_leaves': 198, 'feature_fraction': 0.7106295012909083, 'bagging_fraction': 0.973655849802224, 'bagging_freq': 5, 'min_child_samples': 7}. Best is trial 41 with value: 0.5233364787469251.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:09,572]\u001b[0m Trial 69 pruned. 
Trial was pruned at iteration 29.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:10,511]\u001b[0m Trial 70 finished with value: 0.5227354154865925 and parameters: {'lambda_l1': 0.0019204609823774911, 'lambda_l2': 0.006957053438847566, 'num_leaves': 200, 'feature_fraction': 0.7174135338967843, 'bagging_fraction': 0.9747628414441452, 'bagging_freq': 5, 'min_child_samples': 8}. Best is trial 70 with value: 0.5227354154865925.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:11,445]\u001b[0m Trial 71 pruned. Trial was pruned at iteration 72.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:12,707]\u001b[0m Trial 72 finished with value: 0.5273794666616337 and parameters: {'lambda_l1': 0.00253464769953144, 'lambda_l2': 0.04250761519699366, 'num_leaves': 203, 'feature_fraction': 0.723989872880134, 'bagging_fraction': 0.9954485306248265, 'bagging_freq': 5, 'min_child_samples': 10}. Best is trial 70 with value: 0.5227354154865925.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:14,118]\u001b[0m Trial 73 finished with value: 0.5290081420408789 and parameters: {'lambda_l1': 0.0014187815549416435, 'lambda_l2': 0.12791352246804705, 'num_leaves': 218, 'feature_fraction': 0.7758653557130019, 'bagging_fraction': 0.9488928693793105, 'bagging_freq': 5, 'min_child_samples': 18}. Best is trial 70 with value: 0.5227354154865925.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:14,196]\u001b[0m Trial 74 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:15,517]\u001b[0m Trial 75 finished with value: 0.5325397214424649 and parameters: {'lambda_l1': 0.0005872739303922063, 'lambda_l2': 0.0096136159649407, 'num_leaves': 211, 'feature_fraction': 0.8012899648531023, 'bagging_fraction': 0.931501707649951, 'bagging_freq': 4, 'min_child_samples': 13}. Best is trial 70 with value: 0.5227354154865925.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:16,903]\u001b[0m Trial 76 finished with value: 0.5296781377317934 and parameters: {'lambda_l1': 0.006787205212552698, 'lambda_l2': 0.0790698959620889, 'num_leaves': 239, 'feature_fraction': 0.7454753912863651, 'bagging_fraction': 0.9799306593714261, 'bagging_freq': 5, 'min_child_samples': 7}. Best is trial 70 with value: 0.5227354154865925.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:17,956]\u001b[0m Trial 77 finished with value: 0.5309336374271006 and parameters: {'lambda_l1': 0.0037427478770181, 'lambda_l2': 0.8130901726120456, 'num_leaves': 232, 'feature_fraction': 0.7224358819185711, 'bagging_fraction': 0.9520121758601578, 'bagging_freq': 4, 'min_child_samples': 12}. Best is trial 70 with value: 0.5227354154865925.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:18,071]\u001b[0m Trial 78 pruned. Trial was pruned at iteration 13.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:19,373]\u001b[0m Trial 79 finished with value: 0.5347521802448649 and parameters: {'lambda_l1': 0.017570019491239652, 'lambda_l2': 0.05059260326524094, 'num_leaves': 226, 'feature_fraction': 0.6981808494111595, 'bagging_fraction': 0.9993645575870086, 'bagging_freq': 4, 'min_child_samples': 20}. Best is trial 70 with value: 0.5227354154865925.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:19,471]\u001b[0m Trial 80 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:21,230]\u001b[0m Trial 81 finished with value: 0.5227732830528738 and parameters: {'lambda_l1': 0.0003058538457597125, 'lambda_l2': 0.04324937485563167, 'num_leaves': 202, 'feature_fraction': 0.73206644303025, 'bagging_fraction': 0.9911170826848787, 'bagging_freq': 5, 'min_child_samples': 10}. Best is trial 70 with value: 0.5227354154865925.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:22,266]\u001b[0m Trial 82 finished with value: 0.5279916258627607 and parameters: {'lambda_l1': 0.0002580487853233371, 'lambda_l2': 0.015002378165105253, 'num_leaves': 213, 'feature_fraction': 0.665112654945876, 'bagging_fraction': 0.9341515771267435, 'bagging_freq': 5, 'min_child_samples': 11}. Best is trial 70 with value: 0.5227354154865925.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:26,040]\u001b[0m Trial 83 pruned. Trial was pruned at iteration 51.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:29,359]\u001b[0m Trial 84 finished with value: 0.5257703648298483 and parameters: {'lambda_l1': 0.09313058634431053, 'lambda_l2': 0.0794176561682967, 'num_leaves': 174, 'feature_fraction': 0.7330255912724464, 'bagging_fraction': 0.9556166881594967, 'bagging_freq': 6, 'min_child_samples': 14}. Best is trial 70 with value: 0.5227354154865925.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:29,749]\u001b[0m Trial 85 pruned. Trial was pruned at iteration 30.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:29,919]\u001b[0m Trial 86 pruned. Trial was pruned at iteration 17.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:30,026]\u001b[0m Trial 87 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:30,861]\u001b[0m Trial 88 finished with value: 0.5270306752854106 and parameters: {'lambda_l1': 0.0003495273937948516, 'lambda_l2': 0.057992141622577896, 'num_leaves': 167, 'feature_fraction': 0.7406493057210397, 'bagging_fraction': 0.8812843020741449, 'bagging_freq': 5, 'min_child_samples': 10}. Best is trial 70 with value: 0.5227354154865925.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:30,933]\u001b[0m Trial 89 pruned. Trial was pruned at iteration 11.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:31,029]\u001b[0m Trial 90 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:31,358]\u001b[0m Trial 91 pruned. Trial was pruned at iteration 48.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:31,714]\u001b[0m Trial 92 pruned. Trial was pruned at iteration 41.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:32,543]\u001b[0m Trial 93 finished with value: 0.5264391883178012 and parameters: {'lambda_l1': 0.055429809241931895, 'lambda_l2': 0.013315511259061849, 'num_leaves': 235, 'feature_fraction': 0.6209901742544794, 'bagging_fraction': 0.9603625243176316, 'bagging_freq': 5, 'min_child_samples': 9}. Best is trial 70 with value: 0.5227354154865925.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:32,825]\u001b[0m Trial 94 pruned. Trial was pruned at iteration 40.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:32,940]\u001b[0m Trial 95 pruned. Trial was pruned at iteration 13.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:33,525]\u001b[0m Trial 96 finished with value: 0.521566218012535 and parameters: {'lambda_l1': 0.001072724146880552, 'lambda_l2': 0.043446953857601264, 'num_leaves': 142, 'feature_fraction': 0.6922532844601723, 'bagging_fraction': 0.9898097804978216, 'bagging_freq': 5, 'min_child_samples': 5}. 
Best is trial 96 with value: 0.521566218012535.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:34,149]\u001b[0m Trial 97 finished with value: 0.5196727190561812 and parameters: {'lambda_l1': 0.0025915189209523223, 'lambda_l2': 0.37941398551753863, 'num_leaves': 159, 'feature_fraction': 0.6942886423659413, 'bagging_fraction': 0.9888819332176355, 'bagging_freq': 6, 'min_child_samples': 5}. Best is trial 97 with value: 0.5196727190561812.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:34,855]\u001b[0m Trial 98 finished with value: 0.5265000531725292 and parameters: {'lambda_l1': 0.0006868362572582551, 'lambda_l2': 0.040525833784093586, 'num_leaves': 143, 'feature_fraction': 0.6930600995921747, 'bagging_fraction': 0.9897374239207342, 'bagging_freq': 6, 'min_child_samples': 6}. Best is trial 97 with value: 0.5196727190561812.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:34,935]\u001b[0m Trial 99 pruned. Trial was pruned at iteration 10.\u001b[0m\n" + "\u001b[32m[I 2021-09-07 16:28:12,792]\u001b[0m Trial 90 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:13,099]\u001b[0m Trial 91 pruned. Trial was pruned at iteration 13.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:13,456]\u001b[0m Trial 92 pruned. Trial was pruned at iteration 14.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:13,664]\u001b[0m Trial 93 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:14,351]\u001b[0m Trial 94 pruned. Trial was pruned at iteration 45.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:14,508]\u001b[0m Trial 95 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:14,828]\u001b[0m Trial 96 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:14,962]\u001b[0m Trial 97 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:16,197]\u001b[0m Trial 98 finished with value: 0.5171555001746302 and parameters: {'lambda_l1': 1.0103137663726183e-08, 'lambda_l2': 1.7418953701287595e-06, 'num_leaves': 203, 'feature_fraction': 0.7265189263478641, 'bagging_fraction': 0.9317931154547426, 'bagging_freq': 1, 'min_child_samples': 10}. Best is trial 89 with value: 0.5102536883069437.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:16,557]\u001b[0m Trial 99 pruned. 
Trial was pruned at iteration 16.\u001b[0m\n" ] }, { @@ -526,108 +545,635 @@ "text": [ "Number of finished trials: 100\n", "Best trial:\n", - " Value: 0.5196727190561812\n", + " Value: 0.5102536883069437\n", " Params: \n", - " lambda_l1: 0.0025915189209523223\n", - " lambda_l2: 0.37941398551753863\n", - " num_leaves: 159\n", - " feature_fraction: 0.6942886423659413\n", - " bagging_fraction: 0.9888819332176355\n", - " bagging_freq: 6\n", - " min_child_samples: 5\n" + " lambda_l1: 1.6446541932580473e-07\n", + " lambda_l2: 1.0734412329614426e-07\n", + " num_leaves: 193\n", + " feature_fraction: 0.8123395009389368\n", + " bagging_fraction: 0.9184649865458648\n", + " bagging_freq: 1\n", + " min_child_samples: 10\n", + "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000712 seconds.\n", + "You can set `force_col_wise=true` to remove the overhead.\n", + "[LightGBM] [Info] Total Bins 1837\n", + "[LightGBM] [Info] Number of data points in the train set: 14860, number of used features: 8\n", + "[LightGBM] [Info] Start training from score 2.073247\n", + "Training until validation scores don't improve for 20 rounds\n", + "[50]\tvalid_0's rmse: 0.453164\n", + "[100]\tvalid_0's rmse: 0.442958\n", + "Early stopping, best iteration is:\n", + "[110]\tvalid_0's rmse: 0.442157\n" ] - } - ], - "source": [ - "if __name__ == \"__main__\":\n", - " study = optuna.create_study(\n", - " pruner=optuna.pruners.MedianPruner(n_warmup_steps=10), direction=\"minimize\"\n", - " )\n", - " study.optimize(lambda trial: objective(trial, train_x, valid_x, train_y, valid_y), n_trials=100)\n", - "\n", - " print(\"Number of finished trials: {}\".format(len(study.trials)))\n", - "\n", - " print(\"Best trial:\")\n", - " trial = study.best_trial\n", - "\n", - " print(\" Value: {}\".format(trial.value))\n", - "\n", - " print(\" Params: \")\n", - " for key, value in trial.params.items():\n", - " print(\" {}: {}\".format(key, value))" - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "id": "681d9cc2", - "metadata": {}, - "outputs": [ + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[32m[I 2021-09-07 16:28:17,677]\u001b[0m Trial 100 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:17,796]\u001b[0m Trial 101 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:17,941]\u001b[0m Trial 102 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:18,090]\u001b[0m Trial 103 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:18,293]\u001b[0m Trial 104 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:18,412]\u001b[0m Trial 105 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:18,524]\u001b[0m Trial 106 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:18,647]\u001b[0m Trial 107 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:18,797]\u001b[0m Trial 108 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:18,911]\u001b[0m Trial 109 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:19,029]\u001b[0m Trial 110 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:19,211]\u001b[0m Trial 111 pruned. 
Trial was pruned at iteration 16.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:19,595]\u001b[0m Trial 112 pruned. Trial was pruned at iteration 30.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:19,744]\u001b[0m Trial 113 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:19,853]\u001b[0m Trial 114 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:20,146]\u001b[0m Trial 115 pruned. Trial was pruned at iteration 23.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:20,267]\u001b[0m Trial 116 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:20,420]\u001b[0m Trial 117 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:20,594]\u001b[0m Trial 118 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:20,780]\u001b[0m Trial 119 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:20,942]\u001b[0m Trial 120 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:21,073]\u001b[0m Trial 121 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:21,410]\u001b[0m Trial 122 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:21,601]\u001b[0m Trial 123 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:21,799]\u001b[0m Trial 124 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:21,944]\u001b[0m Trial 125 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:22,080]\u001b[0m Trial 126 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:22,209]\u001b[0m Trial 127 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:22,367]\u001b[0m Trial 128 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:22,509]\u001b[0m Trial 129 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:22,673]\u001b[0m Trial 130 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:22,754]\u001b[0m Trial 131 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:22,923]\u001b[0m Trial 132 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:23,074]\u001b[0m Trial 133 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:23,442]\u001b[0m Trial 134 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:23,682]\u001b[0m Trial 135 pruned. Trial was pruned at iteration 16.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:23,824]\u001b[0m Trial 136 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:24,010]\u001b[0m Trial 137 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:24,160]\u001b[0m Trial 138 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:24,458]\u001b[0m Trial 139 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:24,678]\u001b[0m Trial 140 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:24,849]\u001b[0m Trial 141 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:25,081]\u001b[0m Trial 142 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:25,256]\u001b[0m Trial 143 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:25,501]\u001b[0m Trial 144 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:26,208]\u001b[0m Trial 145 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:26,709]\u001b[0m Trial 146 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:26,919]\u001b[0m Trial 147 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:27,534]\u001b[0m Trial 148 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:27,729]\u001b[0m Trial 149 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:28,152]\u001b[0m Trial 150 pruned. Trial was pruned at iteration 24.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:28,278]\u001b[0m Trial 151 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:28,393]\u001b[0m Trial 152 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:28,543]\u001b[0m Trial 153 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:28,653]\u001b[0m Trial 154 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:28,757]\u001b[0m Trial 155 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:28,887]\u001b[0m Trial 156 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:29,001]\u001b[0m Trial 157 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:29,176]\u001b[0m Trial 158 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:29,332]\u001b[0m Trial 159 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:29,541]\u001b[0m Trial 160 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:29,732]\u001b[0m Trial 161 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:29,884]\u001b[0m Trial 162 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:29,997]\u001b[0m Trial 163 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:30,103]\u001b[0m Trial 164 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:30,225]\u001b[0m Trial 165 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:30,330]\u001b[0m Trial 166 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:30,487]\u001b[0m Trial 167 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:30,579]\u001b[0m Trial 168 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:30,685]\u001b[0m Trial 169 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:30,798]\u001b[0m Trial 170 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:30,902]\u001b[0m Trial 171 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:30,999]\u001b[0m Trial 172 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:31,099]\u001b[0m Trial 173 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:31,206]\u001b[0m Trial 174 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:31,352]\u001b[0m Trial 175 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:31,476]\u001b[0m Trial 176 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:31,716]\u001b[0m Trial 177 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:31,834]\u001b[0m Trial 178 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:31,983]\u001b[0m Trial 179 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:32,099]\u001b[0m Trial 180 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:32,272]\u001b[0m Trial 181 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:32,431]\u001b[0m Trial 182 pruned. Trial was pruned at iteration 12.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:32,546]\u001b[0m Trial 183 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:32,656]\u001b[0m Trial 184 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:32,763]\u001b[0m Trial 185 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:32,965]\u001b[0m Trial 186 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:33,183]\u001b[0m Trial 187 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:33,409]\u001b[0m Trial 188 pruned. Trial was pruned at iteration 10.\u001b[0m\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[32m[I 2021-09-07 16:28:33,571]\u001b[0m Trial 189 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:33,719]\u001b[0m Trial 190 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:33,919]\u001b[0m Trial 191 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:34,112]\u001b[0m Trial 192 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:34,276]\u001b[0m Trial 193 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:34,426]\u001b[0m Trial 194 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:34,633]\u001b[0m Trial 195 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:34,781]\u001b[0m Trial 196 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:35,002]\u001b[0m Trial 197 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:35,242]\u001b[0m Trial 198 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:35,408]\u001b[0m Trial 199 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n" + ] + }, { "name": "stdout", "output_type": "stream", "text": [ - "{'boosting_type': 'gbdt', 'objective': 'regression', 'metric': 'rmse', 'lambda_l1': 0.0025915189209523223, 'lambda_l2': 0.37941398551753863, 'num_leaves': 159, 'feature_fraction': 0.6942886423659413, 'bagging_fraction': 0.9888819332176355, 'bagging_freq': 6, 'min_child_samples': 5}\n" + "Number of finished trials: 200\n", + "Best trial:\n", + " Value: 0.5102536883069437\n", + " Params: \n", + " lambda_l1: 1.6446541932580473e-07\n", + " lambda_l2: 1.0734412329614426e-07\n", + " num_leaves: 193\n", + " feature_fraction: 0.8123395009389368\n", + " bagging_fraction: 0.9184649865458648\n", + " bagging_freq: 1\n", + " min_child_samples: 10\n", + "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000777 seconds.\n", + "You can set `force_col_wise=true` to remove the overhead.\n", + "[LightGBM] [Info] Total Bins 1838\n", + "[LightGBM] [Info] Number of data points in the train set: 14861, number of used features: 8\n", + "[LightGBM] [Info] Start training from score 2.070396\n", + "Training until validation scores don't improve for 20 rounds\n", + "[50]\tvalid_0's rmse: 0.465875\n", + "Early stopping, best iteration is:\n", + "[76]\tvalid_0's rmse: 0.460117\n" ] - } - ], - "source": [ - "params = {'boosting_type': 'gbdt',\n", - " 'objective': 'regression',\n", - " \"metric\": 'rmse'}\n", - "for key, value in trial.params.items():\n", - " params[key]=value\n", - "print(params)" - ] - }, - { - "cell_type": "code", - "execution_count": 25, - "id": "f5534bab", - "metadata": {}, - "outputs": [ + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[32m[I 2021-09-07 16:28:36,810]\u001b[0m Trial 200 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:36,973]\u001b[0m Trial 201 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:37,136]\u001b[0m Trial 202 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:37,255]\u001b[0m Trial 203 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:37,402]\u001b[0m Trial 204 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:37,545]\u001b[0m Trial 205 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:37,742]\u001b[0m Trial 206 pruned. Trial was pruned at iteration 11.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:37,864]\u001b[0m Trial 207 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:38,005]\u001b[0m Trial 208 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:38,155]\u001b[0m Trial 209 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:38,307]\u001b[0m Trial 210 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:38,419]\u001b[0m Trial 211 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:38,537]\u001b[0m Trial 212 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:38,637]\u001b[0m Trial 213 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:38,786]\u001b[0m Trial 214 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:38,962]\u001b[0m Trial 215 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:39,091]\u001b[0m Trial 216 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:39,246]\u001b[0m Trial 217 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:39,368]\u001b[0m Trial 218 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:39,505]\u001b[0m Trial 219 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:39,637]\u001b[0m Trial 220 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:39,737]\u001b[0m Trial 221 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:39,847]\u001b[0m Trial 222 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:39,948]\u001b[0m Trial 223 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:40,046]\u001b[0m Trial 224 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:40,138]\u001b[0m Trial 225 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:40,235]\u001b[0m Trial 226 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:40,324]\u001b[0m Trial 227 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:40,398]\u001b[0m Trial 228 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:40,483]\u001b[0m Trial 229 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:40,621]\u001b[0m Trial 230 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:40,779]\u001b[0m Trial 231 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:40,860]\u001b[0m Trial 232 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:40,937]\u001b[0m Trial 233 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:41,077]\u001b[0m Trial 234 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:41,188]\u001b[0m Trial 235 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:41,312]\u001b[0m Trial 236 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:41,431]\u001b[0m Trial 237 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:41,533]\u001b[0m Trial 238 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:41,664]\u001b[0m Trial 239 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:41,790]\u001b[0m Trial 240 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:41,902]\u001b[0m Trial 241 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:42,009]\u001b[0m Trial 242 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:42,110]\u001b[0m Trial 243 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:42,233]\u001b[0m Trial 244 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:42,443]\u001b[0m Trial 245 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:42,587]\u001b[0m Trial 246 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:42,674]\u001b[0m Trial 247 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:42,779]\u001b[0m Trial 248 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:42,859]\u001b[0m Trial 249 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:42,958]\u001b[0m Trial 250 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:43,070]\u001b[0m Trial 251 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:43,257]\u001b[0m Trial 252 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:43,356]\u001b[0m Trial 253 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:43,462]\u001b[0m Trial 254 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:43,566]\u001b[0m Trial 255 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:43,627]\u001b[0m Trial 256 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:43,743]\u001b[0m Trial 257 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:43,856]\u001b[0m Trial 258 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:43,945]\u001b[0m Trial 259 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:44,059]\u001b[0m Trial 260 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:44,164]\u001b[0m Trial 261 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:44,279]\u001b[0m Trial 262 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:44,377]\u001b[0m Trial 263 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:44,478]\u001b[0m Trial 264 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:44,585]\u001b[0m Trial 265 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:44,688]\u001b[0m Trial 266 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:44,794]\u001b[0m Trial 267 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:44,914]\u001b[0m Trial 268 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:45,016]\u001b[0m Trial 269 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:45,111]\u001b[0m Trial 270 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:45,248]\u001b[0m Trial 271 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:45,424]\u001b[0m Trial 272 pruned. Trial was pruned at iteration 11.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:45,578]\u001b[0m Trial 273 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:45,809]\u001b[0m Trial 274 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:45,937]\u001b[0m Trial 275 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:46,068]\u001b[0m Trial 276 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:46,210]\u001b[0m Trial 277 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:46,357]\u001b[0m Trial 278 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:46,505]\u001b[0m Trial 279 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:46,662]\u001b[0m Trial 280 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:46,828]\u001b[0m Trial 281 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:46,927]\u001b[0m Trial 282 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:47,157]\u001b[0m Trial 283 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:47,394]\u001b[0m Trial 284 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:47,564]\u001b[0m Trial 285 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:47,701]\u001b[0m Trial 286 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:47,936]\u001b[0m Trial 287 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:48,091]\u001b[0m Trial 288 pruned. Trial was pruned at iteration 10.\u001b[0m\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[32m[I 2021-09-07 16:28:48,268]\u001b[0m Trial 289 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:48,470]\u001b[0m Trial 290 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:48,650]\u001b[0m Trial 291 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:48,799]\u001b[0m Trial 292 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:48,953]\u001b[0m Trial 293 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:49,112]\u001b[0m Trial 294 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:49,267]\u001b[0m Trial 295 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:49,454]\u001b[0m Trial 296 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:49,631]\u001b[0m Trial 297 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:49,744]\u001b[0m Trial 298 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:49,933]\u001b[0m Trial 299 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n" + ] + }, { "name": "stdout", "output_type": "stream", "text": [ - "[LightGBM] [Warning] Auto-choosing row-wise multi-threading, the overhead of testing was 0.000585 seconds.\n", - "You can set `force_row_wise=true` to remove the overhead.\n", - "And if memory is not enough, you can set `force_col_wise=true`.\n", + "Number of finished trials: 300\n", + "Best trial:\n", + " Value: 0.5102536883069437\n", + " Params: \n", + " lambda_l1: 1.6446541932580473e-07\n", + " lambda_l2: 1.0734412329614426e-07\n", + " num_leaves: 193\n", + " feature_fraction: 0.8123395009389368\n", + " bagging_fraction: 0.9184649865458648\n", + " bagging_freq: 1\n", + " min_child_samples: 10\n", + "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000797 seconds.\n", + "You can set `force_col_wise=true` to remove the overhead.\n", "[LightGBM] [Info] Total Bins 1837\n", - "[LightGBM] [Info] Number of data points in the train set: 13932, number of used features: 8\n", - "[LightGBM] [Info] Start training from score 2.072422\n", + "[LightGBM] [Info] Number of data points in the train set: 14861, number of used features: 8\n", + "[LightGBM] [Info] Start training from score 2.069154\n", + "Training until validation scores don't improve for 20 rounds\n", + "[50]\tvalid_0's rmse: 0.451573\n", + "Early stopping, best iteration is:\n", + "[63]\tvalid_0's rmse: 0.44707\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[32m[I 2021-09-07 16:28:51,004]\u001b[0m Trial 300 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:51,159]\u001b[0m Trial 301 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:51,315]\u001b[0m Trial 302 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:51,412]\u001b[0m Trial 303 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:51,543]\u001b[0m Trial 304 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:51,686]\u001b[0m Trial 305 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:51,842]\u001b[0m Trial 306 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:51,977]\u001b[0m Trial 307 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:52,105]\u001b[0m Trial 308 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:52,202]\u001b[0m Trial 309 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:52,341]\u001b[0m Trial 310 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:52,426]\u001b[0m Trial 311 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:52,522]\u001b[0m Trial 312 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:52,642]\u001b[0m Trial 313 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:52,755]\u001b[0m Trial 314 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:52,880]\u001b[0m Trial 315 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:52,980]\u001b[0m Trial 316 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:53,093]\u001b[0m Trial 317 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:53,202]\u001b[0m Trial 318 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:53,333]\u001b[0m Trial 319 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:53,488]\u001b[0m Trial 320 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:53,634]\u001b[0m Trial 321 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:53,749]\u001b[0m Trial 322 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:53,863]\u001b[0m Trial 323 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:53,983]\u001b[0m Trial 324 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:54,088]\u001b[0m Trial 325 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:54,198]\u001b[0m Trial 326 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:54,321]\u001b[0m Trial 327 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:54,417]\u001b[0m Trial 328 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:54,486]\u001b[0m Trial 329 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:54,630]\u001b[0m Trial 330 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:54,711]\u001b[0m Trial 331 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:54,838]\u001b[0m Trial 332 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:54,968]\u001b[0m Trial 333 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:55,077]\u001b[0m Trial 334 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:55,179]\u001b[0m Trial 335 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:55,319]\u001b[0m Trial 336 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:55,518]\u001b[0m Trial 337 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:55,609]\u001b[0m Trial 338 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:55,741]\u001b[0m Trial 339 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:55,903]\u001b[0m Trial 340 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:56,024]\u001b[0m Trial 341 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:56,140]\u001b[0m Trial 342 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:56,425]\u001b[0m Trial 343 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:56,577]\u001b[0m Trial 344 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:56,697]\u001b[0m Trial 345 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:56,814]\u001b[0m Trial 346 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:56,931]\u001b[0m Trial 347 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:57,047]\u001b[0m Trial 348 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:57,165]\u001b[0m Trial 349 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:57,318]\u001b[0m Trial 350 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:57,470]\u001b[0m Trial 351 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:57,640]\u001b[0m Trial 352 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:57,826]\u001b[0m Trial 353 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:57,991]\u001b[0m Trial 354 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:58,135]\u001b[0m Trial 355 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:58,300]\u001b[0m Trial 356 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:58,479]\u001b[0m Trial 357 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:58,638]\u001b[0m Trial 358 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:58,813]\u001b[0m Trial 359 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:59,027]\u001b[0m Trial 360 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:59,183]\u001b[0m Trial 361 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:59,346]\u001b[0m Trial 362 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:59,510]\u001b[0m Trial 363 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:59,627]\u001b[0m Trial 364 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:59,792]\u001b[0m Trial 365 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:59,954]\u001b[0m Trial 366 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:00,188]\u001b[0m Trial 367 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:00,393]\u001b[0m Trial 368 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:00,557]\u001b[0m Trial 369 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:00,707]\u001b[0m Trial 370 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:00,836]\u001b[0m Trial 371 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:01,018]\u001b[0m Trial 372 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:01,171]\u001b[0m Trial 373 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:01,355]\u001b[0m Trial 374 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:01,634]\u001b[0m Trial 375 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:01,803]\u001b[0m Trial 376 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:01,949]\u001b[0m Trial 377 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:02,110]\u001b[0m Trial 378 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:02,225]\u001b[0m Trial 379 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:02,409]\u001b[0m Trial 380 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:02,574]\u001b[0m Trial 381 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:02,733]\u001b[0m Trial 382 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:02,895]\u001b[0m Trial 383 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:03,059]\u001b[0m Trial 384 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:03,184]\u001b[0m Trial 385 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:03,408]\u001b[0m Trial 386 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:03,583]\u001b[0m Trial 387 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:03,729]\u001b[0m Trial 388 pruned. Trial was pruned at iteration 10.\u001b[0m\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[32m[I 2021-09-07 16:29:03,924]\u001b[0m Trial 389 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:04,120]\u001b[0m Trial 390 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:04,292]\u001b[0m Trial 391 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:04,406]\u001b[0m Trial 392 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:04,514]\u001b[0m Trial 393 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:04,671]\u001b[0m Trial 394 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:04,760]\u001b[0m Trial 395 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:04,879]\u001b[0m Trial 396 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:05,049]\u001b[0m Trial 397 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:05,188]\u001b[0m Trial 398 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:05,277]\u001b[0m Trial 399 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Number of finished trials: 400\n", + "Best trial:\n", + " Value: 0.5102536883069437\n", + " Params: \n", + " lambda_l1: 1.6446541932580473e-07\n", + " lambda_l2: 1.0734412329614426e-07\n", + " num_leaves: 193\n", + " feature_fraction: 0.8123395009389368\n", + " bagging_fraction: 0.9184649865458648\n", + " bagging_freq: 1\n", + " min_child_samples: 10\n", + "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000424 seconds.\n", + "You can set `force_col_wise=true` to remove the overhead.\n", + "[LightGBM] [Info] Total Bins 1838\n", + "[LightGBM] [Info] Number of data points in the train set: 14861, number of used features: 8\n", + "[LightGBM] [Info] Start training from score 2.063366\n", + "Training until validation scores don't improve for 20 rounds\n", + "[50]\tvalid_0's rmse: 0.458105\n", + "[100]\tvalid_0's rmse: 0.449598\n", + "[150]\tvalid_0's rmse: 0.449092\n", + "Early stopping, best iteration is:\n", + "[138]\tvalid_0's rmse: 0.448682\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[32m[I 2021-09-07 16:29:06,435]\u001b[0m Trial 400 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:06,586]\u001b[0m Trial 401 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:06,756]\u001b[0m Trial 402 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:06,867]\u001b[0m Trial 403 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:06,968]\u001b[0m Trial 404 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:07,051]\u001b[0m Trial 405 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:07,151]\u001b[0m Trial 406 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:07,290]\u001b[0m Trial 407 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:07,400]\u001b[0m Trial 408 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:07,530]\u001b[0m Trial 409 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:07,649]\u001b[0m Trial 410 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:07,752]\u001b[0m Trial 411 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:07,880]\u001b[0m Trial 412 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:08,002]\u001b[0m Trial 413 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:08,140]\u001b[0m Trial 414 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:08,264]\u001b[0m Trial 415 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:08,434]\u001b[0m Trial 416 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:08,553]\u001b[0m Trial 417 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:08,677]\u001b[0m Trial 418 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:08,794]\u001b[0m Trial 419 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:08,902]\u001b[0m Trial 420 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:09,060]\u001b[0m Trial 421 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:09,181]\u001b[0m Trial 422 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:09,279]\u001b[0m Trial 423 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:09,517]\u001b[0m Trial 424 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:09,691]\u001b[0m Trial 425 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:09,858]\u001b[0m Trial 426 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:10,005]\u001b[0m Trial 427 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:10,197]\u001b[0m Trial 428 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:10,377]\u001b[0m Trial 429 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:10,567]\u001b[0m Trial 430 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:10,749]\u001b[0m Trial 431 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:10,908]\u001b[0m Trial 432 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:11,012]\u001b[0m Trial 433 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:11,264]\u001b[0m Trial 434 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:11,460]\u001b[0m Trial 435 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:11,636]\u001b[0m Trial 436 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:11,805]\u001b[0m Trial 437 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:11,985]\u001b[0m Trial 438 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:12,116]\u001b[0m Trial 439 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:12,257]\u001b[0m Trial 440 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:12,455]\u001b[0m Trial 441 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:12,626]\u001b[0m Trial 442 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:12,785]\u001b[0m Trial 443 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:12,916]\u001b[0m Trial 444 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:13,059]\u001b[0m Trial 445 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:13,202]\u001b[0m Trial 446 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:13,402]\u001b[0m Trial 447 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:13,571]\u001b[0m Trial 448 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:13,751]\u001b[0m Trial 449 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:13,967]\u001b[0m Trial 450 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:14,171]\u001b[0m Trial 451 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:14,354]\u001b[0m Trial 452 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:14,580]\u001b[0m Trial 453 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:14,763]\u001b[0m Trial 454 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:14,944]\u001b[0m Trial 455 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:15,124]\u001b[0m Trial 456 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:15,326]\u001b[0m Trial 457 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:15,508]\u001b[0m Trial 458 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:15,707]\u001b[0m Trial 459 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:15,875]\u001b[0m Trial 460 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:16,043]\u001b[0m Trial 461 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:16,205]\u001b[0m Trial 462 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:16,355]\u001b[0m Trial 463 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:16,499]\u001b[0m Trial 464 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:16,601]\u001b[0m Trial 465 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:16,757]\u001b[0m Trial 466 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:16,899]\u001b[0m Trial 467 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:17,040]\u001b[0m Trial 468 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:17,256]\u001b[0m Trial 469 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:17,440]\u001b[0m Trial 470 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:17,613]\u001b[0m Trial 471 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:17,735]\u001b[0m Trial 472 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:17,851]\u001b[0m Trial 473 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:17,940]\u001b[0m Trial 474 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:18,015]\u001b[0m Trial 475 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:18,140]\u001b[0m Trial 476 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:18,318]\u001b[0m Trial 477 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:18,485]\u001b[0m Trial 478 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:18,594]\u001b[0m Trial 479 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:18,767]\u001b[0m Trial 480 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:18,864]\u001b[0m Trial 481 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:19,000]\u001b[0m Trial 482 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:19,141]\u001b[0m Trial 483 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:19,241]\u001b[0m Trial 484 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:19,362]\u001b[0m Trial 485 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:19,512]\u001b[0m Trial 486 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:19,651]\u001b[0m Trial 487 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:19,777]\u001b[0m Trial 488 pruned. Trial was pruned at iteration 10.\u001b[0m\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[32m[I 2021-09-07 16:29:19,892]\u001b[0m Trial 489 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:20,026]\u001b[0m Trial 490 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:20,152]\u001b[0m Trial 491 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:20,291]\u001b[0m Trial 492 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:20,385]\u001b[0m Trial 493 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:20,508]\u001b[0m Trial 494 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:20,644]\u001b[0m Trial 495 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:20,766]\u001b[0m Trial 496 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:20,912]\u001b[0m Trial 497 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:21,093]\u001b[0m Trial 498 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:21,228]\u001b[0m Trial 499 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Number of finished trials: 500\n", + "Best trial:\n", + " Value: 0.5102536883069437\n", + " Params: \n", + " lambda_l1: 1.6446541932580473e-07\n", + " lambda_l2: 1.0734412329614426e-07\n", + " num_leaves: 193\n", + " feature_fraction: 0.8123395009389368\n", + " bagging_fraction: 0.9184649865458648\n", + " bagging_freq: 1\n", + " min_child_samples: 10\n", + "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.001110 seconds.\n", + "You can set `force_col_wise=true` to remove the overhead.\n", + "[LightGBM] [Info] Total Bins 1838\n", + "[LightGBM] [Info] Number of data points in the train set: 14861, number of used features: 8\n", + "[LightGBM] [Info] Start training from score 2.066924\n", "Training until validation scores don't improve for 20 rounds\n", - "[50]\tvalid_0's rmse: 0.459286\n", - "[100]\tvalid_0's rmse: 0.449051\n", - "[150]\tvalid_0's rmse: 0.448356\n", + "[50]\tvalid_0's rmse: 0.444941\n", + "[100]\tvalid_0's rmse: 0.437147\n", "Early stopping, best iteration is:\n", - "[166]\tvalid_0's rmse: 0.447872\n" + "[90]\tvalid_0's rmse: 0.43678\n", + "Wall time: 1min 46s\n" ] } ], "source": [ - "dtrain = lgb.Dataset(train_x, label=train_y)\n", - "dvalid = lgb.Dataset(valid_x, label=valid_y)\n", - "model = lgb.train(params=params, train_set=dtrain,valid_sets=[dvalid],\n", - " verbose_eval=50,\n", - " early_stopping_rounds=20,\n", - " num_boost_round=5000)\n", - "predict = model.predict(test_x)" + "%%time\n", + "if __name__ == \"__main__\":\n", + " test_predict = np.zeros(shape=[test_x.shape[0], 5],dtype=float)\n", + " study = optuna.create_study(\n", + " pruner=optuna.pruners.MedianPruner(n_warmup_steps=10), direction=\"minimize\" # 指定是越小越好\n", + " )\n", + " for i, (trn_idx, val_idx) in enumerate(skf.split(train_x, train_y)):\n", + " study.optimize(lambda trial: objective(trial, train_x.iloc[trn_idx], train_y.iloc[trn_idx], \n", + " train_x.iloc[val_idx], train_y.iloc[val_idx]), n_trials=100)\n", + "\n", + " print(\"Number of finished trials: {}\".format(len(study.trials)))\n", + "\n", + " print(\"Best trial:\")\n", + " trial = study.best_trial\n", + "\n", + " print(\" Value: {}\".format(trial.value))\n", + "\n", + " print(\" Params: \")\n", + " for key, value in trial.params.items():\n", + " print(\" {}: {}\".format(key, value))\n", + " \n", + " params = {'boosting_type': 'gbdt',\n", + " 'objective': 'regression',\n", + " \"metric\": 'rmse'}\n", + " for key, value in trial.params.items():\n", + " params[key]=value\n", + " \n", + " dtrain = lgb.Dataset(train_x.iloc[trn_idx], label=train_y.iloc[trn_idx])\n", + " dvalid = lgb.Dataset(train_x.iloc[val_idx], label=train_y.iloc[val_idx])\n", + " model = lgb.train(params=params, train_set=dtrain,valid_sets=[dvalid],\n", + " verbose_eval=50,\n", + " early_stopping_rounds=20,\n", + " num_boost_round=5000)\n", + " test_predict[:,i] = model.predict(test_x)\n", + " predict = np.mean(test_predict,axis=1)" ] }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 13, "id": "f28d82da", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "0.44403838770137805" + "0.432341765333029" ] }, - "execution_count": 26, + "execution_count": 13, "metadata": {}, "output_type": "execute_result" } @@ -639,17 +1185,17 @@ }, { "cell_type": "markdown", - "id": "5e292bdb", + "id": "0c7ee3c5", "metadata": {}, "source": [ "### 回归任务的结论\n", - 
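The `study.optimize(...)` loop shown above relies on an `objective(trial, trn_x, trn_y, val_x, val_y)` function that is defined earlier in the notebook and not repeated in this section. As a minimal sketch only (assuming an Optuna `LightGBMPruningCallback` and a search space inferred from the trial logs above, both of which may differ from the notebook's actual definition), such an objective can look like this:

```python
import numpy as np
import lightgbm as lgb
from optuna.integration import LightGBMPruningCallback


def objective(trial, trn_x, trn_y, val_x, val_y):
    # Hypothetical search space: ranges chosen to cover the parameter values
    # seen in the trial logs above; the original notebook's ranges may differ.
    params = {
        "boosting_type": "gbdt",
        "objective": "regression",
        "metric": "rmse",
        "lambda_l1": trial.suggest_float("lambda_l1", 1e-8, 10.0, log=True),
        "lambda_l2": trial.suggest_float("lambda_l2", 1e-8, 10.0, log=True),
        "num_leaves": trial.suggest_int("num_leaves", 2, 256),
        "feature_fraction": trial.suggest_float("feature_fraction", 0.4, 1.0),
        "bagging_fraction": trial.suggest_float("bagging_fraction", 0.4, 1.0),
        "bagging_freq": trial.suggest_int("bagging_freq", 1, 7),
        "min_child_samples": trial.suggest_int("min_child_samples", 5, 100),
    }
    dtrain = lgb.Dataset(trn_x, label=trn_y)
    dvalid = lgb.Dataset(val_x, label=val_y)
    # The pruning callback reports valid_0's rmse after every boosting round,
    # which is what allows MedianPruner(n_warmup_steps=10) to stop weak trials
    # at iteration 10 or later, as seen in the logs.
    model = lgb.train(
        params,
        dtrain,
        valid_sets=[dvalid],
        num_boost_round=1000,
        early_stopping_rounds=20,
        verbose_eval=False,
        callbacks=[LightGBMPruningCallback(trial, "rmse")],
    )
    preds = model.predict(val_x)
    return float(np.sqrt(np.mean((np.ravel(val_y) - preds) ** 2)))
```

Because a single `study` object is reused across all five folds, the MedianPruner's baseline includes trials from every fold processed so far, which is why almost every trial after the first fold is pruned right at the 10-iteration warmup in the output above.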
"不使用optuna的分数是0.4531666044672748,使用的分数是0.44403838770137805,提升了0.00912821676589675。" + "不使用optuna的分数是0.4346521330333544,使用的分数是0.432341765333029,提升了0.0023103677003254。作者测了很多次,基本再0.003-0.002之间,感兴趣的可以多跑几次。" ] }, { "cell_type": "code", "execution_count": null, - "id": "36384535", + "id": "82ea6cab", "metadata": {}, "outputs": [], "source": [] diff --git a/竞赛优胜技巧/AutoAI Tools.ipynb b/竞赛优胜技巧/AutoAI Tools.ipynb index 3744e19..7388f26 100644 --- a/竞赛优胜技巧/AutoAI Tools.ipynb +++ b/竞赛优胜技巧/AutoAI Tools.ipynb @@ -28,7 +28,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 1, "id": "f5e0e977", "metadata": {}, "outputs": [], @@ -181,85 +181,160 @@ "name": "stdout", "output_type": "stream", "text": [ - "训练集: (13932, 8)\n", - "验证集: (4644, 8)\n", + "训练集: (18576, 8)\n", "测试集: (2064, 8)\n" ] } ], "source": [ "# 切分训练和测试集\n", - "train_valid_x, test_x, train_valid_y, test_y = train_test_split(X, y,random_state=42,test_size=0.1)\n", + "train_x, test_x, train_y, test_y = train_test_split(X, y,random_state=42,test_size=0.1)\n", + "train_y = pd.DataFrame(train_y,columns=['result'])\n", + "test_y = pd.DataFrame(test_y,columns=['result'])\n", "\n", - "# 切分训练和验证集\n", - "train_x, valid_x, train_y, valid_y = train_test_split(train_valid_x, train_valid_y,random_state=42)\n", "print('训练集:',train_x.shape)\n", - "print('验证集:',valid_x.shape)\n", "print('测试集:', test_x.shape)" ] }, + { + "cell_type": "code", + "execution_count": 6, + "id": "69c4867e", + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.model_selection import KFold\n", + "skf = KFold(n_splits=5, shuffle=True, random_state=42)" + ] + }, { "cell_type": "markdown", "id": "e87a2121", "metadata": {}, "source": [ - "### 使用LGB作为模型,不使用optuna调参" + "### 使用LGB作为模型并CV,不使用optuna调参" ] }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 7, "id": "5cad8967", - "metadata": {}, + "metadata": { + "scrolled": true + }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.001822 seconds.\n", + "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000866 seconds.\n", "You can set `force_col_wise=true` to remove the overhead.\n", "[LightGBM] [Info] Total Bins 1837\n", - "[LightGBM] [Info] Number of data points in the train set: 13932, number of used features: 8\n", - "[LightGBM] [Info] Start training from score 2.072422\n", + "[LightGBM] [Info] Number of data points in the train set: 14860, number of used features: 8\n", + "[LightGBM] [Info] Start training from score 2.073247\n", "Training until validation scores don't improve for 20 rounds\n", "[50]\tvalid_0's rmse: 0.492877\n", - "[100]\tvalid_0's rmse: 0.471803\n", - "[150]\tvalid_0's rmse: 0.46445\n", - "[200]\tvalid_0's rmse: 0.459716\n", - "[250]\tvalid_0's rmse: 0.456658\n", - "[300]\tvalid_0's rmse: 0.454223\n", - "[350]\tvalid_0's rmse: 0.452644\n", + "[100]\tvalid_0's rmse: 0.469235\n", + "[150]\tvalid_0's rmse: 0.460357\n", + "[200]\tvalid_0's rmse: 0.455302\n", + "[250]\tvalid_0's rmse: 0.452674\n", + "[300]\tvalid_0's rmse: 0.451179\n", + "Early stopping, best iteration is:\n", + "[313]\tvalid_0's rmse: 0.450303\n", + "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000494 seconds.\n", + "You can set `force_col_wise=true` to remove the overhead.\n", + "[LightGBM] [Info] Total Bins 1838\n", + "[LightGBM] [Info] Number of data points in the train set: 14861, number of used 
features: 8\n", + "[LightGBM] [Info] Start training from score 2.070396\n", + "Training until validation scores don't improve for 20 rounds\n", + "[50]\tvalid_0's rmse: 0.496484\n", + "[100]\tvalid_0's rmse: 0.476225\n", + "[150]\tvalid_0's rmse: 0.469582\n", + "[200]\tvalid_0's rmse: 0.464643\n", + "[250]\tvalid_0's rmse: 0.463244\n", + "[300]\tvalid_0's rmse: 0.461886\n", + "[350]\tvalid_0's rmse: 0.460305\n", + "[400]\tvalid_0's rmse: 0.4596\n", + "[450]\tvalid_0's rmse: 0.458645\n", + "Early stopping, best iteration is:\n", + "[435]\tvalid_0's rmse: 0.458368\n", + "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000800 seconds.\n", + "You can set `force_col_wise=true` to remove the overhead.\n", + "[LightGBM] [Info] Total Bins 1837\n", + "[LightGBM] [Info] Number of data points in the train set: 14861, number of used features: 8\n", + "[LightGBM] [Info] Start training from score 2.069154\n", + "Training until validation scores don't improve for 20 rounds\n", + "[50]\tvalid_0's rmse: 0.48676\n", + "[100]\tvalid_0's rmse: 0.466911\n", + "[150]\tvalid_0's rmse: 0.459693\n", + "[200]\tvalid_0's rmse: 0.457631\n", + "[250]\tvalid_0's rmse: 0.452574\n", "Early stopping, best iteration is:\n", - "[342]\tvalid_0's rmse: 0.452522\n" + "[264]\tvalid_0's rmse: 0.451947\n", + "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000779 seconds.\n", + "You can set `force_col_wise=true` to remove the overhead.\n", + "[LightGBM] [Info] Total Bins 1838\n", + "[LightGBM] [Info] Number of data points in the train set: 14861, number of used features: 8\n", + "[LightGBM] [Info] Start training from score 2.063366\n", + "Training until validation scores don't improve for 20 rounds\n", + "[50]\tvalid_0's rmse: 0.492482\n", + "[100]\tvalid_0's rmse: 0.468929\n", + "[150]\tvalid_0's rmse: 0.459713\n", + "[200]\tvalid_0's rmse: 0.456203\n", + "[250]\tvalid_0's rmse: 0.454063\n", + "[300]\tvalid_0's rmse: 0.451901\n", + "Early stopping, best iteration is:\n", + "[322]\tvalid_0's rmse: 0.450961\n", + "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000694 seconds.\n", + "You can set `force_col_wise=true` to remove the overhead.\n", + "[LightGBM] [Info] Total Bins 1838\n", + "[LightGBM] [Info] Number of data points in the train set: 14861, number of used features: 8\n", + "[LightGBM] [Info] Start training from score 2.066924\n", + "Training until validation scores don't improve for 20 rounds\n", + "[50]\tvalid_0's rmse: 0.488385\n", + "[100]\tvalid_0's rmse: 0.464851\n", + "[150]\tvalid_0's rmse: 0.455183\n", + "[200]\tvalid_0's rmse: 0.450506\n", + "[250]\tvalid_0's rmse: 0.4461\n", + "[300]\tvalid_0's rmse: 0.44438\n", + "[350]\tvalid_0's rmse: 0.441808\n", + "[400]\tvalid_0's rmse: 0.44079\n", + "Early stopping, best iteration is:\n", + "[411]\tvalid_0's rmse: 0.440543\n", + "Wall time: 3.44 s\n" ] } ], "source": [ + "%%time\n", + "test_predict = np.zeros(shape=[test_x.shape[0], 5],dtype=float)\n", "params = {'boosting_type': 'gbdt',\n", " 'objective': 'regression',\n", " \"metric\": 'rmse'}\n", - "dtrain = lgb.Dataset(train_x, label=train_y)\n", - "dvalid = lgb.Dataset(valid_x, label=valid_y)\n", - "model = lgb.train(params=params, train_set=dtrain,valid_sets=[dvalid],\n", - " verbose_eval=50,\n", - " early_stopping_rounds=20,\n", - " num_boost_round=5000)\n", - "predict = model.predict(test_x)" + "for i, (trn_idx, val_idx) in enumerate(skf.split(train_x, train_y)):\n", + " dtrain = 
lgb.Dataset(train_x.iloc[trn_idx], label=train_y.iloc[trn_idx])\n", + " dvalid = lgb.Dataset(train_x.iloc[val_idx], label=train_y.iloc[val_idx])\n", + " model = lgb.train(params=params, train_set=dtrain,valid_sets=[dvalid],\n", + " verbose_eval=50,\n", + " early_stopping_rounds=20,\n", + " num_boost_round=5000)\n", + " test_predict[:,i] = model.predict(test_x)\n", + "predict = np.mean(test_predict,axis=1)" ] }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 8, "id": "886bfdad", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "0.4531666044672748" + "0.4346521330333544" ] }, - "execution_count": 14, + "execution_count": 8, "metadata": {}, "output_type": "execute_result" } @@ -274,80 +349,24 @@ "id": "c271442c", "metadata": {}, "source": [ - "### 使用LGB作为模型,使用optuna调参" + "### 使用LGB作为模型并CV,使用optuna调参" ] }, { "cell_type": "code", - "execution_count": 41, + "execution_count": 9, "id": "9a87f9db", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Looking in indexes: https://pypi.tuna.tsinghua.edu.cn/simple\n", - "Collecting optuna\n", - " Downloading https://pypi.tuna.tsinghua.edu.cn/packages/2a/b4/061c8721b5da1614794d1b66fcb212eee156efd5284f66854d02f295b0be/optuna-2.9.1-py3-none-any.whl (302 kB)\n", - "Requirement already satisfied: tqdm in d:\\programdata\\anaconda3\\lib\\site-packages (from optuna) (4.59.0)\n", - "Collecting cmaes>=0.8.2\n", - " Downloading https://pypi.tuna.tsinghua.edu.cn/packages/01/1f/43b01223a0366171f474320c6e966c39a11587287f098a5f09809b45e05f/cmaes-0.8.2-py3-none-any.whl (15 kB)\n", - "Requirement already satisfied: scipy!=1.4.0 in d:\\programdata\\anaconda3\\lib\\site-packages (from optuna) (1.4.1)\n", - "Collecting cliff\n", - " Downloading https://pypi.tuna.tsinghua.edu.cn/packages/18/f7/2a98b032a43b2925ea32bc13a8feb6cf9416e7d2b2c0f6d2ce14636a03b1/cliff-3.9.0-py3-none-any.whl (80 kB)\n", - "Collecting alembic\n", - " Downloading https://pypi.tuna.tsinghua.edu.cn/packages/7a/5a/be479a2c379e6b3c57dc56ea3b139ad4d46c2d244a0035ac4d7475116076/alembic-1.7.1-py3-none-any.whl (208 kB)\n", - "Requirement already satisfied: packaging>=20.0 in d:\\programdata\\anaconda3\\lib\\site-packages (from optuna) (20.9)\n", - "Requirement already satisfied: numpy in d:\\programdata\\anaconda3\\lib\\site-packages (from optuna) (1.18.5)\n", - "Collecting colorlog\n", - " Downloading https://pypi.tuna.tsinghua.edu.cn/packages/2d/93/4b0bb101e54206e92feb3c986c274902212b2ed8c55423e6e7f6d8b693ca/colorlog-6.4.1-py2.py3-none-any.whl (11 kB)\n", - "Requirement already satisfied: PyYAML in d:\\programdata\\anaconda3\\lib\\site-packages (from optuna) (5.4.1)\n", - "Requirement already satisfied: sqlalchemy>=1.1.0 in d:\\programdata\\anaconda3\\lib\\site-packages (from optuna) (1.4.7)\n", - "Requirement already satisfied: pyparsing>=2.0.2 in d:\\programdata\\anaconda3\\lib\\site-packages (from packaging>=20.0->optuna) (2.4.7)\n", - "Requirement already satisfied: greenlet!=0.4.17 in d:\\programdata\\anaconda3\\lib\\site-packages (from sqlalchemy>=1.1.0->optuna) (1.0.0)\n", - "Collecting importlib-resources\n", - " Downloading https://pypi.tuna.tsinghua.edu.cn/packages/f2/6c/2f3b930513bb971172ffceb63cf4e910944e57451724e69b1dec97cfefa6/importlib_resources-5.2.2-py3-none-any.whl (27 kB)\n", - "Collecting Mako\n", - " Downloading https://pypi.tuna.tsinghua.edu.cn/packages/75/69/c3ab0db9234fa5681a85a1c55203763a62902d56ad76b6d9b9bfa2c83694/Mako-1.1.5-py2.py3-none-any.whl (75 kB)\n", - "Collecting PrettyTable>=0.7.2\n", - " 
Downloading https://pypi.tuna.tsinghua.edu.cn/packages/bd/b5/c09f8d237e060a9e7b5d2d1577c2a6bc49fa298a7b4aefd52146f2b9a62e/prettytable-2.2.0-py3-none-any.whl (23 kB)\n", - "Collecting pbr!=2.1.0,>=2.0.0\n", - " Downloading https://pypi.tuna.tsinghua.edu.cn/packages/18/e0/1d4702dd81121d04a477c272d47ee5b6bc970d1a0990b11befa275c55cf2/pbr-5.6.0-py2.py3-none-any.whl (111 kB)\n", - "Collecting cmd2>=1.0.0\n", - " Downloading https://pypi.tuna.tsinghua.edu.cn/packages/44/ca/d407811641ec1d8bd8a38ee3165d73aa44776d7700436bd4d4a6606f2736/cmd2-2.1.2-py3-none-any.whl (141 kB)\n", - "Collecting stevedore>=2.0.1\n", - " Downloading https://pypi.tuna.tsinghua.edu.cn/packages/b2/c5/036a9a6e220ea7406a36130e80cca33a3e6b98b5328cfdba4b46b2ed0786/stevedore-3.4.0-py3-none-any.whl (49 kB)\n", - "Collecting autopage>=0.4.0\n", - " Downloading https://pypi.tuna.tsinghua.edu.cn/packages/18/a7/901e943318925f8ca3f7963616660065b3cf4e143b0327f88076ba5c4e22/autopage-0.4.0-py3-none-any.whl (20 kB)\n", - "Requirement already satisfied: colorama>=0.3.7 in d:\\programdata\\anaconda3\\lib\\site-packages (from cmd2>=1.0.0->cliff->optuna) (0.4.4)\n", - "Collecting pyreadline3\n", - " Downloading https://pypi.tuna.tsinghua.edu.cn/packages/89/29/10fbb29d957dbcee77a0832eabb9953da80d6bb9514f7ca1b3d82f50219f/pyreadline3-3.3-py3-none-any.whl (95 kB)\n", - "Requirement already satisfied: attrs>=16.3.0 in d:\\programdata\\anaconda3\\lib\\site-packages (from cmd2>=1.0.0->cliff->optuna) (20.3.0)\n", - "Collecting pyperclip>=1.6\n", - " Downloading https://pypi.tuna.tsinghua.edu.cn/packages/a7/2c/4c64579f847bd5d539803c8b909e54ba087a79d01bb3aba433a95879a6c5/pyperclip-1.8.2.tar.gz (20 kB)\n", - "Requirement already satisfied: wcwidth>=0.1.7 in d:\\programdata\\anaconda3\\lib\\site-packages (from cmd2>=1.0.0->cliff->optuna) (0.2.5)\n", - "Requirement already satisfied: zipp>=3.1.0 in d:\\programdata\\anaconda3\\lib\\site-packages (from importlib-resources->alembic->optuna) (3.4.1)\n", - "Requirement already satisfied: MarkupSafe>=0.9.2 in d:\\programdata\\anaconda3\\lib\\site-packages (from Mako->alembic->optuna) (1.1.1)\n", - "Building wheels for collected packages: pyperclip\n", - " Building wheel for pyperclip (setup.py): started\n", - " Building wheel for pyperclip (setup.py): finished with status 'done'\n", - " Created wheel for pyperclip: filename=pyperclip-1.8.2-py3-none-any.whl size=11107 sha256=96b5a96e64d8d6ae264ae796623c0148d1ca71677462878fc1f07c74e1e794ff\n", - " Stored in directory: c:\\users\\administrator\\appdata\\local\\pip\\cache\\wheels\\30\\c0\\21\\bc13df81c8b032076577671a8ef05db4e168a335e07e64d9a7\n", - "Successfully built pyperclip\n", - "Installing collected packages: pyreadline3, pyperclip, pbr, stevedore, PrettyTable, Mako, importlib-resources, cmd2, autopage, colorlog, cmaes, cliff, alembic, optuna\n", - "Successfully installed Mako-1.1.5 PrettyTable-2.2.0 alembic-1.7.1 autopage-0.4.0 cliff-3.9.0 cmaes-0.8.2 cmd2-2.1.2 colorlog-6.4.1 importlib-resources-5.2.2 optuna-2.9.1 pbr-5.6.0 pyperclip-1.8.2 pyreadline3-3.3 stevedore-3.4.0\n", - "Note: you may need to restart the kernel to use updated packages.\n" - ] - } - ], + "metadata": { + "scrolled": true + }, + "outputs": [], "source": [ - "pip install optuna" + "# pip install optuna" ] }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 10, "id": "d8a0279a", "metadata": {}, "outputs": [], @@ -357,12 +376,12 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 11, "id": "7433fdcb", "metadata": {}, "outputs": [], "source": [ - 
"def objective(trial,train_x, valid_x, train_y, valid_y):\n", + "def objective(trial,train_x, train_y, valid_x, valid_y):\n", " dtrain = lgb.Dataset(train_x, label=train_y)\n", " dvalid = lgb.Dataset(valid_x, label=valid_y)\n", "\n", @@ -395,7 +414,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 12, "id": "b9018adb", "metadata": { "scrolled": true @@ -405,119 +424,119 @@ "name": "stderr", "output_type": "stream", "text": [ - "\u001b[32m[I 2021-09-07 09:26:30,952]\u001b[0m A new study created in memory with name: no-name-f2147511-069d-495f-90ec-5990dc3c3716\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:31,284]\u001b[0m Trial 0 finished with value: 0.5579165649228477 and parameters: {'lambda_l1': 1.6814939560853405e-06, 'lambda_l2': 8.772634980486007, 'num_leaves': 79, 'feature_fraction': 0.4415746779386105, 'bagging_fraction': 0.5306839081914155, 'bagging_freq': 5, 'min_child_samples': 13}. Best is trial 0 with value: 0.5579165649228477.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:31,523]\u001b[0m Trial 1 finished with value: 0.5577111827415846 and parameters: {'lambda_l1': 0.00013684423280171766, 'lambda_l2': 0.010116712675880523, 'num_leaves': 42, 'feature_fraction': 0.4758659908396936, 'bagging_fraction': 0.8916447942940564, 'bagging_freq': 7, 'min_child_samples': 32}. Best is trial 1 with value: 0.5577111827415846.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:31,986]\u001b[0m Trial 2 finished with value: 0.5382832732103667 and parameters: {'lambda_l1': 1.849141022226266, 'lambda_l2': 9.297088403542385e-06, 'num_leaves': 94, 'feature_fraction': 0.6894755218120849, 'bagging_fraction': 0.9503182129837274, 'bagging_freq': 2, 'min_child_samples': 98}. Best is trial 2 with value: 0.5382832732103667.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:32,156]\u001b[0m Trial 3 finished with value: 0.5786765226592324 and parameters: {'lambda_l1': 4.448151837364203e-05, 'lambda_l2': 1.8955982808109254e-08, 'num_leaves': 19, 'feature_fraction': 0.493099451522876, 'bagging_fraction': 0.6742062281671596, 'bagging_freq': 5, 'min_child_samples': 35}. Best is trial 2 with value: 0.5382832732103667.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:32,266]\u001b[0m Trial 4 finished with value: 0.6139367553603606 and parameters: {'lambda_l1': 0.0002791229907486381, 'lambda_l2': 5.269713608638115, 'num_leaves': 5, 'feature_fraction': 0.8303631732661438, 'bagging_fraction': 0.942024874372832, 'bagging_freq': 4, 'min_child_samples': 61}. Best is trial 2 with value: 0.5382832732103667.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:32,826]\u001b[0m Trial 5 finished with value: 0.5453617455111696 and parameters: {'lambda_l1': 0.06860061560766911, 'lambda_l2': 0.2723830502667369, 'num_leaves': 98, 'feature_fraction': 0.7105587363977491, 'bagging_fraction': 0.5899933873219961, 'bagging_freq': 4, 'min_child_samples': 71}. Best is trial 2 with value: 0.5382832732103667.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:32,880]\u001b[0m Trial 6 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:33,189]\u001b[0m Trial 7 finished with value: 0.5588682864793495 and parameters: {'lambda_l1': 0.4689515611654304, 'lambda_l2': 1.5494838308859912e-07, 'num_leaves': 65, 'feature_fraction': 0.5583496504073044, 'bagging_fraction': 0.6569918432147945, 'bagging_freq': 5, 'min_child_samples': 58}. 
Best is trial 2 with value: 0.5382832732103667.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:33,462]\u001b[0m Trial 8 finished with value: 0.5447098867148504 and parameters: {'lambda_l1': 0.0010101561223389774, 'lambda_l2': 0.01515390989225731, 'num_leaves': 47, 'feature_fraction': 0.6245633996226619, 'bagging_fraction': 0.419624870667929, 'bagging_freq': 3, 'min_child_samples': 48}. Best is trial 2 with value: 0.5382832732103667.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:33,693]\u001b[0m Trial 9 pruned. Trial was pruned at iteration 80.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:34,181]\u001b[0m Trial 10 finished with value: 0.541110883425217 and parameters: {'lambda_l1': 1.4756346287368392e-08, 'lambda_l2': 1.3544296321997672e-05, 'num_leaves': 189, 'feature_fraction': 0.9751158697609862, 'bagging_fraction': 0.8348333900598492, 'bagging_freq': 1, 'min_child_samples': 100}. Best is trial 2 with value: 0.5382832732103667.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:34,761]\u001b[0m Trial 11 finished with value: 0.5430244062824532 and parameters: {'lambda_l1': 1.2282790653635484e-08, 'lambda_l2': 8.373619608433318e-06, 'num_leaves': 196, 'feature_fraction': 0.9236444858767681, 'bagging_fraction': 0.8122951589315216, 'bagging_freq': 1, 'min_child_samples': 100}. Best is trial 2 with value: 0.5382832732103667.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:35,237]\u001b[0m Trial 12 finished with value: 0.5479941202014348 and parameters: {'lambda_l1': 8.66531632367352, 'lambda_l2': 2.7356786615324263e-05, 'num_leaves': 182, 'feature_fraction': 0.7257944800491923, 'bagging_fraction': 0.8151059105693412, 'bagging_freq': 1, 'min_child_samples': 98}. Best is trial 2 with value: 0.5382832732103667.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:36,132]\u001b[0m Trial 13 finished with value: 0.5426396090105863 and parameters: {'lambda_l1': 1.5311804262482413e-08, 'lambda_l2': 8.31070641463058e-06, 'num_leaves': 241, 'feature_fraction': 0.9996713520233095, 'bagging_fraction': 0.9659578077631608, 'bagging_freq': 2, 'min_child_samples': 84}. Best is trial 2 with value: 0.5382832732103667.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:36,961]\u001b[0m Trial 14 finished with value: 0.5367665957770634 and parameters: {'lambda_l1': 0.014095420115571123, 'lambda_l2': 0.0002563644914171101, 'num_leaves': 149, 'feature_fraction': 0.7883099949736571, 'bagging_fraction': 0.8086023984951658, 'bagging_freq': 2, 'min_child_samples': 85}. Best is trial 14 with value: 0.5367665957770634.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:37,824]\u001b[0m Trial 15 finished with value: 0.5348886658006932 and parameters: {'lambda_l1': 0.020071941122612545, 'lambda_l2': 0.0003014852059198554, 'num_leaves': 146, 'feature_fraction': 0.7936923799556383, 'bagging_fraction': 0.756430674635471, 'bagging_freq': 3, 'min_child_samples': 83}. Best is trial 15 with value: 0.5348886658006932.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:38,622]\u001b[0m Trial 16 finished with value: 0.5372333761766732 and parameters: {'lambda_l1': 0.015173696268463625, 'lambda_l2': 0.0016757129971514612, 'num_leaves': 138, 'feature_fraction': 0.8107104336197675, 'bagging_fraction': 0.7376130779961089, 'bagging_freq': 3, 'min_child_samples': 78}. 
Best is trial 15 with value: 0.5348886658006932.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:39,451]\u001b[0m Trial 17 finished with value: 0.5376252190781058 and parameters: {'lambda_l1': 0.007357005023837469, 'lambda_l2': 0.000250874148036676, 'num_leaves': 147, 'feature_fraction': 0.7829205508785075, 'bagging_fraction': 0.7478270069878126, 'bagging_freq': 3, 'min_child_samples': 67}. Best is trial 15 with value: 0.5348886658006932.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:39,567]\u001b[0m Trial 18 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:40,910]\u001b[0m Trial 19 finished with value: 0.5315793527934071 and parameters: {'lambda_l1': 0.013209811084277438, 'lambda_l2': 0.06726287443306718, 'num_leaves': 231, 'feature_fraction': 0.637876115312563, 'bagging_fraction': 0.8716817438233221, 'bagging_freq': 3, 'min_child_samples': 44}. Best is trial 19 with value: 0.5315793527934071.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:42,324]\u001b[0m Trial 20 finished with value: 0.5262550702976 and parameters: {'lambda_l1': 0.003113216663103784, 'lambda_l2': 0.24783337352027449, 'num_leaves': 242, 'feature_fraction': 0.6137607392321572, 'bagging_fraction': 0.8936473883645825, 'bagging_freq': 3, 'min_child_samples': 44}. Best is trial 20 with value: 0.5262550702976.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:43,182]\u001b[0m Trial 21 finished with value: 0.5297204439253479 and parameters: {'lambda_l1': 0.003978575421253158, 'lambda_l2': 0.11658574784670588, 'num_leaves': 254, 'feature_fraction': 0.6168955771891016, 'bagging_fraction': 0.8787433941466242, 'bagging_freq': 3, 'min_child_samples': 43}. Best is trial 20 with value: 0.5262550702976.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:43,310]\u001b[0m Trial 22 pruned. Trial was pruned at iteration 13.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:44,194]\u001b[0m Trial 23 finished with value: 0.5288803542960898 and parameters: {'lambda_l1': 8.341315535148706e-06, 'lambda_l2': 0.24030532904199514, 'num_leaves': 221, 'feature_fraction': 0.5901567080654435, 'bagging_fraction': 0.8875920940685119, 'bagging_freq': 3, 'min_child_samples': 25}. Best is trial 20 with value: 0.5262550702976.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:44,331]\u001b[0m Trial 24 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:45,248]\u001b[0m Trial 25 finished with value: 0.5310533243420329 and parameters: {'lambda_l1': 1.9500743317527624e-07, 'lambda_l2': 0.0502751774074574, 'num_leaves': 214, 'feature_fraction': 0.5734739546088348, 'bagging_fraction': 0.9115976551406307, 'bagging_freq': 2, 'min_child_samples': 24}. Best is trial 20 with value: 0.5262550702976.\u001b[0m\n" + "\u001b[32m[I 2021-09-07 16:27:35,965]\u001b[0m A new study created in memory with name: no-name-cc424a48-83c6-4329-92a3-261d08a5edb9\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:36,779]\u001b[0m Trial 0 finished with value: 0.5178928385298298 and parameters: {'lambda_l1': 0.9464949700025425, 'lambda_l2': 1.4692321446797693e-05, 'num_leaves': 135, 'feature_fraction': 0.6448685329355106, 'bagging_fraction': 0.9048970068857253, 'bagging_freq': 7, 'min_child_samples': 30}. 
Best is trial 0 with value: 0.5178928385298298.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:37,641]\u001b[0m Trial 1 finished with value: 0.5336017055299863 and parameters: {'lambda_l1': 0.6354399847075458, 'lambda_l2': 4.87700419620481, 'num_leaves': 237, 'feature_fraction': 0.895991820221385, 'bagging_fraction': 0.6704364287317786, 'bagging_freq': 3, 'min_child_samples': 49}. Best is trial 0 with value: 0.5178928385298298.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:38,288]\u001b[0m Trial 2 finished with value: 0.5345721353318472 and parameters: {'lambda_l1': 9.52099217128906e-05, 'lambda_l2': 1.1454590646374039e-07, 'num_leaves': 170, 'feature_fraction': 0.7899599630691868, 'bagging_fraction': 0.5059741439893897, 'bagging_freq': 2, 'min_child_samples': 50}. Best is trial 0 with value: 0.5178928385298298.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:38,956]\u001b[0m Trial 3 finished with value: 0.543693048566443 and parameters: {'lambda_l1': 0.1481577233973877, 'lambda_l2': 3.179336503731134e-07, 'num_leaves': 128, 'feature_fraction': 0.9783519466402901, 'bagging_fraction': 0.6889127950806107, 'bagging_freq': 5, 'min_child_samples': 37}. Best is trial 0 with value: 0.5178928385298298.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:39,424]\u001b[0m Trial 4 finished with value: 0.5501760993688642 and parameters: {'lambda_l1': 3.7638689664291936, 'lambda_l2': 0.020384770050374573, 'num_leaves': 175, 'feature_fraction': 0.4840841073088404, 'bagging_fraction': 0.8334033110992205, 'bagging_freq': 5, 'min_child_samples': 35}. Best is trial 0 with value: 0.5178928385298298.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:39,493]\u001b[0m Trial 5 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:39,774]\u001b[0m Trial 6 pruned. Trial was pruned at iteration 33.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:39,828]\u001b[0m Trial 7 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:40,187]\u001b[0m Trial 8 finished with value: 0.5249490156742078 and parameters: {'lambda_l1': 8.186728837628555e-05, 'lambda_l2': 2.6479736415938615, 'num_leaves': 72, 'feature_fraction': 0.7599981859922114, 'bagging_fraction': 0.9692259725506919, 'bagging_freq': 5, 'min_child_samples': 38}. Best is trial 0 with value: 0.5178928385298298.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:40,299]\u001b[0m Trial 9 pruned. Trial was pruned at iteration 11.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:40,353]\u001b[0m Trial 10 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:40,398]\u001b[0m Trial 11 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:40,470]\u001b[0m Trial 12 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:41,280]\u001b[0m Trial 13 finished with value: 0.5325142394678348 and parameters: {'lambda_l1': 0.009551573559223583, 'lambda_l2': 0.0003114297759461472, 'num_leaves': 152, 'feature_fraction': 0.8236610445865283, 'bagging_fraction': 0.8389170404125451, 'bagging_freq': 4, 'min_child_samples': 20}. Best is trial 0 with value: 0.5178928385298298.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:41,391]\u001b[0m Trial 14 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:42,015]\u001b[0m Trial 15 finished with value: 0.5314302398651312 and parameters: {'lambda_l1': 0.024227381422510946, 'lambda_l2': 0.33276527010426776, 'num_leaves': 91, 'feature_fraction': 0.7874026441153832, 'bagging_fraction': 0.7899555875604796, 'bagging_freq': 7, 'min_child_samples': 19}. Best is trial 0 with value: 0.5178928385298298.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:42,091]\u001b[0m Trial 16 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:42,154]\u001b[0m Trial 17 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:42,226]\u001b[0m Trial 18 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:42,338]\u001b[0m Trial 19 pruned. Trial was pruned at iteration 14.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:42,384]\u001b[0m Trial 20 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:42,829]\u001b[0m Trial 21 finished with value: 0.5245233417218278 and parameters: {'lambda_l1': 0.07296901453015396, 'lambda_l2': 0.19272462963641587, 'num_leaves': 90, 'feature_fraction': 0.752955332537125, 'bagging_fraction': 0.7944607588474272, 'bagging_freq': 7, 'min_child_samples': 15}. Best is trial 0 with value: 0.5178928385298298.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:42,901]\u001b[0m Trial 22 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:43,287]\u001b[0m Trial 23 pruned. Trial was pruned at iteration 97.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:43,371]\u001b[0m Trial 24 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:43,489]\u001b[0m Trial 25 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:43,572]\u001b[0m Trial 26 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:43,632]\u001b[0m Trial 27 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:44,171]\u001b[0m Trial 28 finished with value: 0.5255422897694476 and parameters: {'lambda_l1': 0.028260092955639447, 'lambda_l2': 0.08413723627697263, 'num_leaves': 74, 'feature_fraction': 0.8042469147694201, 'bagging_fraction': 0.7548217217698655, 'bagging_freq': 3, 'min_child_samples': 42}. Best is trial 0 with value: 0.5178928385298298.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:44,331]\u001b[0m Trial 29 pruned. Trial was pruned at iteration 13.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:44,516]\u001b[0m Trial 30 pruned. Trial was pruned at iteration 16.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:44,637]\u001b[0m Trial 31 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:44,723]\u001b[0m Trial 32 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:45,592]\u001b[0m Trial 33 finished with value: 0.5218146276752049 and parameters: {'lambda_l1': 0.0016497886750146257, 'lambda_l2': 0.21484669274687151, 'num_leaves': 133, 'feature_fraction': 0.6885034648040802, 'bagging_fraction': 0.9281320338482011, 'bagging_freq': 3, 'min_child_samples': 42}. Best is trial 0 with value: 0.5178928385298298.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:45,711]\u001b[0m Trial 34 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:46,669]\u001b[0m Trial 35 finished with value: 0.5205542553680208 and parameters: {'lambda_l1': 4.3493248665689235e-05, 'lambda_l2': 3.680853885692289, 'num_leaves': 169, 'feature_fraction': 0.7104567429350184, 'bagging_fraction': 0.8672533271233644, 'bagging_freq': 1, 'min_child_samples': 23}. Best is trial 0 with value: 0.5178928385298298.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:47,676]\u001b[0m Trial 36 finished with value: 0.5206023202395521 and parameters: {'lambda_l1': 0.0006187306261428898, 'lambda_l2': 6.130428276943865e-08, 'num_leaves': 168, 'feature_fraction': 0.7036284508576037, 'bagging_fraction': 0.868428878180794, 'bagging_freq': 2, 'min_child_samples': 15}. Best is trial 0 with value: 0.5178928385298298.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:47,808]\u001b[0m Trial 37 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:47,937]\u001b[0m Trial 38 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:48,844]\u001b[0m Trial 39 finished with value: 0.5191042263340773 and parameters: {'lambda_l1': 0.0005562321404093257, 'lambda_l2': 2.232696278872472e-08, 'num_leaves': 161, 'feature_fraction': 0.710527336424428, 'bagging_fraction': 0.9309118257124477, 'bagging_freq': 1, 'min_child_samples': 24}. Best is trial 0 with value: 0.5178928385298298.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:49,889]\u001b[0m Trial 40 finished with value: 0.5168505162425636 and parameters: {'lambda_l1': 0.0004441190594738051, 'lambda_l2': 1.0707946566246148e-08, 'num_leaves': 190, 'feature_fraction': 0.7132737394337006, 'bagging_fraction': 0.8296082050311246, 'bagging_freq': 1, 'min_child_samples': 16}. Best is trial 40 with value: 0.5168505162425636.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:50,929]\u001b[0m Trial 41 finished with value: 0.5262832333602245 and parameters: {'lambda_l1': 0.0004892025280466791, 'lambda_l2': 1.89573604152611e-08, 'num_leaves': 189, 'feature_fraction': 0.7134600565845959, 'bagging_fraction': 0.8334328552244646, 'bagging_freq': 1, 'min_child_samples': 16}. Best is trial 40 with value: 0.5168505162425636.\u001b[0m\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "\u001b[32m[I 2021-09-07 09:26:46,126]\u001b[0m Trial 26 finished with value: 0.5310399189972692 and parameters: {'lambda_l1': 5.65218762871428e-06, 'lambda_l2': 1.6944446205708956, 'num_leaves': 256, 'feature_fraction': 0.6693624417062835, 'bagging_fraction': 0.8606541857435407, 'bagging_freq': 4, 'min_child_samples': 25}. Best is trial 20 with value: 0.5262550702976.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:47,541]\u001b[0m Trial 27 finished with value: 0.5289141422774571 and parameters: {'lambda_l1': 3.372780137131952e-07, 'lambda_l2': 0.004231573716537988, 'num_leaves': 222, 'feature_fraction': 0.5834642111209242, 'bagging_fraction': 0.9283224808721449, 'bagging_freq': 3, 'min_child_samples': 33}. Best is trial 20 with value: 0.5262550702976.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:47,734]\u001b[0m Trial 28 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:47,883]\u001b[0m Trial 29 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:48,032]\u001b[0m Trial 30 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:48,233]\u001b[0m Trial 31 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:48,470]\u001b[0m Trial 32 pruned. Trial was pruned at iteration 12.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:50,256]\u001b[0m Trial 33 finished with value: 0.5307511312800164 and parameters: {'lambda_l1': 8.331346294111473e-08, 'lambda_l2': 0.018173663934055492, 'num_leaves': 238, 'feature_fraction': 0.5874989674601676, 'bagging_fraction': 0.8876263960076438, 'bagging_freq': 2, 'min_child_samples': 30}. Best is trial 20 with value: 0.5262550702976.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:52,081]\u001b[0m Trial 34 finished with value: 0.5364692667074208 and parameters: {'lambda_l1': 8.939808862490256e-07, 'lambda_l2': 1.3094120587455667, 'num_leaves': 249, 'feature_fraction': 0.739630943171398, 'bagging_fraction': 0.9999691110926188, 'bagging_freq': 3, 'min_child_samples': 17}. Best is trial 20 with value: 0.5262550702976.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:52,228]\u001b[0m Trial 35 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:52,401]\u001b[0m Trial 36 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:52,527]\u001b[0m Trial 37 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:52,646]\u001b[0m Trial 38 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:52,746]\u001b[0m Trial 39 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:53,973]\u001b[0m Trial 40 finished with value: 0.5256364275490798 and parameters: {'lambda_l1': 0.0710216045724967, 'lambda_l2': 0.10020172057765622, 'num_leaves': 206, 'feature_fraction': 0.6441352660567405, 'bagging_fraction': 0.8538258125595672, 'bagging_freq': 5, 'min_child_samples': 8}. Best is trial 40 with value: 0.5256364275490798.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:54,896]\u001b[0m Trial 41 finished with value: 0.5233364787469251 and parameters: {'lambda_l1': 0.23671365201549266, 'lambda_l2': 0.0749439220683321, 'num_leaves': 210, 'feature_fraction': 0.6436961501856725, 'bagging_fraction': 0.8521968194803821, 'bagging_freq': 5, 'min_child_samples': 5}. Best is trial 41 with value: 0.5233364787469251.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:55,831]\u001b[0m Trial 42 finished with value: 0.5293150200028972 and parameters: {'lambda_l1': 0.6574943326302751, 'lambda_l2': 0.0361443234291568, 'num_leaves': 209, 'feature_fraction': 0.6596272574104582, 'bagging_fraction': 0.850673456207462, 'bagging_freq': 6, 'min_child_samples': 5}. Best is trial 41 with value: 0.5233364787469251.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:56,858]\u001b[0m Trial 43 finished with value: 0.5308606295175099 and parameters: {'lambda_l1': 0.07618036054920922, 'lambda_l2': 0.22544030283002933, 'num_leaves': 226, 'feature_fraction': 0.6984185669610476, 'bagging_fraction': 0.7845150865412134, 'bagging_freq': 5, 'min_child_samples': 10}. Best is trial 41 with value: 0.5233364787469251.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:57,019]\u001b[0m Trial 44 pruned. Trial was pruned at iteration 13.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:57,787]\u001b[0m Trial 45 finished with value: 0.5331475267867624 and parameters: {'lambda_l1': 1.4967349278475324, 'lambda_l2': 0.0008951722845905283, 'num_leaves': 207, 'feature_fraction': 0.7339992851750987, 'bagging_fraction': 0.919959739142468, 'bagging_freq': 5, 'min_child_samples': 26}. 
Best is trial 41 with value: 0.5233364787469251.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:57,865]\u001b[0m Trial 46 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:57,971]\u001b[0m Trial 47 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:58,069]\u001b[0m Trial 48 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:58,168]\u001b[0m Trial 49 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:58,317]\u001b[0m Trial 50 pruned. Trial was pruned at iteration 30.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:58,445]\u001b[0m Trial 51 pruned. Trial was pruned at iteration 13.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:26:59,558]\u001b[0m Trial 52 finished with value: 0.5287423259473492 and parameters: {'lambda_l1': 0.44209025556975773, 'lambda_l2': 0.02237337201530471, 'num_leaves': 180, 'feature_fraction': 0.6816257587535688, 'bagging_fraction': 0.8645245816570197, 'bagging_freq': 7, 'min_child_samples': 5}. Best is trial 41 with value: 0.5233364787469251.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:00,776]\u001b[0m Trial 53 finished with value: 0.5248984867652216 and parameters: {'lambda_l1': 0.02970872423104034, 'lambda_l2': 0.0197319338622143, 'num_leaves': 177, 'feature_fraction': 0.7150825328146555, 'bagging_fraction': 0.8972252525350597, 'bagging_freq': 7, 'min_child_samples': 12}. Best is trial 41 with value: 0.5233364787469251.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:01,886]\u001b[0m Trial 54 finished with value: 0.5310139507545261 and parameters: {'lambda_l1': 0.03922536031082296, 'lambda_l2': 1.7618527190204045e-06, 'num_leaves': 180, 'feature_fraction': 0.7089331672809696, 'bagging_fraction': 0.8021837464466384, 'bagging_freq': 7, 'min_child_samples': 11}. Best is trial 41 with value: 0.5233364787469251.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:02,043]\u001b[0m Trial 55 pruned. Trial was pruned at iteration 13.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:02,158]\u001b[0m Trial 56 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:02,937]\u001b[0m Trial 57 pruned. Trial was pruned at iteration 78.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:03,912]\u001b[0m Trial 58 finished with value: 0.5310990808119093 and parameters: {'lambda_l1': 0.008908141404368873, 'lambda_l2': 1.0024604099274968e-08, 'num_leaves': 156, 'feature_fraction': 0.7135958388327297, 'bagging_fraction': 0.8918708775644101, 'bagging_freq': 7, 'min_child_samples': 10}. Best is trial 41 with value: 0.5233364787469251.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:05,509]\u001b[0m Trial 59 finished with value: 0.5303075036750562 and parameters: {'lambda_l1': 0.0036415543019748704, 'lambda_l2': 0.060450481568631226, 'num_leaves': 245, 'feature_fraction': 0.6322922933331399, 'bagging_fraction': 0.8119277832780933, 'bagging_freq': 6, 'min_child_samples': 18}. Best is trial 41 with value: 0.5233364787469251.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:05,675]\u001b[0m Trial 60 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:06,717]\u001b[0m Trial 61 finished with value: 0.5266068329070613 and parameters: {'lambda_l1': 0.08187221145217663, 'lambda_l2': 0.12038514531389062, 'num_leaves': 230, 'feature_fraction': 0.5893130926352276, 'bagging_fraction': 0.9218811557192532, 'bagging_freq': 3, 'min_child_samples': 30}. 
Best is trial 41 with value: 0.5233364787469251.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:06,839]\u001b[0m Trial 62 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:06,957]\u001b[0m Trial 63 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:07,060]\u001b[0m Trial 64 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:07,235]\u001b[0m Trial 65 pruned. Trial was pruned at iteration 13.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:07,371]\u001b[0m Trial 66 pruned. Trial was pruned at iteration 13.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:08,252]\u001b[0m Trial 67 finished with value: 0.524615381244906 and parameters: {'lambda_l1': 0.0007372437461333507, 'lambda_l2': 0.07197819258619385, 'num_leaves': 217, 'feature_fraction': 0.7085126717217893, 'bagging_fraction': 0.9783185592011208, 'bagging_freq': 4, 'min_child_samples': 7}. Best is trial 41 with value: 0.5233364787469251.\u001b[0m\n" + "\u001b[32m[I 2021-09-07 16:27:51,023]\u001b[0m Trial 42 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:51,805]\u001b[0m Trial 43 finished with value: 0.516065766031832 and parameters: {'lambda_l1': 0.00021869661941011055, 'lambda_l2': 1.0674672013946999e-08, 'num_leaves': 201, 'feature_fraction': 0.7039965594752932, 'bagging_fraction': 0.8165783106186105, 'bagging_freq': 1, 'min_child_samples': 10}. Best is trial 43 with value: 0.516065766031832.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:51,940]\u001b[0m Trial 44 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:52,044]\u001b[0m Trial 45 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:52,218]\u001b[0m Trial 46 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:53,067]\u001b[0m Trial 47 finished with value: 0.5256815758791472 and parameters: {'lambda_l1': 6.162602167779085e-05, 'lambda_l2': 1.6462819728079273e-07, 'num_leaves': 201, 'feature_fraction': 0.7163097853297453, 'bagging_fraction': 0.9420618119436382, 'bagging_freq': 1, 'min_child_samples': 24}. Best is trial 43 with value: 0.516065766031832.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:53,178]\u001b[0m Trial 48 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:53,762]\u001b[0m Trial 49 finished with value: 0.5213791636070063 and parameters: {'lambda_l1': 2.774750232046531e-07, 'lambda_l2': 2.4404405074646332e-08, 'num_leaves': 154, 'feature_fraction': 0.6882189607163384, 'bagging_fraction': 0.9871043449165129, 'bagging_freq': 1, 'min_child_samples': 9}. Best is trial 43 with value: 0.516065766031832.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:53,875]\u001b[0m Trial 50 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:54,587]\u001b[0m Trial 51 finished with value: 0.5174618905113623 and parameters: {'lambda_l1': 0.000381507052985562, 'lambda_l2': 4.901891994722806e-08, 'num_leaves': 166, 'feature_fraction': 0.7117978094083592, 'bagging_fraction': 0.8956613846537137, 'bagging_freq': 1, 'min_child_samples': 12}. 
Best is trial 43 with value: 0.516065766031832.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:55,304]\u001b[0m Trial 52 finished with value: 0.5219780933938188 and parameters: {'lambda_l1': 0.0003055316771241415, 'lambda_l2': 1.0975399369852818e-08, 'num_leaves': 175, 'feature_fraction': 0.7709159871191618, 'bagging_fraction': 0.8946659725371712, 'bagging_freq': 1, 'min_child_samples': 12}. Best is trial 43 with value: 0.516065766031832.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:56,481]\u001b[0m Trial 53 finished with value: 0.522019841081264 and parameters: {'lambda_l1': 3.0853077659712017e-06, 'lambda_l2': 3.167435996190143e-08, 'num_leaves': 199, 'feature_fraction': 0.700370780125537, 'bagging_fraction': 0.9659440325481246, 'bagging_freq': 1, 'min_child_samples': 5}. Best is trial 43 with value: 0.516065766031832.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:57,422]\u001b[0m Trial 54 finished with value: 0.5210662969810745 and parameters: {'lambda_l1': 7.857450656938453e-05, 'lambda_l2': 4.5159973730477044e-07, 'num_leaves': 153, 'feature_fraction': 0.7263108852936352, 'bagging_fraction': 0.936630215722916, 'bagging_freq': 1, 'min_child_samples': 19}. Best is trial 43 with value: 0.516065766031832.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:57,568]\u001b[0m Trial 55 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:57,678]\u001b[0m Trial 56 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:57,803]\u001b[0m Trial 57 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:57,889]\u001b[0m Trial 58 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:58,284]\u001b[0m Trial 59 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:58,510]\u001b[0m Trial 60 pruned. Trial was pruned at iteration 15.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:59,683]\u001b[0m Trial 61 finished with value: 0.5238780309760502 and parameters: {'lambda_l1': 0.0005622649168938934, 'lambda_l2': 7.041773458860283e-08, 'num_leaves': 165, 'feature_fraction': 0.6971774484774765, 'bagging_fraction': 0.8845735572162268, 'bagging_freq': 2, 'min_child_samples': 16}. Best is trial 43 with value: 0.516065766031832.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:27:59,867]\u001b[0m Trial 62 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:01,236]\u001b[0m Trial 63 finished with value: 0.5172018048571797 and parameters: {'lambda_l1': 4.4774427854289624e-05, 'lambda_l2': 1.0431434685856767e-07, 'num_leaves': 175, 'feature_fraction': 0.7067550902029666, 'bagging_fraction': 0.7960327498051027, 'bagging_freq': 2, 'min_child_samples': 16}. Best is trial 43 with value: 0.516065766031832.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:01,369]\u001b[0m Trial 64 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:01,513]\u001b[0m Trial 65 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:02,393]\u001b[0m Trial 66 pruned. Trial was pruned at iteration 76.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:02,529]\u001b[0m Trial 67 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:02,721]\u001b[0m Trial 68 pruned. Trial was pruned at iteration 12.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:03,060]\u001b[0m Trial 69 pruned. 
Trial was pruned at iteration 44.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:03,135]\u001b[0m Trial 70 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:03,486]\u001b[0m Trial 71 pruned. Trial was pruned at iteration 56.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:03,587]\u001b[0m Trial 72 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:04,107]\u001b[0m Trial 73 pruned. Trial was pruned at iteration 68.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:04,203]\u001b[0m Trial 74 pruned. Trial was pruned at iteration 11.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:04,307]\u001b[0m Trial 75 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:04,631]\u001b[0m Trial 76 pruned. Trial was pruned at iteration 39.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:04,725]\u001b[0m Trial 77 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:04,831]\u001b[0m Trial 78 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:04,920]\u001b[0m Trial 79 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:04,992]\u001b[0m Trial 80 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:05,099]\u001b[0m Trial 81 pruned. Trial was pruned at iteration 11.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:05,766]\u001b[0m Trial 82 finished with value: 0.5178938881576146 and parameters: {'lambda_l1': 1.2203483524616316e-08, 'lambda_l2': 5.507928324956204e-07, 'num_leaves': 185, 'feature_fraction': 0.7219415365227362, 'bagging_fraction': 0.9438920094595108, 'bagging_freq': 1, 'min_child_samples': 17}. Best is trial 43 with value: 0.516065766031832.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:05,866]\u001b[0m Trial 83 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:05,961]\u001b[0m Trial 84 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:06,778]\u001b[0m Trial 85 finished with value: 0.5122528701381253 and parameters: {'lambda_l1': 6.371822640459341e-07, 'lambda_l2': 3.03967794068636e-08, 'num_leaves': 189, 'feature_fraction': 0.7066521328434867, 'bagging_fraction': 0.8972190570020028, 'bagging_freq': 1, 'min_child_samples': 11}. Best is trial 85 with value: 0.5122528701381253.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:07,709]\u001b[0m Trial 86 finished with value: 0.5128398316765018 and parameters: {'lambda_l1': 1.5059254193489838e-07, 'lambda_l2': 2.6171717314455622e-08, 'num_leaves': 192, 'feature_fraction': 0.711268040370677, 'bagging_fraction': 0.9174432395706226, 'bagging_freq': 1, 'min_child_samples': 11}. Best is trial 85 with value: 0.5122528701381253.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:09,411]\u001b[0m Trial 87 finished with value: 0.5191456815789754 and parameters: {'lambda_l1': 5.687878085793806e-08, 'lambda_l2': 2.6772763929664963e-08, 'num_leaves': 216, 'feature_fraction': 0.7413398142797374, 'bagging_fraction': 0.9993988673163303, 'bagging_freq': 1, 'min_child_samples': 11}. Best is trial 85 with value: 0.5122528701381253.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:09,813]\u001b[0m Trial 88 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:12,526]\u001b[0m Trial 89 finished with value: 0.5102536883069437 and parameters: {'lambda_l1': 1.6446541932580473e-07, 'lambda_l2': 1.0734412329614426e-07, 'num_leaves': 193, 'feature_fraction': 0.8123395009389368, 'bagging_fraction': 0.9184649865458648, 'bagging_freq': 1, 'min_child_samples': 10}. Best is trial 89 with value: 0.5102536883069437.\u001b[0m\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "\u001b[32m[I 2021-09-07 09:27:09,210]\u001b[0m Trial 68 finished with value: 0.524942396458539 and parameters: {'lambda_l1': 0.0011590093830295628, 'lambda_l2': 0.06353809127053359, 'num_leaves': 198, 'feature_fraction': 0.7106295012909083, 'bagging_fraction': 0.973655849802224, 'bagging_freq': 5, 'min_child_samples': 7}. Best is trial 41 with value: 0.5233364787469251.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:09,572]\u001b[0m Trial 69 pruned. Trial was pruned at iteration 29.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:10,511]\u001b[0m Trial 70 finished with value: 0.5227354154865925 and parameters: {'lambda_l1': 0.0019204609823774911, 'lambda_l2': 0.006957053438847566, 'num_leaves': 200, 'feature_fraction': 0.7174135338967843, 'bagging_fraction': 0.9747628414441452, 'bagging_freq': 5, 'min_child_samples': 8}. Best is trial 70 with value: 0.5227354154865925.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:11,445]\u001b[0m Trial 71 pruned. Trial was pruned at iteration 72.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:12,707]\u001b[0m Trial 72 finished with value: 0.5273794666616337 and parameters: {'lambda_l1': 0.00253464769953144, 'lambda_l2': 0.04250761519699366, 'num_leaves': 203, 'feature_fraction': 0.723989872880134, 'bagging_fraction': 0.9954485306248265, 'bagging_freq': 5, 'min_child_samples': 10}. Best is trial 70 with value: 0.5227354154865925.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:14,118]\u001b[0m Trial 73 finished with value: 0.5290081420408789 and parameters: {'lambda_l1': 0.0014187815549416435, 'lambda_l2': 0.12791352246804705, 'num_leaves': 218, 'feature_fraction': 0.7758653557130019, 'bagging_fraction': 0.9488928693793105, 'bagging_freq': 5, 'min_child_samples': 18}. Best is trial 70 with value: 0.5227354154865925.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:14,196]\u001b[0m Trial 74 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:15,517]\u001b[0m Trial 75 finished with value: 0.5325397214424649 and parameters: {'lambda_l1': 0.0005872739303922063, 'lambda_l2': 0.0096136159649407, 'num_leaves': 211, 'feature_fraction': 0.8012899648531023, 'bagging_fraction': 0.931501707649951, 'bagging_freq': 4, 'min_child_samples': 13}. Best is trial 70 with value: 0.5227354154865925.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:16,903]\u001b[0m Trial 76 finished with value: 0.5296781377317934 and parameters: {'lambda_l1': 0.006787205212552698, 'lambda_l2': 0.0790698959620889, 'num_leaves': 239, 'feature_fraction': 0.7454753912863651, 'bagging_fraction': 0.9799306593714261, 'bagging_freq': 5, 'min_child_samples': 7}. Best is trial 70 with value: 0.5227354154865925.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:17,956]\u001b[0m Trial 77 finished with value: 0.5309336374271006 and parameters: {'lambda_l1': 0.0037427478770181, 'lambda_l2': 0.8130901726120456, 'num_leaves': 232, 'feature_fraction': 0.7224358819185711, 'bagging_fraction': 0.9520121758601578, 'bagging_freq': 4, 'min_child_samples': 12}. 
Best is trial 70 with value: 0.5227354154865925.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:18,071]\u001b[0m Trial 78 pruned. Trial was pruned at iteration 13.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:19,373]\u001b[0m Trial 79 finished with value: 0.5347521802448649 and parameters: {'lambda_l1': 0.017570019491239652, 'lambda_l2': 0.05059260326524094, 'num_leaves': 226, 'feature_fraction': 0.6981808494111595, 'bagging_fraction': 0.9993645575870086, 'bagging_freq': 4, 'min_child_samples': 20}. Best is trial 70 with value: 0.5227354154865925.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:19,471]\u001b[0m Trial 80 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:21,230]\u001b[0m Trial 81 finished with value: 0.5227732830528738 and parameters: {'lambda_l1': 0.0003058538457597125, 'lambda_l2': 0.04324937485563167, 'num_leaves': 202, 'feature_fraction': 0.73206644303025, 'bagging_fraction': 0.9911170826848787, 'bagging_freq': 5, 'min_child_samples': 10}. Best is trial 70 with value: 0.5227354154865925.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:22,266]\u001b[0m Trial 82 finished with value: 0.5279916258627607 and parameters: {'lambda_l1': 0.0002580487853233371, 'lambda_l2': 0.015002378165105253, 'num_leaves': 213, 'feature_fraction': 0.665112654945876, 'bagging_fraction': 0.9341515771267435, 'bagging_freq': 5, 'min_child_samples': 11}. Best is trial 70 with value: 0.5227354154865925.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:26,040]\u001b[0m Trial 83 pruned. Trial was pruned at iteration 51.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:29,359]\u001b[0m Trial 84 finished with value: 0.5257703648298483 and parameters: {'lambda_l1': 0.09313058634431053, 'lambda_l2': 0.0794176561682967, 'num_leaves': 174, 'feature_fraction': 0.7330255912724464, 'bagging_fraction': 0.9556166881594967, 'bagging_freq': 6, 'min_child_samples': 14}. Best is trial 70 with value: 0.5227354154865925.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:29,749]\u001b[0m Trial 85 pruned. Trial was pruned at iteration 30.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:29,919]\u001b[0m Trial 86 pruned. Trial was pruned at iteration 17.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:30,026]\u001b[0m Trial 87 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:30,861]\u001b[0m Trial 88 finished with value: 0.5270306752854106 and parameters: {'lambda_l1': 0.0003495273937948516, 'lambda_l2': 0.057992141622577896, 'num_leaves': 167, 'feature_fraction': 0.7406493057210397, 'bagging_fraction': 0.8812843020741449, 'bagging_freq': 5, 'min_child_samples': 10}. Best is trial 70 with value: 0.5227354154865925.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:30,933]\u001b[0m Trial 89 pruned. Trial was pruned at iteration 11.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:31,029]\u001b[0m Trial 90 pruned. Trial was pruned at iteration 10.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:31,358]\u001b[0m Trial 91 pruned. Trial was pruned at iteration 48.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:31,714]\u001b[0m Trial 92 pruned. Trial was pruned at iteration 41.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:32,543]\u001b[0m Trial 93 finished with value: 0.5264391883178012 and parameters: {'lambda_l1': 0.055429809241931895, 'lambda_l2': 0.013315511259061849, 'num_leaves': 235, 'feature_fraction': 0.6209901742544794, 'bagging_fraction': 0.9603625243176316, 'bagging_freq': 5, 'min_child_samples': 9}. 
Best is trial 70 with value: 0.5227354154865925.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:32,825]\u001b[0m Trial 94 pruned. Trial was pruned at iteration 40.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:32,940]\u001b[0m Trial 95 pruned. Trial was pruned at iteration 13.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:33,525]\u001b[0m Trial 96 finished with value: 0.521566218012535 and parameters: {'lambda_l1': 0.001072724146880552, 'lambda_l2': 0.043446953857601264, 'num_leaves': 142, 'feature_fraction': 0.6922532844601723, 'bagging_fraction': 0.9898097804978216, 'bagging_freq': 5, 'min_child_samples': 5}. Best is trial 96 with value: 0.521566218012535.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:34,149]\u001b[0m Trial 97 finished with value: 0.5196727190561812 and parameters: {'lambda_l1': 0.0025915189209523223, 'lambda_l2': 0.37941398551753863, 'num_leaves': 159, 'feature_fraction': 0.6942886423659413, 'bagging_fraction': 0.9888819332176355, 'bagging_freq': 6, 'min_child_samples': 5}. Best is trial 97 with value: 0.5196727190561812.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:34,855]\u001b[0m Trial 98 finished with value: 0.5265000531725292 and parameters: {'lambda_l1': 0.0006868362572582551, 'lambda_l2': 0.040525833784093586, 'num_leaves': 143, 'feature_fraction': 0.6930600995921747, 'bagging_fraction': 0.9897374239207342, 'bagging_freq': 6, 'min_child_samples': 6}. Best is trial 97 with value: 0.5196727190561812.\u001b[0m\n", - "\u001b[32m[I 2021-09-07 09:27:34,935]\u001b[0m Trial 99 pruned. Trial was pruned at iteration 10.\u001b[0m\n" + "\u001b[32m[I 2021-09-07 16:28:12,792]\u001b[0m Trial 90 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:13,099]\u001b[0m Trial 91 pruned. Trial was pruned at iteration 13.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:13,456]\u001b[0m Trial 92 pruned. Trial was pruned at iteration 14.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:13,664]\u001b[0m Trial 93 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:14,351]\u001b[0m Trial 94 pruned. Trial was pruned at iteration 45.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:14,508]\u001b[0m Trial 95 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:14,828]\u001b[0m Trial 96 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:14,962]\u001b[0m Trial 97 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:16,197]\u001b[0m Trial 98 finished with value: 0.5171555001746302 and parameters: {'lambda_l1': 1.0103137663726183e-08, 'lambda_l2': 1.7418953701287595e-06, 'num_leaves': 203, 'feature_fraction': 0.7265189263478641, 'bagging_fraction': 0.9317931154547426, 'bagging_freq': 1, 'min_child_samples': 10}. Best is trial 89 with value: 0.5102536883069437.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:16,557]\u001b[0m Trial 99 pruned. 
Trial was pruned at iteration 16.\u001b[0m\n" ] }, { @@ -526,108 +545,635 @@ "text": [ "Number of finished trials: 100\n", "Best trial:\n", - " Value: 0.5196727190561812\n", + " Value: 0.5102536883069437\n", " Params: \n", - " lambda_l1: 0.0025915189209523223\n", - " lambda_l2: 0.37941398551753863\n", - " num_leaves: 159\n", - " feature_fraction: 0.6942886423659413\n", - " bagging_fraction: 0.9888819332176355\n", - " bagging_freq: 6\n", - " min_child_samples: 5\n" + " lambda_l1: 1.6446541932580473e-07\n", + " lambda_l2: 1.0734412329614426e-07\n", + " num_leaves: 193\n", + " feature_fraction: 0.8123395009389368\n", + " bagging_fraction: 0.9184649865458648\n", + " bagging_freq: 1\n", + " min_child_samples: 10\n", + "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000712 seconds.\n", + "You can set `force_col_wise=true` to remove the overhead.\n", + "[LightGBM] [Info] Total Bins 1837\n", + "[LightGBM] [Info] Number of data points in the train set: 14860, number of used features: 8\n", + "[LightGBM] [Info] Start training from score 2.073247\n", + "Training until validation scores don't improve for 20 rounds\n", + "[50]\tvalid_0's rmse: 0.453164\n", + "[100]\tvalid_0's rmse: 0.442958\n", + "Early stopping, best iteration is:\n", + "[110]\tvalid_0's rmse: 0.442157\n" ] - } - ], - "source": [ - "if __name__ == \"__main__\":\n", - " study = optuna.create_study(\n", - " pruner=optuna.pruners.MedianPruner(n_warmup_steps=10), direction=\"minimize\"\n", - " )\n", - " study.optimize(lambda trial: objective(trial, train_x, valid_x, train_y, valid_y), n_trials=100)\n", - "\n", - " print(\"Number of finished trials: {}\".format(len(study.trials)))\n", - "\n", - " print(\"Best trial:\")\n", - " trial = study.best_trial\n", - "\n", - " print(\" Value: {}\".format(trial.value))\n", - "\n", - " print(\" Params: \")\n", - " for key, value in trial.params.items():\n", - " print(\" {}: {}\".format(key, value))" - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "id": "681d9cc2", - "metadata": {}, - "outputs": [ + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[32m[I 2021-09-07 16:28:17,677]\u001b[0m Trial 100 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:17,796]\u001b[0m Trial 101 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:17,941]\u001b[0m Trial 102 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:18,090]\u001b[0m Trial 103 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:18,293]\u001b[0m Trial 104 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:18,412]\u001b[0m Trial 105 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:18,524]\u001b[0m Trial 106 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:18,647]\u001b[0m Trial 107 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:18,797]\u001b[0m Trial 108 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:18,911]\u001b[0m Trial 109 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:19,029]\u001b[0m Trial 110 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:19,211]\u001b[0m Trial 111 pruned. 
Trial was pruned at iteration 16.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:19,595]\u001b[0m Trial 112 pruned. Trial was pruned at iteration 30.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:19,744]\u001b[0m Trial 113 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:19,853]\u001b[0m Trial 114 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:20,146]\u001b[0m Trial 115 pruned. Trial was pruned at iteration 23.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:20,267]\u001b[0m Trial 116 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:20,420]\u001b[0m Trial 117 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:20,594]\u001b[0m Trial 118 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:20,780]\u001b[0m Trial 119 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:20,942]\u001b[0m Trial 120 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:21,073]\u001b[0m Trial 121 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:21,410]\u001b[0m Trial 122 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:21,601]\u001b[0m Trial 123 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:21,799]\u001b[0m Trial 124 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:21,944]\u001b[0m Trial 125 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:22,080]\u001b[0m Trial 126 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:22,209]\u001b[0m Trial 127 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:22,367]\u001b[0m Trial 128 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:22,509]\u001b[0m Trial 129 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:22,673]\u001b[0m Trial 130 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:22,754]\u001b[0m Trial 131 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:22,923]\u001b[0m Trial 132 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:23,074]\u001b[0m Trial 133 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:23,442]\u001b[0m Trial 134 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:23,682]\u001b[0m Trial 135 pruned. Trial was pruned at iteration 16.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:23,824]\u001b[0m Trial 136 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:24,010]\u001b[0m Trial 137 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:24,160]\u001b[0m Trial 138 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:24,458]\u001b[0m Trial 139 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:24,678]\u001b[0m Trial 140 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:24,849]\u001b[0m Trial 141 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:25,081]\u001b[0m Trial 142 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:25,256]\u001b[0m Trial 143 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:25,501]\u001b[0m Trial 144 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:26,208]\u001b[0m Trial 145 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:26,709]\u001b[0m Trial 146 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:26,919]\u001b[0m Trial 147 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:27,534]\u001b[0m Trial 148 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:27,729]\u001b[0m Trial 149 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:28,152]\u001b[0m Trial 150 pruned. Trial was pruned at iteration 24.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:28,278]\u001b[0m Trial 151 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:28,393]\u001b[0m Trial 152 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:28,543]\u001b[0m Trial 153 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:28,653]\u001b[0m Trial 154 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:28,757]\u001b[0m Trial 155 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:28,887]\u001b[0m Trial 156 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:29,001]\u001b[0m Trial 157 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:29,176]\u001b[0m Trial 158 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:29,332]\u001b[0m Trial 159 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:29,541]\u001b[0m Trial 160 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:29,732]\u001b[0m Trial 161 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:29,884]\u001b[0m Trial 162 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:29,997]\u001b[0m Trial 163 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:30,103]\u001b[0m Trial 164 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:30,225]\u001b[0m Trial 165 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:30,330]\u001b[0m Trial 166 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:30,487]\u001b[0m Trial 167 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:30,579]\u001b[0m Trial 168 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:30,685]\u001b[0m Trial 169 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:30,798]\u001b[0m Trial 170 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:30,902]\u001b[0m Trial 171 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:30,999]\u001b[0m Trial 172 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:31,099]\u001b[0m Trial 173 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:31,206]\u001b[0m Trial 174 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:31,352]\u001b[0m Trial 175 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:31,476]\u001b[0m Trial 176 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:31,716]\u001b[0m Trial 177 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:31,834]\u001b[0m Trial 178 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:31,983]\u001b[0m Trial 179 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:32,099]\u001b[0m Trial 180 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:32,272]\u001b[0m Trial 181 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:32,431]\u001b[0m Trial 182 pruned. Trial was pruned at iteration 12.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:32,546]\u001b[0m Trial 183 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:32,656]\u001b[0m Trial 184 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:32,763]\u001b[0m Trial 185 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:32,965]\u001b[0m Trial 186 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:33,183]\u001b[0m Trial 187 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:33,409]\u001b[0m Trial 188 pruned. Trial was pruned at iteration 10.\u001b[0m\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[32m[I 2021-09-07 16:28:33,571]\u001b[0m Trial 189 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:33,719]\u001b[0m Trial 190 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:33,919]\u001b[0m Trial 191 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:34,112]\u001b[0m Trial 192 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:34,276]\u001b[0m Trial 193 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:34,426]\u001b[0m Trial 194 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:34,633]\u001b[0m Trial 195 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:34,781]\u001b[0m Trial 196 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:35,002]\u001b[0m Trial 197 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:35,242]\u001b[0m Trial 198 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:35,408]\u001b[0m Trial 199 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n" + ] + }, { "name": "stdout", "output_type": "stream", "text": [ - "{'boosting_type': 'gbdt', 'objective': 'regression', 'metric': 'rmse', 'lambda_l1': 0.0025915189209523223, 'lambda_l2': 0.37941398551753863, 'num_leaves': 159, 'feature_fraction': 0.6942886423659413, 'bagging_fraction': 0.9888819332176355, 'bagging_freq': 6, 'min_child_samples': 5}\n" + "Number of finished trials: 200\n", + "Best trial:\n", + " Value: 0.5102536883069437\n", + " Params: \n", + " lambda_l1: 1.6446541932580473e-07\n", + " lambda_l2: 1.0734412329614426e-07\n", + " num_leaves: 193\n", + " feature_fraction: 0.8123395009389368\n", + " bagging_fraction: 0.9184649865458648\n", + " bagging_freq: 1\n", + " min_child_samples: 10\n", + "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000777 seconds.\n", + "You can set `force_col_wise=true` to remove the overhead.\n", + "[LightGBM] [Info] Total Bins 1838\n", + "[LightGBM] [Info] Number of data points in the train set: 14861, number of used features: 8\n", + "[LightGBM] [Info] Start training from score 2.070396\n", + "Training until validation scores don't improve for 20 rounds\n", + "[50]\tvalid_0's rmse: 0.465875\n", + "Early stopping, best iteration is:\n", + "[76]\tvalid_0's rmse: 0.460117\n" ] - } - ], - "source": [ - "params = {'boosting_type': 'gbdt',\n", - " 'objective': 'regression',\n", - " \"metric\": 'rmse'}\n", - "for key, value in trial.params.items():\n", - " params[key]=value\n", - "print(params)" - ] - }, - { - "cell_type": "code", - "execution_count": 25, - "id": "f5534bab", - "metadata": {}, - "outputs": [ + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[32m[I 2021-09-07 16:28:36,810]\u001b[0m Trial 200 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:36,973]\u001b[0m Trial 201 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:37,136]\u001b[0m Trial 202 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:37,255]\u001b[0m Trial 203 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:37,402]\u001b[0m Trial 204 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:37,545]\u001b[0m Trial 205 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:37,742]\u001b[0m Trial 206 pruned. Trial was pruned at iteration 11.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:37,864]\u001b[0m Trial 207 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:38,005]\u001b[0m Trial 208 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:38,155]\u001b[0m Trial 209 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:38,307]\u001b[0m Trial 210 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:38,419]\u001b[0m Trial 211 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:38,537]\u001b[0m Trial 212 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:38,637]\u001b[0m Trial 213 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:38,786]\u001b[0m Trial 214 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:38,962]\u001b[0m Trial 215 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:39,091]\u001b[0m Trial 216 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:39,246]\u001b[0m Trial 217 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:39,368]\u001b[0m Trial 218 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:39,505]\u001b[0m Trial 219 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:39,637]\u001b[0m Trial 220 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:39,737]\u001b[0m Trial 221 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:39,847]\u001b[0m Trial 222 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:39,948]\u001b[0m Trial 223 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:40,046]\u001b[0m Trial 224 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:40,138]\u001b[0m Trial 225 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:40,235]\u001b[0m Trial 226 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:40,324]\u001b[0m Trial 227 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:40,398]\u001b[0m Trial 228 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:40,483]\u001b[0m Trial 229 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:40,621]\u001b[0m Trial 230 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:40,779]\u001b[0m Trial 231 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:40,860]\u001b[0m Trial 232 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:40,937]\u001b[0m Trial 233 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:41,077]\u001b[0m Trial 234 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:41,188]\u001b[0m Trial 235 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:41,312]\u001b[0m Trial 236 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:41,431]\u001b[0m Trial 237 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:41,533]\u001b[0m Trial 238 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:41,664]\u001b[0m Trial 239 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:41,790]\u001b[0m Trial 240 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:41,902]\u001b[0m Trial 241 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:42,009]\u001b[0m Trial 242 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:42,110]\u001b[0m Trial 243 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:42,233]\u001b[0m Trial 244 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:42,443]\u001b[0m Trial 245 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:42,587]\u001b[0m Trial 246 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:42,674]\u001b[0m Trial 247 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:42,779]\u001b[0m Trial 248 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:42,859]\u001b[0m Trial 249 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:42,958]\u001b[0m Trial 250 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:43,070]\u001b[0m Trial 251 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:43,257]\u001b[0m Trial 252 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:43,356]\u001b[0m Trial 253 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:43,462]\u001b[0m Trial 254 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:43,566]\u001b[0m Trial 255 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:43,627]\u001b[0m Trial 256 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:43,743]\u001b[0m Trial 257 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:43,856]\u001b[0m Trial 258 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:43,945]\u001b[0m Trial 259 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:44,059]\u001b[0m Trial 260 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:44,164]\u001b[0m Trial 261 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:44,279]\u001b[0m Trial 262 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:44,377]\u001b[0m Trial 263 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:44,478]\u001b[0m Trial 264 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:44,585]\u001b[0m Trial 265 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:44,688]\u001b[0m Trial 266 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:44,794]\u001b[0m Trial 267 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:44,914]\u001b[0m Trial 268 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:45,016]\u001b[0m Trial 269 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:45,111]\u001b[0m Trial 270 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:45,248]\u001b[0m Trial 271 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:45,424]\u001b[0m Trial 272 pruned. Trial was pruned at iteration 11.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:45,578]\u001b[0m Trial 273 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:45,809]\u001b[0m Trial 274 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:45,937]\u001b[0m Trial 275 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:46,068]\u001b[0m Trial 276 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:46,210]\u001b[0m Trial 277 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:46,357]\u001b[0m Trial 278 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:46,505]\u001b[0m Trial 279 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:46,662]\u001b[0m Trial 280 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:46,828]\u001b[0m Trial 281 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:46,927]\u001b[0m Trial 282 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:47,157]\u001b[0m Trial 283 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:47,394]\u001b[0m Trial 284 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:47,564]\u001b[0m Trial 285 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:47,701]\u001b[0m Trial 286 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:47,936]\u001b[0m Trial 287 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:48,091]\u001b[0m Trial 288 pruned. Trial was pruned at iteration 10.\u001b[0m\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[32m[I 2021-09-07 16:28:48,268]\u001b[0m Trial 289 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:48,470]\u001b[0m Trial 290 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:48,650]\u001b[0m Trial 291 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:48,799]\u001b[0m Trial 292 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:48,953]\u001b[0m Trial 293 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:49,112]\u001b[0m Trial 294 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:49,267]\u001b[0m Trial 295 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:49,454]\u001b[0m Trial 296 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:49,631]\u001b[0m Trial 297 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:49,744]\u001b[0m Trial 298 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:49,933]\u001b[0m Trial 299 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n" + ] + }, { "name": "stdout", "output_type": "stream", "text": [ - "[LightGBM] [Warning] Auto-choosing row-wise multi-threading, the overhead of testing was 0.000585 seconds.\n", - "You can set `force_row_wise=true` to remove the overhead.\n", - "And if memory is not enough, you can set `force_col_wise=true`.\n", + "Number of finished trials: 300\n", + "Best trial:\n", + " Value: 0.5102536883069437\n", + " Params: \n", + " lambda_l1: 1.6446541932580473e-07\n", + " lambda_l2: 1.0734412329614426e-07\n", + " num_leaves: 193\n", + " feature_fraction: 0.8123395009389368\n", + " bagging_fraction: 0.9184649865458648\n", + " bagging_freq: 1\n", + " min_child_samples: 10\n", + "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000797 seconds.\n", + "You can set `force_col_wise=true` to remove the overhead.\n", "[LightGBM] [Info] Total Bins 1837\n", - "[LightGBM] [Info] Number of data points in the train set: 13932, number of used features: 8\n", - "[LightGBM] [Info] Start training from score 2.072422\n", + "[LightGBM] [Info] Number of data points in the train set: 14861, number of used features: 8\n", + "[LightGBM] [Info] Start training from score 2.069154\n", + "Training until validation scores don't improve for 20 rounds\n", + "[50]\tvalid_0's rmse: 0.451573\n", + "Early stopping, best iteration is:\n", + "[63]\tvalid_0's rmse: 0.44707\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[32m[I 2021-09-07 16:28:51,004]\u001b[0m Trial 300 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:51,159]\u001b[0m Trial 301 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:51,315]\u001b[0m Trial 302 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:51,412]\u001b[0m Trial 303 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:51,543]\u001b[0m Trial 304 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:51,686]\u001b[0m Trial 305 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:51,842]\u001b[0m Trial 306 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:51,977]\u001b[0m Trial 307 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:52,105]\u001b[0m Trial 308 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:52,202]\u001b[0m Trial 309 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:52,341]\u001b[0m Trial 310 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:52,426]\u001b[0m Trial 311 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:52,522]\u001b[0m Trial 312 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:52,642]\u001b[0m Trial 313 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:52,755]\u001b[0m Trial 314 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:52,880]\u001b[0m Trial 315 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:52,980]\u001b[0m Trial 316 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:53,093]\u001b[0m Trial 317 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:53,202]\u001b[0m Trial 318 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:53,333]\u001b[0m Trial 319 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:53,488]\u001b[0m Trial 320 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:53,634]\u001b[0m Trial 321 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:53,749]\u001b[0m Trial 322 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:53,863]\u001b[0m Trial 323 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:53,983]\u001b[0m Trial 324 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:54,088]\u001b[0m Trial 325 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:54,198]\u001b[0m Trial 326 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:54,321]\u001b[0m Trial 327 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:54,417]\u001b[0m Trial 328 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:54,486]\u001b[0m Trial 329 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:54,630]\u001b[0m Trial 330 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:54,711]\u001b[0m Trial 331 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:54,838]\u001b[0m Trial 332 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:54,968]\u001b[0m Trial 333 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:55,077]\u001b[0m Trial 334 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:55,179]\u001b[0m Trial 335 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:55,319]\u001b[0m Trial 336 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:55,518]\u001b[0m Trial 337 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:55,609]\u001b[0m Trial 338 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:55,741]\u001b[0m Trial 339 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:55,903]\u001b[0m Trial 340 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:56,024]\u001b[0m Trial 341 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:56,140]\u001b[0m Trial 342 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:56,425]\u001b[0m Trial 343 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:56,577]\u001b[0m Trial 344 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:56,697]\u001b[0m Trial 345 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:56,814]\u001b[0m Trial 346 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:56,931]\u001b[0m Trial 347 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:57,047]\u001b[0m Trial 348 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:57,165]\u001b[0m Trial 349 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:57,318]\u001b[0m Trial 350 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:57,470]\u001b[0m Trial 351 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:57,640]\u001b[0m Trial 352 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:57,826]\u001b[0m Trial 353 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:57,991]\u001b[0m Trial 354 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:58,135]\u001b[0m Trial 355 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:58,300]\u001b[0m Trial 356 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:58,479]\u001b[0m Trial 357 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:58,638]\u001b[0m Trial 358 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:58,813]\u001b[0m Trial 359 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:59,027]\u001b[0m Trial 360 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:59,183]\u001b[0m Trial 361 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:59,346]\u001b[0m Trial 362 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:59,510]\u001b[0m Trial 363 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:59,627]\u001b[0m Trial 364 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:59,792]\u001b[0m Trial 365 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:28:59,954]\u001b[0m Trial 366 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:00,188]\u001b[0m Trial 367 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:00,393]\u001b[0m Trial 368 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:00,557]\u001b[0m Trial 369 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:00,707]\u001b[0m Trial 370 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:00,836]\u001b[0m Trial 371 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:01,018]\u001b[0m Trial 372 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:01,171]\u001b[0m Trial 373 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:01,355]\u001b[0m Trial 374 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:01,634]\u001b[0m Trial 375 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:01,803]\u001b[0m Trial 376 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:01,949]\u001b[0m Trial 377 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:02,110]\u001b[0m Trial 378 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:02,225]\u001b[0m Trial 379 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:02,409]\u001b[0m Trial 380 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:02,574]\u001b[0m Trial 381 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:02,733]\u001b[0m Trial 382 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:02,895]\u001b[0m Trial 383 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:03,059]\u001b[0m Trial 384 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:03,184]\u001b[0m Trial 385 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:03,408]\u001b[0m Trial 386 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:03,583]\u001b[0m Trial 387 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:03,729]\u001b[0m Trial 388 pruned. Trial was pruned at iteration 10.\u001b[0m\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[32m[I 2021-09-07 16:29:03,924]\u001b[0m Trial 389 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:04,120]\u001b[0m Trial 390 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:04,292]\u001b[0m Trial 391 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:04,406]\u001b[0m Trial 392 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:04,514]\u001b[0m Trial 393 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:04,671]\u001b[0m Trial 394 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:04,760]\u001b[0m Trial 395 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:04,879]\u001b[0m Trial 396 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:05,049]\u001b[0m Trial 397 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:05,188]\u001b[0m Trial 398 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:05,277]\u001b[0m Trial 399 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Number of finished trials: 400\n", + "Best trial:\n", + " Value: 0.5102536883069437\n", + " Params: \n", + " lambda_l1: 1.6446541932580473e-07\n", + " lambda_l2: 1.0734412329614426e-07\n", + " num_leaves: 193\n", + " feature_fraction: 0.8123395009389368\n", + " bagging_fraction: 0.9184649865458648\n", + " bagging_freq: 1\n", + " min_child_samples: 10\n", + "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000424 seconds.\n", + "You can set `force_col_wise=true` to remove the overhead.\n", + "[LightGBM] [Info] Total Bins 1838\n", + "[LightGBM] [Info] Number of data points in the train set: 14861, number of used features: 8\n", + "[LightGBM] [Info] Start training from score 2.063366\n", + "Training until validation scores don't improve for 20 rounds\n", + "[50]\tvalid_0's rmse: 0.458105\n", + "[100]\tvalid_0's rmse: 0.449598\n", + "[150]\tvalid_0's rmse: 0.449092\n", + "Early stopping, best iteration is:\n", + "[138]\tvalid_0's rmse: 0.448682\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[32m[I 2021-09-07 16:29:06,435]\u001b[0m Trial 400 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:06,586]\u001b[0m Trial 401 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:06,756]\u001b[0m Trial 402 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:06,867]\u001b[0m Trial 403 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:06,968]\u001b[0m Trial 404 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:07,051]\u001b[0m Trial 405 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:07,151]\u001b[0m Trial 406 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:07,290]\u001b[0m Trial 407 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:07,400]\u001b[0m Trial 408 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:07,530]\u001b[0m Trial 409 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:07,649]\u001b[0m Trial 410 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:07,752]\u001b[0m Trial 411 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:07,880]\u001b[0m Trial 412 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:08,002]\u001b[0m Trial 413 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:08,140]\u001b[0m Trial 414 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:08,264]\u001b[0m Trial 415 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:08,434]\u001b[0m Trial 416 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:08,553]\u001b[0m Trial 417 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:08,677]\u001b[0m Trial 418 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:08,794]\u001b[0m Trial 419 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:08,902]\u001b[0m Trial 420 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:09,060]\u001b[0m Trial 421 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:09,181]\u001b[0m Trial 422 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:09,279]\u001b[0m Trial 423 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:09,517]\u001b[0m Trial 424 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:09,691]\u001b[0m Trial 425 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:09,858]\u001b[0m Trial 426 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:10,005]\u001b[0m Trial 427 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:10,197]\u001b[0m Trial 428 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:10,377]\u001b[0m Trial 429 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:10,567]\u001b[0m Trial 430 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:10,749]\u001b[0m Trial 431 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:10,908]\u001b[0m Trial 432 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:11,012]\u001b[0m Trial 433 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:11,264]\u001b[0m Trial 434 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:11,460]\u001b[0m Trial 435 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:11,636]\u001b[0m Trial 436 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:11,805]\u001b[0m Trial 437 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:11,985]\u001b[0m Trial 438 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:12,116]\u001b[0m Trial 439 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:12,257]\u001b[0m Trial 440 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:12,455]\u001b[0m Trial 441 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:12,626]\u001b[0m Trial 442 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:12,785]\u001b[0m Trial 443 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:12,916]\u001b[0m Trial 444 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:13,059]\u001b[0m Trial 445 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:13,202]\u001b[0m Trial 446 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:13,402]\u001b[0m Trial 447 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:13,571]\u001b[0m Trial 448 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:13,751]\u001b[0m Trial 449 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:13,967]\u001b[0m Trial 450 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:14,171]\u001b[0m Trial 451 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:14,354]\u001b[0m Trial 452 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:14,580]\u001b[0m Trial 453 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:14,763]\u001b[0m Trial 454 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:14,944]\u001b[0m Trial 455 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:15,124]\u001b[0m Trial 456 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:15,326]\u001b[0m Trial 457 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:15,508]\u001b[0m Trial 458 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:15,707]\u001b[0m Trial 459 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:15,875]\u001b[0m Trial 460 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:16,043]\u001b[0m Trial 461 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:16,205]\u001b[0m Trial 462 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:16,355]\u001b[0m Trial 463 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:16,499]\u001b[0m Trial 464 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:16,601]\u001b[0m Trial 465 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:16,757]\u001b[0m Trial 466 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:16,899]\u001b[0m Trial 467 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:17,040]\u001b[0m Trial 468 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:17,256]\u001b[0m Trial 469 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:17,440]\u001b[0m Trial 470 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:17,613]\u001b[0m Trial 471 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:17,735]\u001b[0m Trial 472 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:17,851]\u001b[0m Trial 473 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:17,940]\u001b[0m Trial 474 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:18,015]\u001b[0m Trial 475 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:18,140]\u001b[0m Trial 476 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:18,318]\u001b[0m Trial 477 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:18,485]\u001b[0m Trial 478 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:18,594]\u001b[0m Trial 479 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:18,767]\u001b[0m Trial 480 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:18,864]\u001b[0m Trial 481 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:19,000]\u001b[0m Trial 482 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:19,141]\u001b[0m Trial 483 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:19,241]\u001b[0m Trial 484 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:19,362]\u001b[0m Trial 485 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:19,512]\u001b[0m Trial 486 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:19,651]\u001b[0m Trial 487 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:19,777]\u001b[0m Trial 488 pruned. Trial was pruned at iteration 10.\u001b[0m\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[32m[I 2021-09-07 16:29:19,892]\u001b[0m Trial 489 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:20,026]\u001b[0m Trial 490 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:20,152]\u001b[0m Trial 491 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:20,291]\u001b[0m Trial 492 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:20,385]\u001b[0m Trial 493 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:20,508]\u001b[0m Trial 494 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:20,644]\u001b[0m Trial 495 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:20,766]\u001b[0m Trial 496 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:20,912]\u001b[0m Trial 497 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:21,093]\u001b[0m Trial 498 pruned. Trial was pruned at iteration 10.\u001b[0m\n", + "\u001b[32m[I 2021-09-07 16:29:21,228]\u001b[0m Trial 499 pruned. 
Trial was pruned at iteration 10.\u001b[0m\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Number of finished trials: 500\n", + "Best trial:\n", + " Value: 0.5102536883069437\n", + " Params: \n", + " lambda_l1: 1.6446541932580473e-07\n", + " lambda_l2: 1.0734412329614426e-07\n", + " num_leaves: 193\n", + " feature_fraction: 0.8123395009389368\n", + " bagging_fraction: 0.9184649865458648\n", + " bagging_freq: 1\n", + " min_child_samples: 10\n", + "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.001110 seconds.\n", + "You can set `force_col_wise=true` to remove the overhead.\n", + "[LightGBM] [Info] Total Bins 1838\n", + "[LightGBM] [Info] Number of data points in the train set: 14861, number of used features: 8\n", + "[LightGBM] [Info] Start training from score 2.066924\n", "Training until validation scores don't improve for 20 rounds\n", - "[50]\tvalid_0's rmse: 0.459286\n", - "[100]\tvalid_0's rmse: 0.449051\n", - "[150]\tvalid_0's rmse: 0.448356\n", + "[50]\tvalid_0's rmse: 0.444941\n", + "[100]\tvalid_0's rmse: 0.437147\n", "Early stopping, best iteration is:\n", - "[166]\tvalid_0's rmse: 0.447872\n" + "[90]\tvalid_0's rmse: 0.43678\n", + "Wall time: 1min 46s\n" ] } ], "source": [ - "dtrain = lgb.Dataset(train_x, label=train_y)\n", - "dvalid = lgb.Dataset(valid_x, label=valid_y)\n", - "model = lgb.train(params=params, train_set=dtrain,valid_sets=[dvalid],\n", - " verbose_eval=50,\n", - " early_stopping_rounds=20,\n", - " num_boost_round=5000)\n", - "predict = model.predict(test_x)" + "%%time\n", + "if __name__ == \"__main__\":\n", + " test_predict = np.zeros(shape=[test_x.shape[0], 5],dtype=float)\n", + " study = optuna.create_study(\n", + " pruner=optuna.pruners.MedianPruner(n_warmup_steps=10), direction=\"minimize\" # 指定是越小越好\n", + " )\n", + " for i, (trn_idx, val_idx) in enumerate(skf.split(train_x, train_y)):\n", + " study.optimize(lambda trial: objective(trial, train_x.iloc[trn_idx], train_y.iloc[trn_idx], \n", + " train_x.iloc[val_idx], train_y.iloc[val_idx]), n_trials=100)\n", + "\n", + " print(\"Number of finished trials: {}\".format(len(study.trials)))\n", + "\n", + " print(\"Best trial:\")\n", + " trial = study.best_trial\n", + "\n", + " print(\" Value: {}\".format(trial.value))\n", + "\n", + " print(\" Params: \")\n", + " for key, value in trial.params.items():\n", + " print(\" {}: {}\".format(key, value))\n", + " \n", + " params = {'boosting_type': 'gbdt',\n", + " 'objective': 'regression',\n", + " \"metric\": 'rmse'}\n", + " for key, value in trial.params.items():\n", + " params[key]=value\n", + " \n", + " dtrain = lgb.Dataset(train_x.iloc[trn_idx], label=train_y.iloc[trn_idx])\n", + " dvalid = lgb.Dataset(train_x.iloc[val_idx], label=train_y.iloc[val_idx])\n", + " model = lgb.train(params=params, train_set=dtrain,valid_sets=[dvalid],\n", + " verbose_eval=50,\n", + " early_stopping_rounds=20,\n", + " num_boost_round=5000)\n", + " test_predict[:,i] = model.predict(test_x)\n", + " predict = np.mean(test_predict,axis=1)" ] }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 13, "id": "f28d82da", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "0.44403838770137805" + "0.432341765333029" ] }, - "execution_count": 26, + "execution_count": 13, "metadata": {}, "output_type": "execute_result" } @@ -639,17 +1185,19 @@ }, { "cell_type": "markdown", - "id": "5e292bdb", + "id": "0c7ee3c5", "metadata": {}, "source": [ "### 回归任务的结论\n", - 
"不使用optuna的分数是0.4531666044672748,使用的分数是0.44403838770137805,提升了0.00912821676589675。" + "不使用optuna的分数是0.4346521330333544,使用的分数是0.432341765333029,提升了0.0023103677003254。\n", + "\n", + "作者测了很多次,基本在0.003-0.002之间,感兴趣的可以多跑几次。" ] }, { "cell_type": "code", "execution_count": null, - "id": "36384535", + "id": "82ea6cab", "metadata": {}, "outputs": [], "source": []