From dde105eefc754b1adab62c140bff71b0b05e8d4a Mon Sep 17 00:00:00 2001
From: pancodaffee
Date: Thu, 14 Dec 2023 22:01:36 -0800
Subject: [PATCH] fix data file path issue

---
 6-NLP/5-Hotel-Reviews-2/README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/6-NLP/5-Hotel-Reviews-2/README.md b/6-NLP/5-Hotel-Reviews-2/README.md
index 36c00bed..a36c79b1 100644
--- a/6-NLP/5-Hotel-Reviews-2/README.md
+++ b/6-NLP/5-Hotel-Reviews-2/README.md
@@ -251,7 +251,7 @@ from nltk.sentiment.vader import SentimentIntensityAnalyzer
 nltk.download('vader_lexicon')
 
 # Load the filtered hotel reviews from CSV
-df = pd.read_csv('../../data/Hotel_Reviews_Filtered.csv')
+df = pd.read_csv('../data/Hotel_Reviews_Filtered.csv')
 
 # You code will be added here
 
@@ -275,7 +275,7 @@ Removing the stop words is also a fast operation, removing the stop words from 2
 from nltk.corpus import stopwords
 
 # Load the hotel reviews from CSV
-df = pd.read_csv("../../data/Hotel_Reviews_Filtered.csv")
+df = pd.read_csv("../data/Hotel_Reviews_Filtered.csv")
 
 # Remove stop words - can be slow for a lot of text!
 # Ryan Han (ryanxjhan on Kaggle) has a great post measuring performance of different stop words removal approaches
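
For reference, a minimal sketch (not part of the patch) of how the corrected relative path feeds the two steps these hunks touch: stop-word removal and VADER sentiment scoring. It assumes the code runs from inside 6-NLP/5-Hotel-Reviews-2/ so that ../data resolves to the section's shared data folder, and the Negative_Review column name is an assumption based on the underlying Kaggle hotel reviews dataset, not something this patch specifies.

import pandas as pd
import nltk
from nltk.corpus import stopwords
from nltk.sentiment.vader import SentimentIntensityAnalyzer

nltk.download('stopwords')
nltk.download('vader_lexicon')

# Corrected path from the patch: one directory up, not two
df = pd.read_csv('../data/Hotel_Reviews_Filtered.csv')

# Build the stop-word set once; set membership tests are O(1),
# which keeps removal fast even on a large review corpus
stop_words = set(stopwords.words('english'))

def remove_stopwords(text: str) -> str:
    # Simple whitespace tokenization; str() guards against NaN cells
    return ' '.join(w for w in str(text).split() if w.lower() not in stop_words)

# Column name is assumed for this sketch, not taken from the patch
df['Negative_Review'] = df['Negative_Review'].apply(remove_stopwords)

# VADER's compound score ranges from -1 (most negative) to +1 (most positive)
vader = SentimentIntensityAnalyzer()
df['Negative_Sentiment'] = df['Negative_Review'].apply(
    lambda t: vader.polarity_scores(t)['compound']
)
print(df[['Negative_Review', 'Negative_Sentiment']].head())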