Commit ce2b1ad (1 parent: 86b8bd4)
Showing 2 changed files with 131 additions and 72 deletions.
.ipynb_checkpoints/clustering movie review-checkpoint.ipynb
107 changes: 107 additions & 0 deletions
@@ -0,0 +1,107 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "import pandas as pd\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "from sklearn.cluster import KMeans\n",
    "from sklearn.metrics import silhouette_score\n",
    "import nltk\n",
    "from nltk.sentiment import SentimentIntensityAnalyzer\n",
    "\n",
    "# Download the VADER lexicon used by SentimentIntensityAnalyzer (needed on first run)\n",
    "nltk.download('vader_lexicon')\n",
    "\n",
    "# Load NLTK's sentiment analyzer\n",
    "sid = SentimentIntensityAnalyzer()\n",
    "\n",
    "data = pd.read_csv('Product listing.csv')\n",
    "\n",
    "# Data preprocessing\n",
    "def preprocess_text(text):\n",
    "    # Convert text to lowercase\n",
    "    text = text.lower()\n",
    "    # Tokenization can be done using regex or libraries like NLTK or spaCy\n",
    "    # Here, a simple split by space is used\n",
    "    tokens = text.split()\n",
    "    # Remove stopwords (you may need to download the stopwords list for your language)\n",
    "    stopwords = set(['the', 'and', 'is', 'in', 'to', 'it', 'this', 'of', 'for', 'with', 'as'])\n",
    "    tokens = [token for token in tokens if token not in stopwords]\n",
    "    return ' '.join(tokens)\n",
    "\n",
    "data['clean_text'] = data['product'].apply(preprocess_text)\n",
    "\n",
    "# TF-IDF vectorization\n",
    "tfidf_vectorizer = TfidfVectorizer(max_features=1000)  # You can adjust max_features as needed\n",
    "tfidf_matrix = tfidf_vectorizer.fit_transform(data['clean_text'])\n",
    "\n",
    "# Clustering with K-means\n",
    "k = 5  # Number of clusters (you can adjust this)\n",
    "kmeans = KMeans(n_clusters=k, random_state=42)\n",
    "kmeans.fit(tfidf_matrix)\n",
    "\n",
    "# Assign cluster labels to each review\n",
    "data['cluster_label'] = kmeans.labels_\n",
    "\n",
    "# Sentiment Analysis\n",
    "def get_sentiment(text):\n",
    "    # NLTK's sentiment analyzer\n",
    "    sentiment_scores = sid.polarity_scores(text)\n",
    "    # Classify sentiment based on compound score\n",
    "    if sentiment_scores['compound'] >= 0.05:\n",
    "        return 'Positive'\n",
    "    elif sentiment_scores['compound'] <= -0.05:\n",
    "        return 'Negative'\n",
    "    else:\n",
    "        return 'Neutral'\n",
    "\n",
    "data['sentiment'] = data['clean_text'].apply(get_sentiment)\n",
    "\n",
    "\n",
    "# Evaluate clustering using silhouette score\n",
    "silhouette_avg = silhouette_score(tfidf_matrix, kmeans.labels_)\n",
    "print(f\"Silhouette Score: {silhouette_avg}\")\n",
    "\n",
    "# Display some reviews from each cluster\n",
    "for cluster_id in range(k):\n",
    "    cluster_samples = data[data['cluster_label'] == cluster_id].sample(5)  # Displaying 5 samples per cluster\n",
    "    print(f\"\\nCluster {cluster_id}:\")\n",
    "    for index, row in cluster_samples.iterrows():\n",
    "        print(row['product'])\n",
    "        print(\"Sentiment:\", row['sentiment'])\n",
    "        print('-' * 50)\n",
    "\n",
    "# You can further analyze the clusters and refine the process as needed\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
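
The notebook fixes k = 5 up front and reports a single silhouette score. A possible follow-up, sketched below rather than part of the commit: it assumes the cell above has already run so that tfidf_matrix, tfidf_vectorizer, kmeans, and data are still in scope, and it uses TfidfVectorizer.get_feature_names_out, which requires scikit-learn 1.0 or newer.

from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

# Compare silhouette scores across a small range of cluster counts
for k_candidate in range(2, 9):
    km = KMeans(n_clusters=k_candidate, random_state=42, n_init=10)
    labels = km.fit_predict(tfidf_matrix)
    print(f"k={k_candidate}: silhouette={silhouette_score(tfidf_matrix, labels):.3f}")

# Inspect the highest-weighted TF-IDF terms in each centroid of the fitted k=5 model
terms = tfidf_vectorizer.get_feature_names_out()
for cluster_id, centroid in enumerate(kmeans.cluster_centers_):
    top_terms = [terms[i] for i in centroid.argsort()[::-1][:10]]
    print(f"Cluster {cluster_id}: {', '.join(top_terms)}")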
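
The commit adds both cluster labels and sentiment labels but never combines them. A minimal sketch of one such combination, again assuming the data frame from the cell above with its cluster_label and sentiment columns, is to cross-tabulate sentiment against cluster:

import pandas as pd

# Count Positive/Negative/Neutral reviews within each K-means cluster
sentiment_by_cluster = pd.crosstab(data['cluster_label'], data['sentiment'])
print(sentiment_by_cluster)

# Per-cluster proportions are easier to compare than raw counts
print(sentiment_by_cluster.div(sentiment_by_cluster.sum(axis=1), axis=0).round(2))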