-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathDecisionTree.py
More file actions
89 lines (71 loc) · 3.11 KB
/
DecisionTree.py
File metadata and controls
89 lines (71 loc) · 3.11 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
import random
import pandas as pd
import numpy as np
import matplotlib as nlp
import nltk
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, classification_report
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import warnings
warnings.filterwarnings('ignore')
from sklearn.feature_extraction.text import CountVectorizer
from nltk.tokenize import RegexpTokenizer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import ComplementNB
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
from sklearn import metrics
from math import *
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, classification_report
from sklearn import tree
from sklearn.metrics import accuracy_score
from utils import clean_tweets, handle_emojis, evaluate_with_three_labels, clean_dataset, evaluate_with_two_labels
# Stop Words: a stop word is a commonly used word (such as "the", "a", "an", "in")
# that a search engine has been programmed to ignore, both when indexing entries
# for searching and when retrieving them as the result of a search query.
stopword = set(stopwords.words('english'))

# Load the pre-cleaned tweet dataset (project-local helper; presumably returns a
# DataFrame with `cleaned_tweets` and `sentiment` columns — confirmed by the
# attribute accesses below).
tweets = clean_dataset()
X = tweets.cleaned_tweets
y = tweets.sentiment

# Hold out 5% of the data for testing. random_state is fixed at 100 so the
# split is reproducible. (The original code also drew an unused random integer
# here; that dead assignment has been removed.)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.05, random_state=100)

# TF-IDF features over unigrams and bigrams, capped at 500k features.
# Fit on the training split only, then transform both splits with the same
# vocabulary to avoid leaking test-set terms into the feature space.
vectoriser = TfidfVectorizer(ngram_range=(1, 2), max_features=500000)
vectoriser.fit(X_train)
X_train = vectoriser.transform(X_train)
X_test = vectoriser.transform(X_test)
def decision_trees(max_depth=100, max_leaf_nodes=120, min_samples_split=15, min_samples_leaf=3):
    """Train a decision-tree classifier on the module-level TF-IDF features.

    Uses the globals ``X_train``/``y_train`` for fitting and ``X_test``/``y_test``
    for scoring. The hyperparameters are now keyword arguments whose defaults
    match the previously hard-coded values, so existing ``decision_trees()``
    callers behave identically while tuning experiments no longer need to edit
    this function.

    Args:
        max_depth: Maximum depth of the tree.
        max_leaf_nodes: Maximum number of leaf nodes.
        min_samples_split: Minimum samples required to split an internal node.
        min_samples_leaf: Minimum samples required at a leaf node.

    Returns:
        Tuple ``(clf, acc)``: the fitted classifier and its accuracy on the
        held-out test split.
    """
    clf = tree.DecisionTreeClassifier(
        max_depth=max_depth,
        max_leaf_nodes=max_leaf_nodes,
        min_samples_split=min_samples_split,
        min_samples_leaf=min_samples_leaf,
    )
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    acc = accuracy_score(y_test, y_pred)
    return clf, acc
def fig_visualization_dt():
    """Evaluate a freshly trained decision tree with the three-label report.

    Trains via :func:`decision_trees` and forwards the fitted model to the
    project-local ``evaluate_with_three_labels`` helper on the module-level
    test split.
    """
    model, _accuracy = decision_trees()
    return evaluate_with_three_labels(model, X_test, y_test)
# NOTE(review): this call executes at import time, discards its return value,
# and duplicates the work of fig_visualization_dt() — it trains a second tree
# purely for the helper's side effects (presumably plotting/printing; confirm
# in utils). Consider moving it under an `if __name__ == "__main__":` guard.
evaluate_with_three_labels(decision_trees()[0], X_test, y_test)
def fig_visualization_binary():
    """Train and evaluate a two-label (binary) decision-tree pipeline.

    Reloads the dataset, drops the neutral sentiment class ('2') so only two
    labels remain, rebuilds the train/test split and TF-IDF features locally
    (shadowing the module-level globals on purpose), trains a tree with the
    same hyperparameters as :func:`decision_trees`, and reports via the
    project-local ``evaluate_with_two_labels`` helper.

    Returns:
        Tuple ``(evaluation, acc)``: whatever ``evaluate_with_two_labels``
        returns (presumably a figure/report — confirm in utils) and the
        test-set accuracy.
    """
    tweets = clean_dataset()
    # Remove the neutral class so the task becomes binary classification.
    tweets.drop(tweets[tweets.sentiment == '2'].index, inplace=True)
    X = tweets.cleaned_tweets
    y = tweets.sentiment
    # Same reproducible 5% hold-out as the module-level split. (An unused
    # `random_state = random.randint(...)` dead assignment has been removed.)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.05, random_state=100)
    # Fit TF-IDF on the training split only, then transform both splits.
    vectoriser = TfidfVectorizer(ngram_range=(1, 2), max_features=500000)
    vectoriser.fit(X_train)
    X_train = vectoriser.transform(X_train)
    X_test = vectoriser.transform(X_test)
    clf = tree.DecisionTreeClassifier(max_depth=100, max_leaf_nodes=120, min_samples_split=15, min_samples_leaf=3)
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    acc = accuracy_score(y_test, y_pred)
    return evaluate_with_two_labels(clf, X_test, y_test), acc