-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdiff_thresholding.py
More file actions
135 lines (118 loc) · 5.32 KB
/
diff_thresholding.py
File metadata and controls
135 lines (118 loc) · 5.32 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
import pandas as pd
import io
import requests
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.utils import shuffle
from sklearn.metrics import roc_auc_score, accuracy_score, precision_score, recall_score, f1_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import StandardScaler
from xgboost import XGBClassifier
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_validate, train_test_split, GroupKFold
from sklearn.feature_selection import SelectFromModel
def get_dataset(key):
    """Download one of the labeled template datasets from figshare.

    Parameters
    ----------
    key : str
        Dataset identifier; one of "one-source" or "hoax".

    Returns
    -------
    pandas.DataFrame parsed from the downloaded CSV.

    Raises
    ------
    KeyError if `key` is not a known dataset.
    requests.HTTPError if the download fails.
    """
    urls = {
        "one-source": "https://figshare.com/ndownloader/files/26648885",
        "hoax": "https://figshare.com/ndownloader/files/26648861"
    }
    # timeout prevents hanging forever on a stalled connection.
    response = requests.get(urls[key], allow_redirects=True, timeout=60)
    # Fail fast on HTTP errors instead of silently parsing an error page as CSV.
    response.raise_for_status()
    df = pd.read_csv(io.StringIO(response.content.decode('utf-8')))
    return df
def train_test_split_grouped(df, test_size=0.1):
    """Split `df` into shuffled train/test sets, grouped by page id.

    `df` is expected to be indexed by page_id; all rows sharing a page_id
    land in the same split, so no page straddles train and test.

    Bug fix: the original called `np.unique`, but numpy is never imported
    in this file, so any call raised NameError. `df.index.unique()` (pandas,
    already imported) gives the same set of page ids.

    Parameters
    ----------
    df : pandas.DataFrame indexed by page_id.
    test_size : float, fraction of page ids assigned to the test set.

    Returns
    -------
    (train, test, group): shuffled train and test DataFrames, plus the
    page_id group labels of the train rows (for grouped CV).
    """
    pageids = df.index.unique()
    train_pageids, test_pageids = train_test_split(pageids, test_size=test_size)
    train = shuffle(df.loc[train_pageids])
    test = shuffle(df.loc[test_pageids])
    group = train.index.values
    return train, test, group
def expand_scores(scores_dicts):
    """Collapse per-model cross-validation results into a summary DataFrame.

    Parameters
    ----------
    scores_dicts : dict mapping model name -> the dict returned by
        sklearn's `cross_validate` (arrays of per-fold scores).

    Returns
    -------
    pandas.DataFrame with one row per model and the mean of each metric,
    columns: Model, Fitting time, Scoring time, Accuracy, Precision,
    Recall, F1_score, AUC_ROC.
    """
    # (key in the cross_validate output, column name in the summary)
    metric_columns = [
        ('fit_time', 'Fitting time'),
        ('score_time', 'Scoring time'),
        ('test_accuracy', 'Accuracy'),
        ('test_precision_macro', 'Precision'),
        ('test_recall_macro', 'Recall'),
        ('test_f1_weighted', 'F1_score'),
        ('test_roc_auc', 'AUC_ROC'),
    ]
    summary = {'Model': list(scores_dicts.keys())}
    for key, column in metric_columns:
        # Mean over the CV folds for every model, in insertion order.
        summary[column] = [scores[key].mean() for scores in scores_dicts.values()]
    ordered = ['Model'] + [column for _, column in metric_columns]
    return pd.DataFrame(summary, columns=ordered)
def build_model(key, DROP_COLS=None):
    """Fetch the dataset for `key`, preprocess it, and cross-validate an
    XGBoost classifier that predicts the `has_template` label.

    Parameters
    ----------
    key : str, dataset identifier passed to `get_dataset`.
    DROP_COLS : optional list of extra column names to drop before training.

    Returns
    -------
    (model_results, feature_importance, fitted model, scaled X, y):
    summary DataFrame from `expand_scores`, gain-based importances keyed
    by feature name, the fitted XGBClassifier, the standard-scaled
    feature matrix (numpy array), and the boolean label series.
    """
    data = get_dataset(key)
    data.set_index('page_id', inplace=True)

    # Identifier columns carry no signal; callers may request extra drops.
    columns_to_drop = ['revision_id.key', 'revision_id']
    if DROP_COLS:
        columns_to_drop.extend(DROP_COLS)
    data.drop(columns_to_drop, axis=1, inplace=True)

    # Encode the label as boolean.
    data[['has_template']] = data[['has_template']].astype('bool')

    # Map ordinal article-quality labels onto 0..8 (List lowest, FA highest).
    quality_order = ["List", "Stub", "Start", "C", "B", "GA", "A", "FL", "FA"]
    quality_map = {'article_quality_score':
                   {label: rank for rank, label in enumerate(quality_order)}}
    data.replace(quality_map, inplace=True)

    # Any remaining missing values become 0 before fitting.
    data.fillna(0, inplace=True)
    y = data.pop('has_template')
    X = data.copy()
    pageID_group = data.index.values

    # Remember feature names: StandardScaler returns a bare numpy array.
    feature_names = list(X.columns)
    scaler = StandardScaler()
    scaler.fit(X)
    X = scaler.transform(X)

    scoring = ['accuracy', 'precision_macro', 'recall_macro', 'f1_weighted', 'roc_auc']
    model_scores = {}
    XGB_model = XGBClassifier(learning_rate=0.01, n_estimators=25, max_depth=15,
                              gamma=0.6, subsample=0.52, colsample_bytree=0.6,
                              reg_lambda=2, booster='dart',
                              colsample_bylevel=0.6, colsample_bynode=0.5)
    # Group CV folds by page_id so one page never straddles train/validation.
    XGB_scores = cross_validate(XGB_model, X, y, scoring=scoring,
                                cv=GroupKFold(), groups=pageID_group)
    XGB_model.fit(X, y)
    print(key)
    print(XGB_scores)
    model_scores['XGB'] = XGB_scores

    # Attach names to the booster so importances are keyed by feature name.
    XGB_model.get_booster().feature_names = feature_names
    feature_importance = XGB_model.get_booster().get_score(importance_type='gain')
    model_results = expand_scores(model_scores)
    return model_results, feature_importance, XGB_model, X, y
def model_training():
    """Train one model per maintenance-template type and collect all artifacts.

    Returns
    -------
    dict keyed by (template_name, artifact_name) tuples, where artifact_name
    is one of "scores", "feature_importance", "model", "X", "y".
    """
    artifacts = {}
    for template_name in ("one-source", "hoax"):
        scores, feature_importance, model, X, y = build_model(template_name)
        produced = (("scores", scores),
                    ("feature_importance", feature_importance),
                    ("model", model),
                    ("X", X),
                    ("y", y))
        for label, value in produced:
            artifacts[(template_name, label)] = value
    return artifacts
def plot_feature_importance(fimp, filename):
    """Save a horizontal bar chart of feature importances to `filename`.

    Parameters
    ----------
    fimp : dict mapping feature name -> importance score
        (e.g. the gain dict returned by `build_model`).
    filename : str or path, output file; format inferred from the extension.
    """
    fimp_df = pd.DataFrame(list(fimp.items()), columns=["Feature", "Importance"])
    # Ascending sort puts the most important feature at the top of the barh chart.
    fimp_df.sort_values("Importance", ascending=True, inplace=True)
    fig = plt.figure(figsize=(25, 5))
    plt.title("Feature importance ")
    plt.xlabel("importance ")
    plt.ylabel("features")
    plt.barh(fimp_df['Feature'], fimp_df['Importance'])
    plt.savefig(filename)
    # Bug fix: close the figure so repeated calls don't accumulate open
    # figures (matplotlib warns and leaks memory after ~20 unclosed figures).
    plt.close(fig)