# Import the libraries required for the task and set up the stemmer and stopwords
import re
import string

import nltk
import numpy as np
import pandas as pd
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

stemmer = nltk.SnowballStemmer("english")
stopword = set(stopwords.words('english'))
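
# Note: the NLTK stopwords corpus must be available locally; on a fresh
# environment you may need a one-time download before the line above works:
# nltk.download('stopwords')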

data = pd.read_csv("datasets/twitter.csv")

# Step 2: I will add a new column to this dataset as labels, which will contain
# the values: Hate Speech, Offensive Language, No Hate and Offensive
data["labels"] = data["class"].map({0: "Hate Speech",
                                    1: "Offensive Language",
                                    2: "No Hate and Offensive"})

# Step 3: Now I will only select the tweet and labels columns for the rest of
# the task of training a hate speech detection model
data = data[["tweet", "labels"]]
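
# Optional check (not in the original script): inspect how the three labels
# are distributed before training.
# print(data["labels"].value_counts())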

# Step 5: Now I will create a function to clean the texts in the tweet column
def clean(text):
    text = str(text).lower()
    text = re.sub(r'\[.*?\]', '', text)                              # remove text in square brackets
    text = re.sub(r'https?://\S+|www\.\S+', '', text)                # remove URLs
    text = re.sub(r'<.*?>+', '', text)                               # remove HTML tags
    text = re.sub('[%s]' % re.escape(string.punctuation), '', text)  # remove punctuation
    text = re.sub(r'\n', '', text)                                   # remove newlines
    text = re.sub(r'\w*\d\w*', '', text)                             # remove words containing digits
    text = " ".join(word for word in text.split() if word not in stopword)  # remove stopwords
    text = " ".join(stemmer.stem(word) for word in text.split())            # stem each word
    return text

data["tweet"] = data["tweet"].apply(clean)
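
# Quick illustration of the cleaning step on a made-up example (the input
# string is invented for demonstration and is not from the dataset); URLs,
# HTML tags, digits, punctuation and stopwords should all be stripped:
# print(clean("Check this out!!! https://example.com <b>WOW</b> 123"))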

# Step 6: Now let's split the dataset into training and test sets and train a
# machine learning model for the task of hate speech detection
x = np.array(data["tweet"])
y = np.array(data["labels"])

cv = CountVectorizer()
X = cv.fit_transform(x)  # fit the vectorizer on the cleaned tweets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)

clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
# print(data.head())
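
# Optional evaluation (not part of the original script): accuracy on the
# held-out test split via the classifier's built-in score method.
# print(clf.score(X_test, y_test))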

# Step 7: Now let's test this machine learning model to see if it detects hate speech or not
sample = "Let's unite and kill all the people who are protesting against the government"
sample_data = cv.transform([sample]).toarray()
print(clf.predict(sample_data))
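

# A small convenience wrapper (a sketch, not part of the original script):
# clean an arbitrary text the same way as the training data, vectorize it with
# the fitted CountVectorizer, and return the predicted label.
def predict_label(text):
    vec = cv.transform([clean(text)]).toarray()
    return clf.predict(vec)[0]

# Example usage with a harmless made-up sentence:
# print(predict_label("i really enjoyed the concert last night"))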