{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Construcción de clasificadores\n", "\n", "* *30 min* | Última modificación: Sept 22, 2020" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "##\n", "## los datos se encuentran disponibles directamente en scikit-learn\n", "##\n", "from sklearn.datasets import fetch_20newsgroups\n", "news = fetch_20newsgroups(subset='all')" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "dict_keys(['data', 'filenames', 'target_names', 'target', 'DESCR'])" ] }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "##\n", "## campos de datosqu\n", "##\n", "news.keys()" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "['alt.atheism',\n", " 'comp.graphics',\n", " 'comp.os.ms-windows.misc',\n", " 'comp.sys.ibm.pc.hardware',\n", " 'comp.sys.mac.hardware',\n", " 'comp.windows.x',\n", " 'misc.forsale',\n", " 'rec.autos',\n", " 'rec.motorcycles',\n", " 'rec.sport.baseball',\n", " 'rec.sport.hockey',\n", " 'sci.crypt',\n", " 'sci.electronics',\n", " 'sci.med',\n", " 'sci.space',\n", " 'soc.religion.christian',\n", " 'talk.politics.guns',\n", " 'talk.politics.mideast',\n", " 'talk.politics.misc',\n", " 'talk.religion.misc']" ] }, "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ "##\n", "## Nombres de los grupos\n", "##\n", "news.target_names" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "From: Mamatha Devineni Ratnam \n", "Subject: Pens fans reactions\n", "Organization: Post Office, Carnegie Mellon, Pittsburgh, PA\n", "Lines: 12\n", "NNTP-Posting-Host: po4.andrew.cmu.edu\n", "\n", "\n", "\n", "I am sure some bashers of Pens fans are pretty confused about the lack\n", "of any kind of posts about the recent Pens massacre of the Devils. Actually,\n", "I am bit puzzled too and a bit relieved. However, I am going to put an end\n", "to non-PIttsburghers' relief with a bit of praise for the Pens. Man, they\n", "are killing those Devils worse than I thought. Jagr just showed you why\n", "he is much better than his regular season stats. He is also a lot\n", "fo fun to watch in the playoffs. Bowman should let JAgr have a lot of\n", "fun in the next couple of games since the Pens are going to beat the pulp out of Jersey anyway. I was very disappointed not to see the Islanders lose the final\n", "regular season game. PENS RULE!!!\n", "\n", "\n" ] } ], "source": [ "##\n", "## A continuación se imprime un mensaje como ejemplo\n", "##\n", "print(news.data[0])" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [], "source": [ "\n", "##\n", "## Se utiliza el 75% de los datos para entrenamiento\n", "## y el 25% restante para prueba\n", "##\n", "SPLIT_PERC = 0.75\n", "\n", "split_size = int(len(news.data)*SPLIT_PERC)\n", "\n", "X_train = news.data[:split_size]\n", "X_test = news.data[split_size:]\n", "y_train = news.target[:split_size]\n", "y_test = news.target[split_size:]" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [], "source": [ "from sklearn.naive_bayes import MultinomialNB\n", "from sklearn.pipeline import Pipeline\n", "from sklearn.feature_extraction.text import TfidfVectorizer\n", "\n", "clf = Pipeline([\n", " ('vect', TfidfVectorizer()),\n", " ('clf', MultinomialNB())])" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Mean score: 0.844 (+/-0.002)\n" ] } ], "source": [ "from sklearn.model_selection import cross_val_score, KFold\n", "from scipy.stats import sem\n", "import numpy as np\n", "\n", "K = 10\n", "\n", "kf = KFold(\n", " n_splits = K, # cantidad de grupos\n", " shuffle = True, # los ejemplos son asignados aleatoriamente a cada grupo\n", " random_state = 12345) # semilla del generador aleatorio\n", "\n", "cv = kf.get_n_splits(X_train) # particiones de los mensajes\n", "\n", "## computa el score promedio\n", "score = cross_val_score(\n", " clf, # clasificador\n", " X_train, #\n", " y_train,\n", " cv=cv)\n", "\n", "print (\"Mean score: {0:.3f} (+/-{1:.3f})\".format(np.mean(score), sem(score)))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Para revizar: notas del libro Text Analytics with Python 2da edicion" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "CONTRACTION_MAP = {\n", " \"ain't\": \"is not\",\n", " \"aren't\": \"are not\",\n", " \"can't\": \"cannot\",\n", " \"can't've\": \"cannot have\",\n", " \"'cause\": \"because\",\n", " \"could've\": \"could have\",\n", " \"couldn't\": \"could not\",\n", " \"couldn't've\": \"could not have\",\n", " \"didn't\": \"did not\",\n", " \"doesn't\": \"does not\",\n", " \"don't\": \"do not\",\n", " \"hadn't\": \"had not\",\n", " \"hadn't've\": \"had not have\",\n", " \"hasn't\": \"has not\",\n", " \"haven't\": \"have not\",\n", " \"he'd\": \"he would\",\n", " \"he'd've\": \"he would have\",\n", " \"he'll\": \"he will\",\n", " \"he'll've\": \"he he will have\",\n", " \"he's\": \"he is\",\n", " \"how'd\": \"how did\",\n", " \"how'd'y\": \"how do you\",\n", " \"how'll\": \"how will\",\n", " \"how's\": \"how is\",\n", " \"I'd\": \"I would\",\n", " \"I'd've\": \"I would have\",\n", " \"I'll\": \"I will\",\n", " \"I'll've\": \"I will have\",\n", " \"I'm\": \"I am\",\n", " \"I've\": \"I have\",\n", " \"i'd\": \"i would\",\n", " \"i'd've\": \"i would have\",\n", " \"i'll\": \"i will\",\n", " \"i'll've\": \"i will have\",\n", " \"i'm\": \"i am\",\n", " \"i've\": \"i have\",\n", " \"isn't\": \"is not\",\n", " \"it'd\": \"it would\",\n", " \"it'd've\": \"it would have\",\n", " \"it'll\": \"it will\",\n", " \"it'll've\": \"it will have\",\n", " \"it's\": \"it is\",\n", " \"let's\": \"let us\",\n", " \"ma'am\": \"madam\",\n", " \"mayn't\": \"may not\",\n", " \"might've\": \"might have\",\n", " \"mightn't\": \"might not\",\n", " \"mightn't've\": \"might not have\",\n", " \"must've\": \"must have\",\n", " \"mustn't\": \"must not\",\n", " \"mustn't've\": \"must not have\",\n", " \"needn't\": \"need not\",\n", " \"needn't've\": \"need not have\",\n", " \"o'clock\": \"of the clock\",\n", " \"oughtn't\": \"ought not\",\n", " \"oughtn't've\": \"ought not have\",\n", " \"shan't\": \"shall not\",\n", " \"sha'n't\": \"shall not\",\n", " \"shan't've\": \"shall not have\",\n", " \"she'd\": \"she would\",\n", " \"she'd've\": \"she would have\",\n", " \"she'll\": \"she will\",\n", " \"she'll've\": \"she will have\",\n", " \"she's\": \"she is\",\n", " \"should've\": \"should have\",\n", " \"shouldn't\": \"should not\",\n", " \"shouldn't've\": \"should not have\",\n", " \"so've\": \"so have\",\n", " \"so's\": \"so as\",\n", " \"that'd\": \"that would\",\n", " \"that'd've\": \"that would have\",\n", " \"that's\": \"that is\",\n", " \"there'd\": \"there would\",\n", " \"there'd've\": \"there would have\",\n", " \"there's\": \"there is\",\n", " \"they'd\": \"they would\",\n", " \"they'd've\": \"they would have\",\n", " \"they'll\": \"they will\",\n", " \"they'll've\": \"they will have\",\n", " \"they're\": \"they are\",\n", " \"they've\": \"they have\",\n", " \"to've\": \"to have\",\n", " \"wasn't\": \"was not\",\n", " \"we'd\": \"we would\",\n", " \"we'd've\": \"we would have\",\n", " \"we'll\": \"we will\",\n", " \"we'll've\": \"we will have\",\n", " \"we're\": \"we are\",\n", " \"we've\": \"we have\",\n", " \"weren't\": \"were not\",\n", " \"what'll\": \"what will\",\n", " \"what'll've\": \"what will have\",\n", " \"what're\": \"what are\",\n", " \"what's\": \"what is\",\n", " \"what've\": \"what have\",\n", " \"when's\": \"when is\",\n", " \"when've\": \"when have\",\n", " \"where'd\": \"where did\",\n", " \"where's\": \"where is\",\n", " \"where've\": \"where have\",\n", " \"who'll\": \"who will\",\n", " \"who'll've\": \"who will have\",\n", " \"who's\": \"who is\",\n", " \"who've\": \"who have\",\n", " \"why's\": \"why is\",\n", " \"why've\": \"why have\",\n", " \"will've\": \"will have\",\n", " \"won't\": \"will not\",\n", " \"won't've\": \"will not have\",\n", " \"would've\": \"would have\",\n", " \"wouldn't\": \"would not\",\n", " \"wouldn't've\": \"would not have\",\n", " \"y'all\": \"you all\",\n", " \"y'all'd\": \"you all would\",\n", " \"y'all'd've\": \"you all would have\",\n", " \"y'all're\": \"you all are\",\n", " \"y'all've\": \"you all have\",\n", " \"you'd\": \"you would\",\n", " \"you'd've\": \"you would have\",\n", " \"you'll\": \"you will\",\n", " \"you'll've\": \"you will have\",\n", " \"you're\": \"you are\",\n", " \"you've\": \"you have\",\n", "}" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import unicodedata\n", "import re\n", "from bs4 import BeautifulSoup\n", "import spacy\n", "from nltk.tokenize.toktok import ToktokTokenizer\n", "import nltk\n", "\n", "stopword_list = nltk.corpus.stopwords.words('english')\n", "stopword_list.remove('no')\n", "stopword_list.remove('not')\n", "\n", "nlp = spacy.load('en_core_web_sm', parse=True, tag=True, entity=True)\n", "tokenizer = ToktokTokenizer()\n", "\n", "def normalize_corpus(corpus):\n", " #\n", " def strip_html_tags(text):\n", " soup = BeautifulSoup(text, \"html.parser\")\n", " soup = [s.extract() for s in soup(['iframe', 'script'])] \n", " stripped_text = soup.get_text()\n", " stripped_text = re.sub(r'[\\r|\\n|\\r\\n]+', '\\n', stripped_text)\n", " return stripped_text\n", " #\n", " def remove_accented_chars(text):\n", " text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n", " return text\n", " #\n", " def expand_contractions(text):\n", " contractions_pattern = re.compile('({})'.format('|'.join(CONTRACTION_MAP.keys())), flags=re.IGNORECASE|re.DOTALL)\n", " def expand_match(contraction):\n", " match = contraction.group(0)\n", " first_char = match[0]\n", " expanded_contraction = contraction_mapping.get(match) if contraction_mapping.get(match) else contraction_mapping.get(match.lower())\n", " expanded_contraction = first_char+expanded_contraction[1:]\n", " return expanded_contraction\n", " expanded_text = contractions_pattern.sub(expand_match, text)\n", " expanded_text = re.sub(\"'\", \"\", expanded_text)\n", " return expanded_text\n", " #\n", " def lemmatize_text(text): \n", " text = nlp(text)\n", " text = ' '.join([word.lemma_ if word.lemma_ != '-PRON-' else word.text for word in text])\n", " return text\n", " #\n", " def remove_stopwords(text, is_lower_case=False):\n", " tokens = tokenizer.tokenize(text)\n", " tokens = [token.strip() for token in tokens]\n", " if is_lower_case:\n", " filtered_tokens = [token for token in stopword_list]\n", " else:\n", " filtered_tokens = [token for token in text if token not in stopword_list]\n", " filtered_text = ' '.join(filtered_tokens) \n", " return filtered_text\n", " \n", " def remove_special_characters(text, remove_digits=False):\n", " pattern = r'[^a-zA-z0-9\\s]' if not remove_digits else r'[^a-zA-z\\s]' \n", " text = re.sub(pattern, '', text)\n", " return text\n", " \n", " normalized_corpus = []\n", " for doc in corpus:\n", " # doc = strip_html_tags(doc)\n", " doc = remove_accented_chars(doc)\n", " # doc = expand_contractions(doc)\n", " doc = doc.lower()\n", " doc = re.sub(r'[\\r|\\n|\\r\\n]+', ' ',doc)\n", " doc = lemmatize_text(doc)\n", " special_char_pattern = re.compile(r'([{.(-)!}])')\n", " doc = special_char_pattern.sub(\" \\\\1 \", doc)\n", " special_char_pattern = re.compile(r'([{.(-)!}])')\n", " doc = special_char_pattern.sub(\" \\\\1 \", doc)\n", " doc = re.sub(' +', ' ', doc)\n", " doc = remove_stopwords(doc, is_lower_case=True)\n", " doc = remove_special_characters(doc, remove_digits=True)\n", " normalized_corpus.append(doc)\n", " return normalized_corpus" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "data_df['Clean Article'] = normalize_corpus(corpus=data_df['Article'])" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "data_df = data_df[['Article', 'Clean Article', 'Target Label', 'Target Name']] \n", "data_df.head(10)" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.9" } }, "nbformat": 4, "nbformat_minor": 4 }