Asked by: 小点点

NLP - Found input variables with inconsistent numbers of samples


So I'm trying to train a model to read greetings from a sample dataset collected from TripAdvisor, and I run into the following error when trying to train the model.

Here is the link to the dataset - https://nextit-public.s3-us-west-2.amazonaws.com/rsics.html?fbclid=IwAR0CktLQtuPBaZNk03odCKdrjN3LjYl_ouuFBbWvyj-yQ-BvzJ0v_n9w9xo

Here is my code:

import streamlit as st
import numpy as np
import pandas as pd

# NLP Pkgs
import matplotlib.pyplot as plt
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import confusion_matrix
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier

import os


# Main Stuff

st.title("Greetings NLP - Presence")
st.subheader("Created using Streamlit - Harshil Parikh ")



# Loading the data into streamlit
@st.cache
def load_data(nrows):
    #data = pd.read_csv('/Users/harshilparikh/Desktop/INT/data/selections.csv', nrows=nrows)
    dataset = st.cache(pd.read_csv)('/Users/harshilparikh/Desktop/INT/data/selections.csv')
    return dataset


data_load_state = st.text('Loading data...')
dataset = load_data(1000)
data_load_state.text('Data loaded.')

#Displaying all data first
if st.checkbox('Show Raw data'):
    st.subheader('Raw Data')
st.write(dataset)


# GREETING TAB

st.subheader('Greetings')
greet = st.sidebar.multiselect("Select Greeting", dataset['Greeting'].unique())

select = dataset[(dataset['Greeting'].isin(greet))]
# SEPARATING ONLY TWO COLUMNS FROM THE DATA 
greet_select = select[['Greeting','Selected']]
select_check= st.checkbox("Display records with greeting")
if select_check:
    st.write(greet_select)


#Text- Preprocessing  - Range from 0 to 6758 total feedback
nltk.download('stopwords')
corpus = []
for i in range(0, 6758):
    review = re.sub('[^a-zA-Z]', '', str(dataset['Selected'][i]))
review = review.lower()
review = review.split()
ps = PorterStemmer()
review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))]
review = ''.join(review)
corpus.append(review)


#BAG OF WORDS
cv = CountVectorizer(max_features = 6758)
X = cv.fit_transform(corpus).toarray()
y = dataset.iloc[:, 1].values

st.write(X)
st.write(y)
st.write(cv)


#Training sets (800 values)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
#X_train[0, 0:10] #First 10 rows of the first column of X_train.

# NLP - Naive Bayes algorithm 

classifier = GaussianNB()
classifier.fit(X_train, y_train)

I'm just trying to learn simple NLP. Any help would be greatly appreciated.

The error I get:

ValueError: Found input variables with inconsistent numbers of samples: [1, 6759]
Traceback:
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/streamlit/script_runner.py", line 332, in _run_script
    exec(code, module.__dict__)
File "/Users/harshilparikh/Desktop/INT/first.py", line 90, in <module>
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/sklearn/model_selection/_split.py", line 2127, in train_test_split
    arrays = indexable(*arrays)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/sklearn/utils/validation.py", line 292, in indexable
    check_consistent_length(*result)
File "/", line 255, in check_consistent_length
    raise ValueError("Found input variables with inconsistent numbers of samples"


1 Answer

Anonymous user

The error occurs when you call train_test_split: X and y need to have the same length, which is not the case here. I suspect the problem is your for loop. Instead of adding every review to your corpus, you only append the last one after leaving the for loop. Try this:

for i in range(0, 6758):
    review = re.sub('[^a-zA-Z]', '', str(dataset['Selected'][i]))
    review = review.lower()
    review = review.split()
    ps = PorterStemmer()
    review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))]
    review = ''.join(review)
    corpus.append(review)
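
As a minimal follow-up sketch (using the X, y, and train_test_split already defined in the question's code, so not a complete program on its own), a length check right before the split makes this kind of mismatch obvious: with the mis-indented loop, X ends up with a single row while y has 6759 labels, which is exactly the [1, 6759] pair in the error message.

# Sanity check before splitting: both inputs must have the same number of samples.
# With the mis-indented loop, X.shape[0] is 1 while len(y) is 6759.
print(X.shape[0], len(y))
assert X.shape[0] == len(y), "X and y must have the same number of samples"

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)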
