# High-frequency word mining for Chinese microblog text.
# Pipeline: load raw sentences -> clean -> segment (Rwordseg) ->
# remove stop words -> build a tm corpus -> document-term matrix.
#
# NOTE(review): this script was recovered from a one-line extraction in
# which several calls were truncated; each reconstructed call is marked
# with a TODO(review) comment below -- confirm against the original source.

# Load required packages.
library(Rwordseg)
library(rJava)          # Rwordseg's Java backend; hard dependency, so library() not require()
library(tm)
library(slam)
library(topicmodels)
library(RColorBrewer)
library(wordcloud)
library(igraph)
library(grDevices)

# Load the segmentation dictionary shipped with Rwordseg.
loadDict()

# Working directory holding the input data.
# The path must not contain Chinese characters.
# NOTE(review): setwd() in scripts is discouraged; kept because the
# relative reads below depend on it.
setwd("C:\\Users\\nununu\\Desktop\\111")

# Read the raw sentences as character data.
# TODO(review): the reading call was truncated to `("", colClasses=...)`
# in the original; read.csv with a `sentence` column is assumed from the
# `txt$sentence` usage below -- confirm the real file name and reader.
txt <- read.csv("weibo.csv", colClasses = "character")

# Clean the text: strip URLs, ASCII letters/digits/underscores, newlines.
data1 <- gsub(pattern = "http:[a-zA-Z\\/\\.0-9]+", "", txt$sentence)
data1 <- gsub("[a-z0-9A-Z_]", "", data1)
data1 <- gsub("\n", "", data1)
# (The original also ran gsub("", "", data1), a no-op; dropped.)
head(data1)
length(data1)

# Register domain words so the segmenter keeps them as single tokens.
insertWords(c("限牌", "中签", "胡冰蜀黍", "买车", "中签率", "买车",
              "拉风", "杯具", "吐槽", "放牌", "谈资", "摇中", "摇号",
              "公信力", "辟谣", "裸奔", "限行", "车源", "攒钱", "指点",
              "高冷", "央视", "限号", "囤牌", "竞价", "上牌", "猛戳",
              "东问西问", "错峰", "治堵", "限堵"))

# Segment every sentence into words, keeping part-of-speech tags.
# TODO(review): the segmentation call name was truncated; segmentCN()
# is Rwordseg's standard entry point and matches the `nature = TRUE`
# argument -- confirm.
poem_words <- lapply(seq_along(data1),
                     function(i) segmentCN(data1[i], nature = TRUE))

# Stop-word list, one word per line.
# TODO(review): the file name was truncated (the original path ended at
# the directory); "stopwords.txt" is a placeholder -- confirm.
data_stw <- readLines("C:\\Users\\nununu\\Desktop\\111\\stopwords.txt")

# Drop every token that appears in `words`, preserving token order.
# (Vectorized replacement for the original element-by-element while loop;
# behavior is identical.)
removeStopWords <- function(x, words) {
  x[!x %in% words]
}

# Apply stop-word removal to every segmented sentence.
# NOTE(review): the assignment target was lost in the original; the
# result clearly feeds VectorSource() below.
filtered_words <- lapply(poem_words, removeStopWords, data_stw)

# Build a tm corpus from the filtered token lists.
wordcorpus <- Corpus(VectorSource(filtered_words))
length(wordcorpus)

# Convert the corpus into a document-term matrix via tm.
# TODO(review): Sys.setlocale was truncated to `(locale="Chinese")`;
# reconstructed here. The control list was cut off at `bounds=list(g`;
# a global frequency bound of [5, Inf) is a common choice -- confirm.
Sys.setlocale(locale = "Chinese")
dtm1 <- DocumentTermMatrix(
  wordcorpus,
  control = list(
    wordLengths = c(2, 2),
    bounds = list(global = c(5, Inf))
  )
)
# High-frequency word mining, R program. Reposted from taodocs.com
# (www.taodocs.com); cite the source when reproducing.