"""
@Author: Arunav Mishra, Supratim Das
"""
import nltk.data
from nltk.tokenize import RegexpTokenizer
from nltk.stem.porter import PorterStemmer
from nltk.corpus import stopwords
from gensim import corpora
from collections import defaultdict
import glob
import pickle
import os
import io


class Metadata(object):
    # Initialize
    def __init__(self):
        self.sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
        self.stemmer = PorterStemmer()
        self.tokenizer = RegexpTokenizer(r'\w+').tokenize
        self.list_of_files = glob.glob('/Users/Supra/PycharmProjects/LDA/Input/67/*.txt')
        self.stop_set = set(stopwords.words('english'))

    # Run the metadata calculation: split input files into sentences, then build dictionary and corpus
    def cal_metadata(self):
        documents = []
        # Add every sentence of every input file as an element of the documents list
        for filename in self.list_of_files:
            with io.open(filename, "r", encoding="utf-8-sig") as fp:
                data = fp.read().replace('\n\n', '\n')
            documents.extend(self.sent_detector.tokenize(data.strip()))
        # Dump the documents list as a pickle file, creating the output directory if needed
        docMapping = '/Users/Supra/PycharmProjects/LDA/Input/OP/docIDMapping.txt'
        directory = os.path.dirname(docMapping)
        if not os.path.isdir(directory):
            os.makedirs(directory)
        with open(docMapping, 'wb') as output:
            pickle.dump(documents, output)
        # Tokenize, lowercase, stopword-filter and stem every document
        texts = [[self.stemmer.stem(word) for word in self.tokenizer(document.lower()) if word not in self.stop_set]
                 for document in documents]
        # Count how often every token occurs across the whole corpus
        frequency = defaultdict(int)
        for text in texts:
            for token in text:
                frequency[token] += 1
        # Keep only tokens with frequency greater than 1
        texts = [[token for token in text if frequency[token] > 1]
                 for text in texts]
        # Create and save the gensim dictionary of tokens
        dictionary_file = '/Users/Supra/PycharmProjects/LDA/Input/OP/words.dict'
        dictionary_dir = os.path.dirname(dictionary_file)
        if not os.path.isdir(dictionary_dir):
            os.makedirs(dictionary_dir)
        dictionary = corpora.Dictionary(texts)
        dictionary.save(dictionary_file)
        # Part 3: Create the bag-of-words corpus and serialize it as a corpus.mm file
        corpus_file = '/Users/Supra/PycharmProjects/LDA/Input/OP/corpus.mm'
        corpus_dir = os.path.dirname(corpus_file)
        if not os.path.isdir(corpus_dir):
            os.makedirs(corpus_dir)
        corpus = [dictionary.doc2bow(text) for text in texts]
        corpora.MmCorpus.serialize(corpus_file, corpus)
        print("done")

    def main(self):
        self.cal_metadata()


if __name__ == '__main__':
    Metadata().main()
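
The script above only produces the preprocessing artifacts (docIDMapping.txt, words.dict and corpus.mm); it does not train a topic model itself. As a minimal sketch of how those outputs could be consumed in a later step, assuming the same hard-coded paths and an arbitrary choice of 10 topics (neither the topic count nor this follow-up snippet is part of the repository), a gensim LDA run might look like this:

# Hypothetical follow-up step: load the saved artifacts and train an LDA model.
import pickle
from gensim import corpora, models

dictionary = corpora.Dictionary.load('/Users/Supra/PycharmProjects/LDA/Input/OP/words.dict')
corpus = corpora.MmCorpus('/Users/Supra/PycharmProjects/LDA/Input/OP/corpus.mm')

# The sentence list pickled by cal_metadata(), useful for mapping corpus rows back to text
with open('/Users/Supra/PycharmProjects/LDA/Input/OP/docIDMapping.txt', 'rb') as fp:
    sentences = pickle.load(fp)

# num_topics=10 and passes=10 are placeholder values, not taken from the source
lda = models.LdaModel(corpus, id2word=dictionary, num_topics=10, passes=10)
for topic in lda.print_topics(num_topics=10, num_words=5):
    print(topic)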