Permalink
Cannot retrieve contributors at this time
Name already in use
A tag already exists with the provided branch name. Many Git commands accept both tag and branch names, so creating this branch may cause unexpected behavior. Are you sure you want to create this branch?
LDA/Metadata.py
Go to fileThis commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
92 lines (72 sloc)
2.71 KB
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
""" | |
@Author: Arunav Mishra, Supratim Das | |
""" | |
import nltk.data
from nltk.tokenize import *
from nltk.stem.porter import *
from nltk.corpus import stopwords
from gensim import corpora
from collections import defaultdict
import glob
import pickle
import os
import io
class Metadata(object):
    """Build gensim corpus artifacts from a directory of plain-text files.

    Reads every input file, splits it into sentences, and persists three
    artifacts into the output directory: a pickled sentence list
    (docIDMapping.txt), a gensim Dictionary (words.dict), and a serialized
    bag-of-words corpus (corpus.mm).
    """

    # Original hard-coded locations, kept as backward-compatible defaults.
    DEFAULT_INPUT_GLOB = '/Users/Supra/PycharmProjects/LDA/Input/67/*.txt'
    DEFAULT_OUTPUT_DIR = '/Users/Supra/PycharmProjects/LDA/Input/OP'

    def __init__(self, input_glob=None, output_dir=None):
        """Initialize tokenizers, stemmer, stop-word set, and file list.

        Args:
            input_glob: glob pattern selecting the input ``*.txt`` files;
                defaults to the original hard-coded path.
            output_dir: directory that receives docIDMapping.txt, words.dict
                and corpus.mm; defaults to the original hard-coded path.
        """
        self.sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
        self.stemmer = PorterStemmer()
        self.tokenizer = RegexpTokenizer(r'\w+').tokenize
        self.list_of_files = glob.glob(input_glob or self.DEFAULT_INPUT_GLOB)
        self.stop_set = set(stopwords.words('english'))
        self.output_dir = output_dir or self.DEFAULT_OUTPUT_DIR

    def cal_metadata(self):
        """Run the full metadata pipeline and write all output artifacts."""
        documents = []
        # Every sentence of every input file becomes one "document".
        for filename in self.list_of_files:
            # utf-8-sig strips a BOM if present.
            with io.open(filename, "r", encoding="utf-8-sig") as fp:
                data = fp.read().replace('\n\n', '\n')
            documents.extend(self.sent_detector.tokenize(data.strip()))

        # Ensure the output directory exists. The original called os.mkdir
        # unconditionally *before* its try/except guard, so any rerun
        # crashed with FileExistsError.
        os.makedirs(self.output_dir, exist_ok=True)

        # Dump the sentence list. 'wb' replaces the original 'ab+': appending
        # a new pickle on every run left stale dumps at the front of the
        # file, so pickle.load() would return an outdated mapping.
        doc_mapping = os.path.join(self.output_dir, 'docIDMapping.txt')
        with open(doc_mapping, 'wb') as output:
            pickle.dump(documents, output)

        # Lowercase, tokenize, drop stop words (checked before stemming),
        # then stem each remaining token.
        texts = [[self.stemmer.stem(word)
                  for word in self.tokenizer(document.lower())
                  if word not in self.stop_set]
                 for document in documents]

        # Token frequencies across the whole corpus.
        frequency = defaultdict(int)
        for text in texts:
            for token in text:
                frequency[token] += 1

        # Keep only tokens occurring more than once. The original tested
        # `> 0`, which kept everything and contradicted its own comment.
        texts = [[token for token in text if frequency[token] > 1]
                 for text in texts]

        # Persist the dictionary and the bag-of-words corpus.
        dictionary = corpora.Dictionary(texts)
        dictionary.save(os.path.join(self.output_dir, 'words.dict'))
        corpus = [dictionary.doc2bow(text) for text in texts]
        corpora.MmCorpus.serialize(
            os.path.join(self.output_dir, 'corpus.mm'), corpus)
        print("done")

    def main(self):
        # Bug fix: the original called self.run(), which does not exist on
        # this class; the intended entry point is cal_metadata().
        self.cal_metadata()
if __name__ == '__main__':
    # Script entry point: build all corpus artifacts from the input files.
    metadata = Metadata()
    metadata.main()