nltktools: comparison of g_stemmer.py @ 0:e991d4e60c17 (draft)
planemo upload commit 0203cb3a0b40d9348674b2b098af805e2986abca-dirty
author: stevecassidy
date: Wed, 12 Oct 2016 22:17:53 -0400
parents: (none)
children: fb617586f4b2
comparison
-1:000000000000 to 0:e991d4e60c17
import nltk
from nltk.stem import *
import argparse


def arguments():
    parser = argparse.ArgumentParser(description="Stem each word of the input text")
    parser.add_argument('--input', required=True, action="store", type=str, help="input text file")
    parser.add_argument('--output', required=True, action="store", type=str, help="output file path")
    parser.add_argument('--stemmer', required=False, action="store", type=str,
                        help="stemmer to use: 'lancaster', 'porter' or the default Snowball (English) stemmer")
    args = parser.parse_args()
    return args


def stem_file(in_file, out_file, stemmer_type):
    # read the whole input file, ignoring any undecodable characters
    with open(in_file, 'r', errors='ignore') as fd:
        unsegmented = fd.read()
    sentences = nltk.sent_tokenize(unsegmented)
    stemmer = get_stemmer(stemmer_type)
    with open(out_file, 'w') as output:
        for sentence in sentences:
            words = nltk.word_tokenize(sentence)
            # one stemmed sentence per output line, stems separated by spaces
            output.write(' '.join(stemmer.stem(word) for word in words))
            output.write('\n')


def get_stemmer(stemmer_type):
    # map the command-line name to an NLTK stemmer, defaulting to Snowball (English)
    if stemmer_type == 'lancaster':
        stemmer = LancasterStemmer()
    elif stemmer_type == 'porter':
        stemmer = PorterStemmer()
    else:
        stemmer = snowball.EnglishStemmer()
    return stemmer


if __name__ == '__main__':
    args = arguments()
    stem_file(args.input, args.output, args.stemmer)
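For reference, the sketch below is not part of the repository; it only illustrates how the three stemmer choices that get_stemmer() can return differ on the same tokens. It assumes NLTK is installed, and the sample words are arbitrary. (The tokenizer calls in stem_file additionally need the 'punkt' tokenizer data, via nltk.download('punkt'); newer NLTK releases may also ask for 'punkt_tab'.)

    # Hypothetical demo, separate from g_stemmer.py: compare the stemmers
    # selectable via --stemmer on a few arbitrary sample words.
    from nltk.stem import LancasterStemmer, PorterStemmer, snowball

    samples = ['running', 'generously', 'cats', 'relational']
    for name, stemmer in [('lancaster', LancasterStemmer()),
                          ('porter', PorterStemmer()),
                          ('snowball', snowball.EnglishStemmer())]:
        print(name, [stemmer.stem(word) for word in samples])

The script itself would be invoked along the lines of: python g_stemmer.py --input some_text.txt --output stemmed.txt --stemmer porter, where the file names are placeholders; omitting --stemmer falls back to the Snowball English stemmer.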