Mercurial > repos > stevecassidy > nltktools
view g_collocation.py @ 2:a47980ef2b96 draft
planemo upload for repository https://github.com/Alveo/alveo-galaxy-tools commit b5b26e9118f2ad8af109d606746b39a5588f0511-dirty
author | stevecassidy |
---|---|
date | Wed, 01 Nov 2017 01:19:55 -0400 |
parents | fb617586f4b2 |
children |
line wrap: on
line source
"""Find bigram or trigram collocations in a text file using NLTK.

Galaxy tool wrapper: reads plain text (or space-separated word/TAG
POS-tagged text), scores n-grams by pointwise mutual information (PMI),
and writes the top collocations to a tab-separated output file.
"""
import argparse

import nltk
from nltk.collocations import BigramCollocationFinder, BigramAssocMeasures
from nltk.collocations import TrigramCollocationFinder, TrigramAssocMeasures

# Tokenizer models required by nltk.sent_tokenize / nltk.word_tokenize.
nltk.download('punkt', quiet=True)


def Parser():
    """Build the command-line parser and return the parsed arguments.

    All argument values arrive as strings because Galaxy passes them on
    the command line; numeric/boolean conversion happens in collocation().
    """
    # NOTE: description fixed — the original was copy-pasted from a
    # chart-parser tool and did not describe this program.
    the_parser = argparse.ArgumentParser(
        description="Find bigram or trigram collocations in a text")
    the_parser.add_argument('--input', required=True, action="store",
                            type=str, help="input text file")
    the_parser.add_argument('--output', required=True, action="store",
                            type=str, help="output file path")
    the_parser.add_argument('--freq_filter', required=True, action="store",
                            type=str,
                            help="The minimum number of required occurrences in the corpus")
    the_parser.add_argument('--results', required=True, action="store",
                            type=str,
                            help="The maximum number of collocations to show in the results")
    the_parser.add_argument('--coll_type', required=True, action="store",
                            type=str, help="Type of collocations to find")
    the_parser.add_argument('--pos', required=True, action="store",
                            type=str, help="Data input is a set of POS tags")
    return the_parser.parse_args()


def collocation(inp, outp, freq_filter, results, coll_type, pos):
    """Find collocations in the file *inp* and write them to *outp*.

    :param inp: path to the input text file
    :param outp: path to the output file; one collocation per line,
        tokens separated by tabs
    :param freq_filter: minimum corpus frequency for an n-gram (string int)
    :param results: maximum number of collocations to report (string int)
    :param coll_type: 'bigram' for bigrams; any other value means trigrams
    :param pos: 'true' if the input is word/TAG POS-tagged text
    """
    pos = pos == 'true'

    with open(inp, 'r') as fd:
        text = fd.read()

    if pos:
        # Input is space-separated word/TAG tokens; keep only the word.
        # str.partition is robust: a token with no '/' (including a bare
        # newline) is kept whole instead of raising ValueError as the
        # original x[:x.index('/')] did.
        tokens = text.split(' ')[:-1]
        all_words = [t.partition('/')[0] for t in tokens]
        all_words = [w.strip(' ').strip('\n') for w in all_words]
    else:
        # Plain text: sentence-split first so word_tokenize sees sentences.
        all_words = []
        for sent in nltk.sent_tokenize(text):
            all_words += nltk.word_tokenize(sent)

    if coll_type == 'bigram':
        measures = BigramAssocMeasures()
        finder = BigramCollocationFinder.from_words(all_words)
    else:
        measures = TrigramAssocMeasures()
        finder = TrigramCollocationFinder.from_words(all_words)

    # Drop n-grams occurring fewer than freq_filter times, then score the
    # remainder by PMI and keep the first N (score_ngrams sorts descending).
    finder.apply_freq_filter(int(freq_filter))
    colls = finder.score_ngrams(measures.pmi)[:int(results)]

    with open(outp, 'w') as output:
        for ngram, score in colls:
            # BUG FIX: the original unpacked every n-gram as a pair
            # ((a, b), score), which raised ValueError in trigram mode.
            # Join whatever arity the n-gram has; the score is not written,
            # matching the original bigram output format.
            output.write("\t".join(ngram) + "\n")


if __name__ == '__main__':
    args = Parser()
    collocation(args.input, args.output, args.freq_filter,
                args.results, args.coll_type, args.pos)