.. Copyright (C) 2001-2018 NLTK Project
.. For license information, see LICENSE.TXT

    >>> from __future__ import print_function
    >>> from nltk.tokenize import *

Regression Tests: Treebank Tokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Some test strings.

    >>> s1 = "On a $50,000 mortgage of 30 years at 8 percent, the monthly payment would be $366.88."
    >>> word_tokenize(s1)
    ['On', 'a', '$', '50,000', 'mortgage', 'of', '30', 'years', 'at', '8', 'percent', ',', 'the', 'monthly', 'payment', 'would', 'be', '$', '366.88', '.']
    >>> s2 = "\"We beat some pretty good teams to get here,\" Slocum said."
    >>> word_tokenize(s2)
    ['``', 'We', 'beat', 'some', 'pretty', 'good', 'teams', 'to', 'get', 'here', ',', "''", 'Slocum', 'said', '.']
    >>> s3 = "Well, we couldn't have this predictable, cliche-ridden, \"Touched by an Angel\" (a show creator John Masius worked on) wanna-be if she didn't."
    >>> word_tokenize(s3)
    ['Well', ',', 'we', 'could', "n't", 'have', 'this', 'predictable', ',', 'cliche-ridden', ',', '``', 'Touched', 'by', 'an', 'Angel', "''", '(', 'a', 'show', 'creator', 'John', 'Masius', 'worked', 'on', ')', 'wanna-be', 'if', 'she', 'did', "n't", '.']
    >>> s4 = "I cannot cannot work under these conditions!"
    >>> word_tokenize(s4)
    ['I', 'can', 'not', 'can', 'not', 'work', 'under', 'these', 'conditions', '!']
    >>> s5 = "The company spent $30,000,000 last year."
    >>> word_tokenize(s5)
    ['The', 'company', 'spent', '$', '30,000,000', 'last', 'year', '.']
    >>> s6 = "The company spent 40.75% of its income last year."
    >>> word_tokenize(s6)
    ['The', 'company', 'spent', '40.75', '%', 'of', 'its', 'income', 'last', 'year', '.']
    >>> s7 = "He arrived at 3:00 pm."
    >>> word_tokenize(s7)
    ['He', 'arrived', 'at', '3:00', 'pm', '.']
    >>> s8 = "I bought these items: books, pencils, and pens."
    >>> word_tokenize(s8)
    ['I', 'bought', 'these', 'items', ':', 'books', ',', 'pencils', ',', 'and', 'pens', '.']
    >>> s9 = "Though there were 150, 100 of them were old."
    >>> word_tokenize(s9)
    ['Though', 'there', 'were', '150', ',', '100', 'of', 'them', 'were', 'old', '.']
    >>> s10 = "There were 300,000, but that wasn't enough."
    >>> word_tokenize(s10)
    ['There', 'were', '300,000', ',', 'but', 'that', 'was', "n't", 'enough', '.']

Testing improvements made to the TreebankWordTokenizer:

    >>> sx1 = u'\xabNow that I can do.\xbb'
    >>> expected = [u'\xab', u'Now', u'that', u'I', u'can', u'do', u'.', u'\xbb']
    >>> word_tokenize(sx1) == expected
    True
    >>> sx2 = u'The unicode 201C and 201D \u201cLEFT(RIGHT) DOUBLE QUOTATION MARK\u201d is also OPEN_PUNCT and CLOSE_PUNCT.'
    >>> expected = [u'The', u'unicode', u'201C', u'and', u'201D', u'\u201c', u'LEFT', u'(', u'RIGHT', u')', u'DOUBLE', u'QUOTATION', u'MARK', u'\u201d', u'is', u'also', u'OPEN_PUNCT', u'and', u'CLOSE_PUNCT', u'.']
    >>> word_tokenize(sx2) == expected
    True

Sentence tokenization in word_tokenize:

    >>> s11 = "I called Dr. Jones. I called Dr. Jones."
    >>> word_tokenize(s11)
    ['I', 'called', 'Dr.', 'Jones', '.', 'I', 'called', 'Dr.', 'Jones', '.']
    >>> s12 = ("Ich muss unbedingt daran denken, Mehl, usw. fur einen "
    ...        "Kuchen einzukaufen. Ich muss.")
    >>> word_tokenize(s12)
    ['Ich', 'muss', 'unbedingt', 'daran', 'denken', ',', 'Mehl', ',', 'usw', '.', 'fur', 'einen', 'Kuchen', 'einzukaufen', '.', 'Ich', 'muss', '.']
    >>> word_tokenize(s12, 'german')
    ['Ich', 'muss', 'unbedingt', 'daran', 'denken', ',', 'Mehl', ',', 'usw.', 'fur', 'einen', 'Kuchen', 'einzukaufen', '.', 'Ich', 'muss', '.']
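``word_tokenize`` first splits the text into sentences with ``sent_tokenize`` and then applies the
TreebankWordTokenizer to each sentence. As an illustrative cross-check of our own (not part of the
original regression set), applying TreebankWordTokenizer directly to multi-sentence text leaves
sentence-internal periods attached to the preceding token; the expected output below mirrors the
tokenizer's own docstring example.

    >>> from nltk.tokenize import TreebankWordTokenizer
    >>> t = "Good muffins cost $3.88\nin New York.  Please buy me\ntwo of them.\n\nThanks."
    >>> TreebankWordTokenizer().tokenize(t)
    ['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York.', 'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks', '.']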
>>> s = ("Good muffins cost $3.88\nin New York. Please buy me\n" ... "two of them.\n\nThanks.") >>> s2 = ("Alas, it has not rained today. When, do you think, " ... "will it rain again?") >>> s3 = ("

Although this is not the case here, we must " ... "not relax our vigilance!

") >>> regexp_tokenize(s2, r'[,\.\?!"]\s*', gaps=False) [', ', '. ', ', ', ', ', '?'] >>> regexp_tokenize(s2, r'[,\.\?!"]\s*', gaps=True) ['Alas', 'it has not rained today', 'When', 'do you think', 'will it rain again'] Take care to avoid using capturing groups: >>> regexp_tokenize(s3, r'', gaps=False) ['

', '', '', '

'] >>> regexp_tokenize(s3, r'', gaps=False) ['

', '', '', '

'] >>> regexp_tokenize(s3, r'', gaps=True) ['Although this is ', 'not', ' the case here, we must not relax our vigilance!'] Named groups are capturing groups, and confuse the tokenizer: >>> regexp_tokenize(s3, r'b|p)>', gaps=False) ['p', 'b', 'b', 'p'] >>> regexp_tokenize(s3, r'b|p)>', gaps=True) ['p', 'Although this is ', 'b', 'not', 'b', ' the case here, we must not relax our vigilance!', 'p'] Make sure that nested groups don't confuse the tokenizer: >>> regexp_tokenize(s2, r'(?:h|r|l)a(?:s|(?:i|n0))', gaps=False) ['las', 'has', 'rai', 'rai'] >>> regexp_tokenize(s2, r'(?:h|r|l)a(?:s|(?:i|n0))', gaps=True) ['A', ', it ', ' not ', 'ned today. When, do you think, will it ', 'n again?'] Back-references require capturing groups, and these are not supported: >>> regexp_tokenize("aabbbcccc", r'(.)\1') ['a', 'b', 'c', 'c'] A simple sentence tokenizer '\.(\s+|$)' >>> regexp_tokenize(s, pattern=r'\.(?:\s+|$)', gaps=True) ['Good muffins cost $3.88\nin New York', 'Please buy me\ntwo of them', 'Thanks'] Regression Tests: TweetTokenizer ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ TweetTokenizer is a tokenizer specifically designed for micro-blogging tokenization tasks. >>> from nltk.tokenize import TweetTokenizer >>> tknzr = TweetTokenizer() >>> s0 = "This is a cooool #dummysmiley: :-) :-P <3 and some arrows < > -> <--" >>> tknzr.tokenize(s0) ['This', 'is', 'a', 'cooool', '#dummysmiley', ':', ':-)', ':-P', '<3', 'and', 'some', 'arrows', '<', '>', '->', '<--'] >>> s1 = "@Joyster2012 @CathStaincliffe Good for you, girl!! Best wishes :-)" >>> tknzr.tokenize(s1) ['@Joyster2012', '@CathStaincliffe', 'Good', 'for', 'you', ',', 'girl', '!', '!', 'Best', 'wishes', ':-)'] >>> s2 = "3Points for #DreamTeam Gooo BAILEY! :) #PBB737Gold @PBBabscbn" >>> tknzr.tokenize(s2) ['3Points', 'for', '#DreamTeam', 'Gooo', 'BAILEY', '!', ':)', '#PBB737Gold', '@PBBabscbn'] >>> s3 = "@Insanomania They do... Their mentality doesn't :(" >>> tknzr.tokenize(s3) ['@Insanomania', 'They', 'do', '...', 'Their', 'mentality', "doesn't", ':('] >>> s4 = "RT @facugambande: Ya por arrancar a grabar !!! #TirenTirenTiren vamoo !!" >>> tknzr.tokenize(s4) ['RT', '@facugambande', ':', 'Ya', 'por', 'arrancar', 'a', 'grabar', '!', '!', '!', '#TirenTirenTiren', 'vamoo', '!', '!'] >>> tknzr = TweetTokenizer(reduce_len=True) >>> s5 = "@crushinghes the summer holidays are great but I'm so bored already :(" >>> tknzr.tokenize(s5) ['@crushinghes', 'the', 'summer', 'holidays', 'are', 'great', 'but', "I'm", 'so', 'bored', 'already', ':('] It is possible to specify `strip_handles` and `reduce_len` parameters for a TweetTokenizer instance. Setting `strip_handles` to True, the tokenizer will remove Twitter handles (e.g. usernames). Setting `reduce_len` to True, repeated character sequences of length 3 or greater will be replaced with sequences of length 3. >>> tknzr = TweetTokenizer(strip_handles=True, reduce_len=True) >>> s6 = '@remy: This is waaaaayyyy too much for you!!!!!!' >>> tknzr.tokenize(s6) [':', 'This', 'is', 'waaayyy', 'too', 'much', 'for', 'you', '!', '!', '!'] >>> s7 = '@_willy65: No place for @chuck tonight. Sorry.' >>> tknzr.tokenize(s7) [':', 'No', 'place', 'for', 'tonight', '.', 'Sorry', '.'] >>> s8 = '@mar_tin is a great developer. Contact him at mar_tin@email.com.' >>> tknzr.tokenize(s8) ['is', 'a', 'great', 'developer', '.', 'Contact', 'him', 'at', 'mar_tin@email.com', '.'] The `preserve_case` parameter (default: True) allows to convert uppercase tokens to lowercase tokens. 
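The behaviour with capturing groups mirrors Python's own `re` module: when a pattern contains a
group, `re.findall` returns the captured group rather than the whole match, and `re.split` inserts
captured groups into its output. A minimal standard-library illustration of the same effect (an
aside of our own, not part of the original regression set):

    >>> import re
    >>> re.findall(r'</?(b|p)>', s3)
    ['p', 'b', 'b', 'p']
    >>> re.findall(r'</?(?:b|p)>', s3)
    ['<p>', '<b>', '</b>', '</p>']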
Regression Tests: TweetTokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

TweetTokenizer is a tokenizer specifically designed for micro-blogging tokenization tasks.

    >>> from nltk.tokenize import TweetTokenizer
    >>> tknzr = TweetTokenizer()
    >>> s0 = "This is a cooool #dummysmiley: :-) :-P <3 and some arrows < > -> <--"
    >>> tknzr.tokenize(s0)
    ['This', 'is', 'a', 'cooool', '#dummysmiley', ':', ':-)', ':-P', '<3', 'and', 'some', 'arrows', '<', '>', '->', '<--']
    >>> s1 = "@Joyster2012 @CathStaincliffe Good for you, girl!! Best wishes :-)"
    >>> tknzr.tokenize(s1)
    ['@Joyster2012', '@CathStaincliffe', 'Good', 'for', 'you', ',', 'girl', '!', '!', 'Best', 'wishes', ':-)']
    >>> s2 = "3Points for #DreamTeam Gooo BAILEY! :) #PBB737Gold @PBBabscbn"
    >>> tknzr.tokenize(s2)
    ['3Points', 'for', '#DreamTeam', 'Gooo', 'BAILEY', '!', ':)', '#PBB737Gold', '@PBBabscbn']
    >>> s3 = "@Insanomania They do... Their mentality doesn't :("
    >>> tknzr.tokenize(s3)
    ['@Insanomania', 'They', 'do', '...', 'Their', 'mentality', "doesn't", ':(']
    >>> s4 = "RT @facugambande: Ya por arrancar a grabar !!! #TirenTirenTiren vamoo !!"
    >>> tknzr.tokenize(s4)
    ['RT', '@facugambande', ':', 'Ya', 'por', 'arrancar', 'a', 'grabar', '!', '!', '!', '#TirenTirenTiren', 'vamoo', '!', '!']
    >>> tknzr = TweetTokenizer(reduce_len=True)
    >>> s5 = "@crushinghes the summer holidays are great but I'm so bored already :("
    >>> tknzr.tokenize(s5)
    ['@crushinghes', 'the', 'summer', 'holidays', 'are', 'great', 'but', "I'm", 'so', 'bored', 'already', ':(']

It is possible to specify the `strip_handles` and `reduce_len` parameters when creating a
TweetTokenizer instance. Setting `strip_handles` to True removes Twitter handles (i.e. usernames)
from the text. Setting `reduce_len` to True replaces repeated character sequences of length 3 or
greater with sequences of length 3.

    >>> tknzr = TweetTokenizer(strip_handles=True, reduce_len=True)
    >>> s6 = '@remy: This is waaaaayyyy too much for you!!!!!!'
    >>> tknzr.tokenize(s6)
    [':', 'This', 'is', 'waaayyy', 'too', 'much', 'for', 'you', '!', '!', '!']
    >>> s7 = '@_willy65: No place for @chuck tonight. Sorry.'
    >>> tknzr.tokenize(s7)
    [':', 'No', 'place', 'for', 'tonight', '.', 'Sorry', '.']
    >>> s8 = '@mar_tin is a great developer. Contact him at mar_tin@email.com.'
    >>> tknzr.tokenize(s8)
    ['is', 'a', 'great', 'developer', '.', 'Contact', 'him', 'at', 'mar_tin@email.com', '.']

Setting the `preserve_case` parameter to False (default: True) converts uppercase tokens to
lowercase. Emoticons are not affected:

    >>> tknzr = TweetTokenizer(preserve_case=False)
    >>> s9 = "@jrmy: I'm REALLY HAPPYYY about that! NICEEEE :D :P"
    >>> tknzr.tokenize(s9)
    ['@jrmy', ':', "i'm", 'really', 'happyyy', 'about', 'that', '!', 'niceeee', ':D', ':P']

It should not hang on long sequences of the same punctuation character.

    >>> tknzr = TweetTokenizer()
    >>> s10 = "Photo: Aujourd'hui sur http://t.co/0gebOFDUzn Projet... http://t.co/bKfIUbydz2.............................. http://fb.me/3b6uXpz0L"
    >>> tknzr.tokenize(s10)
    [u'Photo', u':', u"Aujourd'hui", u'sur', u'http://t.co/0gebOFDUzn', u'Projet', u'...', u'http://t.co/bKfIUbydz2', u'...', u'http://fb.me/3b6uXpz0L']

Regression Tests: PunktSentenceTokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The sentence splitter should remove whitespace following the sentence boundary.

    >>> pst = PunktSentenceTokenizer()
    >>> pst.tokenize('See Section 3). Or Section 2). ')
    ['See Section 3).', 'Or Section 2).']
    >>> pst.tokenize('See Section 3.) Or Section 2.) ')
    ['See Section 3.)', 'Or Section 2.)']
    >>> pst.tokenize('See Section 3.) Or Section 2.) ', realign_boundaries=False)
    ['See Section 3.', ') Or Section 2.', ')']

Two instances of PunktSentenceTokenizer should not share PunktParameters.

    >>> pst = PunktSentenceTokenizer()
    >>> pst2 = PunktSentenceTokenizer()
    >>> pst._params is pst2._params
    False

Regression Tests: align_tokens
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Post-hoc alignment of tokens with a source string.

    >>> from nltk.tokenize.util import align_tokens
    >>> list(align_tokens([''], ""))
    [(0, 0)]
    >>> list(align_tokens([''], " "))
    [(0, 0)]
    >>> list(align_tokens([], ""))
    []
    >>> list(align_tokens([], " "))
    []
    >>> list(align_tokens(['a'], "a"))
    [(0, 1)]
    >>> list(align_tokens(['abc', 'def'], "abcdef"))
    [(0, 3), (3, 6)]
    >>> list(align_tokens(['abc', 'def'], "abc def"))
    [(0, 3), (4, 7)]
    >>> list(align_tokens(['ab', 'cd'], "ab cd ef"))
    [(0, 2), (3, 5)]
    >>> list(align_tokens(['ab', 'cd', 'ef'], "ab cd ef"))
    [(0, 2), (3, 5), (6, 8)]
    >>> list(align_tokens(['ab', 'cd', 'efg'], "ab cd ef"))
    Traceback (most recent call last):
    ...
    ValueError: substring "efg" not found in "ab cd ef"
    >>> list(align_tokens(['ab', 'cd', 'ef', 'gh'], "ab cd ef"))
    Traceback (most recent call last):
    ...
    ValueError: substring "gh" not found in "ab cd ef"
    >>> list(align_tokens(['The', 'plane', ',', 'bound', 'for', 'St', 'Petersburg', ',', 'crashed', 'in', 'Egypt', "'s", 'Sinai', 'desert', 'just', '23', 'minutes', 'after', 'take-off', 'from', 'Sharm', 'el-Sheikh', 'on', 'Saturday', '.'], "The plane, bound for St Petersburg, crashed in Egypt's Sinai desert just 23 minutes after take-off from Sharm el-Sheikh on Saturday."))
    [(0, 3), (4, 9), (9, 10), (11, 16), (17, 20), (21, 23), (24, 34), (34, 35), (36, 43), (44, 46), (47, 52), (52, 54), (55, 60), (61, 67), (68, 72), (73, 75), (76, 83), (84, 89), (90, 98), (99, 103), (104, 109), (110, 119), (120, 122), (123, 131), (131, 132)]

Regression Tests: MWETokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Pickle an MWETokenizer.

    >>> from nltk.tokenize import MWETokenizer
    >>> import pickle

    >>> tokenizer = MWETokenizer([('hors', "d'oeuvre")], separator='+')
    >>> p = pickle.dumps(tokenizer)
    >>> unpickled = pickle.loads(p)
    >>> unpickled.tokenize("An hors d'oeuvre tonight, sir?".split())
    ['An', "hors+d'oeuvre", 'tonight,', 'sir?']
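Multi-word expressions can also be added to an existing tokenizer with `add_mwe`. The check below
is our own illustration (modelled on the class docstring) rather than part of the original
regression set; the `('in', 'spite', 'of')` entry and the sample sentence are ours.

    >>> tokenizer.add_mwe(('in', 'spite', 'of'))
    >>> tokenizer.tokenize('We went ahead in spite of the rain'.split())
    ['We', 'went', 'ahead', 'in+spite+of', 'the', 'rain']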
Regression Tests: TextTilingTokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

TextTilingTokenizer tokenizes text into coherent subtopic chunks based upon Hearst's TextTiling algorithm.

    >>> from nltk.tokenize import TextTilingTokenizer
    >>> from nltk.corpus import brown
    >>> tt = TextTilingTokenizer()
    >>> tt.tokenize(brown.raw()[0:1000])
    ["\n\n\tThe/at Fulton/np-tl County/nn-tl Grand/jj-tl Jury/nn-tl said/vbd Friday/nr an/at investigation/nn of/in Atlanta's/np$ recent/jj primary/nn election/nn produced/vbd ``/`` no/at evidence/nn ''/'' that/cs any/dti irregularities/nns took/vbd place/nn ./.\n\n\n\tThe/at jury/nn further/rbr said/vbd in/in term-end/nn presentments/nns that/cs the/at City/nn-tl Executive/jj-tl Committee/nn-tl ,/, which/wdt had/hvd over-all/jj charge/nn of/in the/at election/nn ,/, ``/`` deserves/vbz the/at praise/nn and/cc thanks/nns of/in the/at City/nn-tl of/in-tl Atlanta/np-tl ''/'' for/in the/at manner/nn in/in which/wdt the/at election/nn was/bedz conducted/vbn ./.\n\n\n\tThe/at September-October/np term/nn jury/nn had/hvd been/ben charged/vbn by/in Fulton/np-tl Superior/jj-tl Court/nn-tl Judge/nn-tl Durwood/np Pye/np to/to investigate/vb reports/nns of/in possible/jj ``/`` irregularities/nns ''/'' in/in the/at hard-fought/jj primary/nn which/wdt was/bedz won/vbn by/in Mayor-nominate/nn-tl Ivan/np Allen/np Jr./"]

Test that `ValueError` exceptions are raised when illegal arguments are used.

    >>> TextTilingTokenizer(similarity_method='foo').tokenize(brown.raw()[0:1000])
    Traceback (most recent call last):
    ...
    ValueError: Similarity method foo not recognized
    >>> TextTilingTokenizer(smoothing_method='bar').tokenize(brown.raw()[0:1000])
    Traceback (most recent call last):
    ...
    ValueError: Smoothing method bar not recognized
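As an additional sanity check of our own (not part of the original suite), the chunks returned by
`tokenize` are contiguous slices of the input text, so joining them should reproduce the original
string:

    >>> segments = tt.tokenize(brown.raw()[0:1000])
    >>> ''.join(segments) == brown.raw()[0:1000]
    True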