diff --git a/lib/natural/index.js b/lib/natural/index.js
index 8b7693647..921754896 100644
--- a/lib/natural/index.js
+++ b/lib/natural/index.js
@@ -47,6 +47,7 @@ exports.AggressiveTokenizer = require('./tokenizers/aggressive_tokenizer');
 exports.RegexpTokenizer = require('./tokenizers/regexp_tokenizer').RegexpTokenizer;
 exports.WordTokenizer = require('./tokenizers/regexp_tokenizer').WordTokenizer;
 exports.WordPunctTokenizer = require('./tokenizers/regexp_tokenizer').WordPunctTokenizer;
+exports.CaseTokenizer = require('./tokenizers/tokenizer_case').CaseTokenizer;
 exports.TreebankWordTokenizer = require('./tokenizers/treebank_word_tokenizer');
 exports.TokenizerJa = require('./tokenizers/tokenizer_ja');
 exports.BayesClassifier = require('./classifiers/bayes_classifier');
diff --git a/lib/natural/tokenizers/tokenizer_case.js b/lib/natural/tokenizers/tokenizer_case.js
index 64a38d82f..d16e2cef2 100644
--- a/lib/natural/tokenizers/tokenizer_case.js
+++ b/lib/natural/tokenizers/tokenizer_case.js
@@ -22,12 +22,12 @@
 var Tokenizer = require('./tokenizer'),
     util = require('util'),
-    TokenizerCase = function() {
+    CaseTokenizer = function() {
 };
 
-util.inherits(TokenizerCase, Tokenizer);
+util.inherits(CaseTokenizer, Tokenizer);
 
-TokenizerCase.prototype.attach = function() {
+CaseTokenizer.prototype.attach = function() {
   var self = this;
 
   String.prototype.tokenize = function(preserveApostrophe) {
@@ -35,7 +35,7 @@ TokenizerCase.prototype.attach = function() {
   }
 };
 
-TokenizerCase.prototype.tokenize = function(text, preserveApostrophe) {
+CaseTokenizer.prototype.tokenize = function(text, preserveApostrophe) {
   var whitelist = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'];
   var lower = text.toLowerCase();
   var upper = text.toUpperCase();
@@ -53,4 +53,4 @@ TokenizerCase.prototype.tokenize = function(text, preserveApostrophe) {
   return this.trim(result.replace(/\s+/g, ' ').split(' '));
 };
 
-module.exports = TokenizerCase;
\ No newline at end of file
+exports.CaseTokenizer = CaseTokenizer;
\ No newline at end of file
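
Note for consumers: tokenizer_case.js previously assigned the constructor directly to module.exports, so require('./tokenizers/tokenizer_case') returned the constructor itself. It now uses the named export exports.CaseTokenizer, matching the require('...').CaseTokenizer form added to index.js. A minimal usage sketch of the renamed export follows; the sample sentence and the printed results are illustrative assumptions, not taken from this patch:

  // Sketch only: assumes the package is installed under its published name 'natural'.
  var natural = require('natural');
  var tokenizer = new natural.CaseTokenizer();

  // Second argument is preserveApostrophe, matching the tokenize() signature in the diff.
  console.log(tokenizer.tokenize("It wasn't my fault.", true));

  // attach() patches String.prototype.tokenize to delegate to this tokenizer instance.
  tokenizer.attach();
  console.log('Who did that?'.tokenize());

Anyone requiring lib/natural/tokenizers/tokenizer_case.js directly should treat this as a breaking change: the constructor must now be picked off the exports object as shown above, rather than used as the module's return value.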