diff --git a/core/modules/parsers/wikiparser/rules/rules.js b/core/modules/parsers/wikiparser/rules/rules.js new file mode 100644 index 000000000..b240eb6a5 --- /dev/null +++ b/core/modules/parsers/wikiparser/rules/rules.js @@ -0,0 +1,62 @@ +/*\ +title: $:/core/modules/parsers/wikiparser/rules/rules.js +type: application/javascript +module-type: wikirule + +Wiki pragma rule for rules specifications + +``` +\rules except ruleone ruletwo rulethree +\rules only ruleone ruletwo rulethree +``` + +\*/ +(function(){ + +/*jslint node: true, browser: true */ +/*global $tw: false */ +"use strict"; + +exports.name = "rules"; +exports.types = {pragma: true}; + +/* +Instantiate parse rule +*/ +exports.init = function(parser) { + this.parser = parser; + // Regexp to match + this.matchRegExp = /^\\rules[^\S\n]/mg; +}; + +/* +Parse the most recent match +*/ +exports.parse = function() { + // Move past the pragma invocation + this.parser.pos = this.matchRegExp.lastIndex; + // Parse whitespace delimited tokens terminated by a line break + var reMatch = /[^\S\n]*(\S+)|(\r?\n)/mg, + tokens = []; + reMatch.lastIndex = this.parser.pos; + var match = reMatch.exec(this.parser.source); + while(match && match.index === this.parser.pos) { + this.parser.pos = reMatch.lastIndex; + // Exit if we've got the line break + if(match[2]) { + break; + } + // Process the token + if(match[1]) { + tokens.push(match[1]); + } + // Match the next token + match = reMatch.exec(this.parser.source); + } + // Process the tokens + if(tokens.length > 0) { + this.parser.amendRules(tokens[0],tokens.slice(1)); + } +}; + +})(); diff --git a/core/modules/parsers/wikiparser/wikiparser.js b/core/modules/parsers/wikiparser/wikiparser.js index baaf33e05..eae4c6c4e 100644 --- a/core/modules/parsers/wikiparser/wikiparser.js +++ b/core/modules/parsers/wikiparser/wikiparser.js @@ -39,11 +39,11 @@ var WikiParser = function(vocabulary,type,text,options) { this.macroDefinitions = {}; // Hash map of macro definitions // 
Instantiate the pragma parse rules this.pragmaRules = this.instantiateRules(this.vocabulary.pragmaRuleClasses,"pragma",0); + // Instantiate the parser block and inline rules + this.blockRules = this.instantiateRules(this.vocabulary.blockRuleClasses,"block",0); + this.inlineRules = this.instantiateRules(this.vocabulary.inlineRuleClasses,"inline",0); // Parse any pragmas this.parsePragmas(); - // Instantiate the parser block and inline rules - this.blockRules = this.instantiateRules(this.vocabulary.blockRuleClasses,"block",this.pos); - this.inlineRules = this.instantiateRules(this.vocabulary.inlineRuleClasses,"inline",this.pos); // Parse the text into inline runs or blocks if(options.parseAsInline) { this.tree = this.parseInlineRun(); @@ -305,6 +305,40 @@ WikiParser.prototype.parseClasses = function() { return classNames; }; +/* +Amend the rules used by this instance of the parser + type: `only` keeps just the named rules, `except` keeps all but the named rules + names: array of rule names +*/ +WikiParser.prototype.amendRules = function(type,names) { + names = names || []; + // Define the filter function + var keepFilter; + if(type === "only") { + keepFilter = function(name) { + return names.indexOf(name) !== -1; + }; + } else if(type === "except") { + keepFilter = function(name) { + return names.indexOf(name) === -1; + }; + } else { + return; + } + // Define a function to process each of our rule arrays + var processRuleArray = function(ruleArray) { + for(var t=ruleArray.length-1; t>=0; t--) { + if(!keepFilter(ruleArray[t].rule.name)) { + ruleArray.splice(t,1); + } + } + }; + // Process each rule array + processRuleArray(this.pragmaRules); + processRuleArray(this.blockRules); + processRuleArray(this.inlineRules); +}; + exports.WikiParser = WikiParser; })();