mirror of
https://github.com/tenrok/BBob.git
synced 2026-05-15 11:59:37 +03:00
fix(parser): fix issue with escaping backslashes when enableEscapeTags is set (#20)
There is a bug in the lexer where, when enableEscapeTags is set, backslashes are not always escaped (i.e., \\[b] is treated as an escaped tag rather than a literal backslash followed by a tag).
This commit is contained in:
committed by
Nikolay Kostyurin
parent
3b46fbe23c
commit
8a9e9304c1
@@ -53,7 +53,7 @@ function createLexer(buffer, options = {}) {
|
||||
const RESERVED_CHARS = [closeTag, openTag, QUOTEMARK, BACKSLASH, SPACE, TAB, EQ, N, EM];
|
||||
const NOT_CHAR_TOKENS = [
|
||||
...(options.enableEscapeTags ? [BACKSLASH] : []),
|
||||
openTag, SPACE, TAB, N, BACKSLASH,
|
||||
openTag, SPACE, TAB, N,
|
||||
];
|
||||
const WHITESPACES = [SPACE, TAB];
|
||||
const SPECIAL_CHARS = [EQ, SPACE, TAB];
|
||||
@@ -163,6 +163,10 @@ function createLexer(buffer, options = {}) {
|
||||
bufferGrabber.skip(); // skip the \ without emitting anything
|
||||
bufferGrabber.skip(); // skip past the [ or ] as well
|
||||
emitToken(createToken(TYPE_WORD, nextChar, row, col));
|
||||
} else if (options.enableEscapeTags && currChar === BACKSLASH && nextChar === BACKSLASH) {
|
||||
bufferGrabber.skip(); // skip the first \ without emitting anything
|
||||
bufferGrabber.skip(); // skip past the second \ and emit it
|
||||
emitToken(createToken(TYPE_WORD, nextChar, row, col));
|
||||
} else if (currChar === openTag) {
|
||||
bufferGrabber.skip(); // skip openTag
|
||||
|
||||
|
||||
@@ -305,6 +305,29 @@ describe('lexer', () => {
|
||||
expectOutput(output, tokens);
|
||||
});
|
||||
|
||||
test('escaped tag and escaped backslash', () => {
|
||||
const tokenizeEscape = input => (createLexer(input, {
|
||||
enableEscapeTags: true
|
||||
}).tokenize());
|
||||
const input = '\\\\\\[b\\\\\\]test\\\\\\[/b\\\\\\]';
|
||||
const tokens = tokenizeEscape(input);
|
||||
const output = [
|
||||
[TYPE.WORD, '\\', '0', '0'],
|
||||
[TYPE.WORD, '[', '0', '0'],
|
||||
[TYPE.WORD, 'b', '0', '0'],
|
||||
[TYPE.WORD, '\\', '0', '0'],
|
||||
[TYPE.WORD, ']', '0', '0'],
|
||||
[TYPE.WORD, 'test', '0', '0'],
|
||||
[TYPE.WORD, '\\', '0', '0'],
|
||||
[TYPE.WORD, '[', '0', '0'],
|
||||
[TYPE.WORD, '/b', '0', '0'],
|
||||
[TYPE.WORD, '\\', '0', '0'],
|
||||
[TYPE.WORD, ']', '0', '0'],
|
||||
];
|
||||
|
||||
expectOutput(output, tokens);
|
||||
});
|
||||
|
||||
describe('html', () => {
|
||||
const tokenizeHTML = input => createLexer(input, { openTag: '<', closeTag: '>' }).tokenize();
|
||||
|
||||
|
||||
@@ -184,7 +184,7 @@ describe('Parser', () => {
|
||||
]);
|
||||
});
|
||||
|
||||
test('parse escaped tags tags', () => {
|
||||
test('parse escaped tags', () => {
|
||||
const ast = parse('\\[b\\]test\\[/b\\]', {
|
||||
enableEscapeTags: true
|
||||
});
|
||||
@@ -199,5 +199,25 @@ describe('Parser', () => {
|
||||
']',
|
||||
]);
|
||||
});
|
||||
|
||||
test('parse escaped tags and escaped backslash', () => {
|
||||
const ast = parse('\\\\\\[b\\\\\\]test\\\\\\[/b\\\\\\]', {
|
||||
enableEscapeTags: true
|
||||
});
|
||||
|
||||
expectOutput(ast, [
|
||||
'\\',
|
||||
'[',
|
||||
'b',
|
||||
'\\',
|
||||
']',
|
||||
'test',
|
||||
'\\',
|
||||
'[',
|
||||
'/b',
|
||||
'\\',
|
||||
']',
|
||||
]);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
Reference in New Issue
Block a user