mirror of
https://github.com/tenrok/BBob.git
synced 2026-05-15 11:59:37 +03:00
fix(204): parse blanks and quotes in unique attribute tags (#281)
* feat: add test
* chore: update README.md [skip ci]
* feat(285): add width and height and alt for img tag (#286)
* chore: fast-peas-brush.md
* chore: working
* chore: lexer tests
* fix: single attr tag
@@ -0,0 +1,29 @@
---
"@bbob/parser": patch
"@bbob/cli": patch
"@bbob/core": patch
"@bbob/html": patch
"@bbob/plugin-helper": patch
"@bbob/preset": patch
"@bbob/preset-html5": patch
"@bbob/preset-react": patch
"@bbob/preset-vue": patch
"@bbob/react": patch
"@bbob/types": patch
"@bbob/vue2": patch
"@bbob/vue3": patch
---

Now `@bbob/parser` correctly parses tags like

```html
[url=javascript:alert('XSS ME');]TEXT[/url]
```

to

```json
```

Fixes #300
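For a quick check outside the test suite, the behaviour described above can be exercised through the parser's public `parse` export. This is only a sketch of consumer usage; the expected AST shape is an assumption taken from the parser tests added further down in this commit.

```ts
// Sketch only: parse() is the public entry point of @bbob/parser.
import { parse } from '@bbob/parser';

const ast = parse("[url=javascript:alert('XSS ME');]TEXT[/url]");

// Per the new tests, the whole value (spaces, quotes and all) survives as a
// single "unique" attribute that maps onto itself, with 'TEXT' as content.
console.log(JSON.stringify(ast, null, 2));
```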
```diff
@@ -17,7 +17,8 @@
     "types": "nx run-many --target=types",
     "release": "npm run build && npm run types && changeset publish",
     "lint": "nx run-many --target=lint",
-    "cleanup": "node scripts/cleanup"
+    "cleanup": "node scripts/cleanup",
+    "changeset": "npx changeset"
   },
   "author": {
     "name": "Nikolay Kostyurin <jilizart@gmail.com>"
```
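With this script in place, `npm run changeset` simply delegates to `npx changeset`, so a pending changeset entry like the one added above can be recorded without a global install.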
```diff
@@ -144,7 +144,7 @@ export function createLexer(buffer: string, options: LexerOptions = {}): LexerTo
     const isNextEQ = nextChar === EQ;
     const isWS = isWhiteSpace(char);
     // const isPrevWS = isWhiteSpace(prevChar);
-    const isNextWS = nextChar && isWhiteSpace(nextChar);
+    const isNextWS = !!nextChar && isWhiteSpace(nextChar);
 
     if (stateSpecial && isSpecialChar(char)) {
       return true;
```
```diff
@@ -205,6 +205,7 @@ export function createLexer(buffer: string, options: LexerOptions = {}): LexerTo
   function stateTag() {
     const currChar = chars.getCurr();
     const nextChar = chars.getNext();
+    const isNextCharReserved = Boolean(nextChar && isCharReserved(nextChar))
 
     chars.skip(); // skip openTag
 
```
```diff
@@ -212,7 +213,6 @@ export function createLexer(buffer: string, options: LexerOptions = {}): LexerTo
     const substr = chars.substrUntilChar(closeTag);
 
     const hasInvalidChars = substr.length === 0 || substr.indexOf(openTag) >= 0;
-    const isNextCharReserved = nextChar && isCharReserved(nextChar)
     const isLastChar = chars.isLast()
     const hasSpace = substr.indexOf(SPACE) >= 0;
     const isSpaceRestricted = hasSpace && options.whitespaceInTags === false;
```
```diff
@@ -228,6 +228,7 @@ export function createLexer(buffer: string, options: LexerOptions = {}): LexerTo
     // [/myTag]
     const isClosingTag = substr[0] === SLASH;
 
+    // [url] or [/url]
     if (isNoAttrsInTag || isClosingTag) {
       const startPos = chars.getPos() - 1;
       const name = chars.grabWhile((char) => char !== closeTag);
```
```diff
@@ -250,12 +251,16 @@ export function createLexer(buffer: string, options: LexerOptions = {}): LexerTo
     const silent = true;
     const tagStr = chars.grabWhile((char) => char !== closeTag, silent);
     const tagGrabber = createCharGrabber(tagStr, { onSkip });
-    const hasSpace = tagGrabber.includes(SPACE);
+    const eqParts = tagStr.split(EQ);
+    const tagName = eqParts[0];
+    const isEndTag = tagName[0] === SLASH;
+    const isSingleAttrTag = tagName.indexOf(SPACE) === -1;
+    const isSingleValueTag = !isEndTag && isSingleAttrTag
 
     tagMode = TAG_STATE_NAME;
 
     while (tagGrabber.hasNext()) {
-      tagMode = nextTagState(tagGrabber, !hasSpace, startPos);
+      tagMode = nextTagState(tagGrabber, isSingleValueTag, startPos);
     }
 
     chars.skip(); // skip closeTag
```
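The core of the lexer change is the `isSingleValueTag` flag computed above. The snippet below is a standalone illustration of that decision, not the lexer's internals; the `EQ`, `SPACE` and `SLASH` constants are assumed to match the character helpers the lexer already uses.

```ts
// Standalone illustration of the single-value-tag check introduced above.
const EQ = '=';
const SPACE = ' ';
const SLASH = '/';

function isSingleValueTag(tagStr: string): boolean {
  const eqParts = tagStr.split(EQ);       // e.g. ["url", "javascript:alert('XSS ME');"]
  const tagName = eqParts[0];             // text before the first '='
  const isEndTag = tagName[0] === SLASH;  // closing tags never carry a value
  const isSingleAttrTag = tagName.indexOf(SPACE) === -1;
  return !isEndTag && isSingleAttrTag;
}

// A bare tag name before '=' means everything after it stays one value,
// blanks and quotes included:
console.log(isSingleValueTag("url=javascript:alert('XSS ME');")); // true
// A tag name followed by a space means ordinary key=value attributes:
console.log(isSingleValueTag('img width=100 height=50'));         // false
console.log(isSingleValueTag('/url'));                            // false
```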
```diff
@@ -1,6 +1,18 @@
-import { TYPE_ID, VALUE_ID, TYPE_WORD, TYPE_TAG, TYPE_ATTR_NAME, TYPE_ATTR_VALUE, TYPE_SPACE, TYPE_NEW_LINE, LINE_ID, COLUMN_ID, START_POS_ID, END_POS_ID } from '../src/Token';
-import { createLexer } from '../src/lexer';
-import { parse } from "../src";
+import {
+  TYPE_ID,
+  VALUE_ID,
+  TYPE_WORD,
+  TYPE_TAG,
+  TYPE_ATTR_NAME,
+  TYPE_ATTR_VALUE,
+  TYPE_SPACE,
+  TYPE_NEW_LINE,
+  LINE_ID,
+  COLUMN_ID,
+  START_POS_ID,
+  END_POS_ID
+} from '../src/Token';
+import { createLexer } from '../src';
 
 const TYPE = {
   WORD: TYPE_WORD,
```
```diff
@@ -139,6 +151,49 @@ describe('lexer', () => {
     expect(tokens).toBeMatchOutput(output);
   });
 
+  test('paired tag with url tag with fakeUnique', () => {
+    const input = '[url=https://example.org/ fakeUnique=fakeUnique]T[/url]';
+    const tokens = tokenize(input);
+
+    const output = [
+      [TYPE.TAG, 'url', 0, 0, 0, 48],
+      [TYPE.ATTR_VALUE, 'https://example.org/ fakeUnique=fakeUnique', 5, 0],
+      [TYPE.WORD, 'T', 48, 0],
+      [TYPE.TAG, '/url', 50, 0, 49, 55],
+    ];
+
+    expect(tokens).toBeMatchOutput(output);
+  });
+
+  test('single tag with xss', () => {
+    const input = '[url=javascript:alert(\'XSS ME\');]TEXT[/url]';
+    const tokens = tokenize(input);
+
+    const output = [
+      [TYPE.TAG, 'url', 0, 0, 0, 33],
+      [TYPE.ATTR_VALUE, 'javascript:alert(\'XSS ME\');', 5, 0],
+      [TYPE.WORD, 'TEXT', 33, 0],
+      [TYPE.TAG, '/url', 38, 0, 37, 43],
+    ];
+
+    expect(tokens).toBeMatchOutput(output);
+  });
+
+  test('single tag with xss and double quotes', () => {
+    const input = '[url=javascript:alert("XSS ME");]TEXT[/url]';
+    const tokens = tokenize(input);
+
+    const output = [
+      [TYPE.TAG, 'url', 0, 0, 0, 33],
+      [TYPE.ATTR_VALUE, 'javascript:alert("XSS ME', 5, 0],
+      [TYPE.ATTR_VALUE, ');', 31, 0],
+      [TYPE.WORD, 'TEXT', 33, 0],
+      [TYPE.TAG, '/url', 38, 0, 37, 43],
+    ];
+
+    expect(tokens).toBeMatchOutput(output);
+  });
+
   test('single fake tag', () => {
     const input = '[ user=111]';
     const tokens = tokenize(input);
```
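The `tokenize` helper these tests call is not shown in the hunk. A plausible sketch, assuming `createLexer` is re-exported from the package index as the updated import suggests and that the returned tokenizer exposes a `tokenize()` method:

```ts
// Assumed test helper: build a lexer over the input and collect its tokens.
import { createLexer } from '@bbob/parser';

const tokenize = (input: string) => createLexer(input).tokenize();

// The single-quote XSS case should yield one ATTR_VALUE token holding the
// entire "javascript:alert('XSS ME');" string.
console.log(tokenize("[url=javascript:alert('XSS ME');]TEXT[/url]"));
```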
```diff
@@ -641,6 +641,28 @@ describe('Parser', () => {
     ]);
   });
 
+  test('parse url tag with fakeUnique', () => {
+    const ast = parse('[url=https://example.org/ fakeUnique=fakeUnique]T[/url]');
+
+    expect(ast).toBeMatchAST([
+      {
+        tag: 'url',
+        attrs: {
+          'https://example.org/ fakeUnique=fakeUnique': 'https://example.org/ fakeUnique=fakeUnique',
+        },
+        content: ['T'],
+        start: {
+          from: 0,
+          to: 48,
+        },
+        end: {
+          from: 49,
+          to: 55,
+        },
+      },
+    ]);
+  });
+
   test('parse triple nested tags', () => {
     const ast = parse(`this is outside [spoiler title="name with
 multiline
```
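As the AST above shows, BBob represents a "unique" attribute by mapping the full value onto itself. Below is a sketch of how downstream code can read it back using the `getUniqAttr` helper from `@bbob/plugin-helper` (one of the patched packages); treat the exact import as an assumption of this sketch.

```ts
// Reading the self-mapped "unique" attribute back out of a node's attrs.
import { getUniqAttr } from '@bbob/plugin-helper';

const attrs = {
  'https://example.org/ fakeUnique=fakeUnique': 'https://example.org/ fakeUnique=fakeUnique',
};

// Returns the single value whose key equals its value, blanks and '=' included.
console.log(getUniqAttr(attrs)); // 'https://example.org/ fakeUnique=fakeUnique'
```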