forked from FoundKeyGang/FoundKey
Change italic syntax to fit with other markdown implementations (#3916)
Currently, italics require HTML <i> syntax, unlike every other formatter. This is very confusing, especially as there doesn't seem to be any documentation for it. This change makes both _text_ and *text* work, which users would expect from other Markdown implementations such as Pleroma, Discord, and GitHub. Also adds tests.
parent 11689e6d18
commit 361af34956
2 changed files with 38 additions and 7 deletions
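The crux of the change is the new pattern in the first hunk below: group 1 captures the opening delimiter, group 2 the italicized text, and the backreference \1 requires the closing delimiter to match the opening one. As a quick sketch of its behavior (not part of the commit; plain TypeScript/Node, using the regex and the sample strings from the tests in this diff):

// The new italic pattern. [a-zA-Z0-9]+? forces the content to start with an
// alphanumeric character, which keeps kaomoji like *.* from turning italic,
// and \1 requires the same delimiter on both sides.
const italic = /(\*|_)([a-zA-Z0-9]+?[\s\S]*?)\1/;

console.log('_foo_'.match(italic)?.[2]); // 'foo'
console.log('*foo*'.match(italic)?.[2]); // 'foo'
console.log('_foo*'.match(italic));      // null (mismatched delimiters)
console.log('*.*'.match(italic));        // null ('.' is not alphanumeric)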
@@ -224,7 +224,7 @@ const mfm = P.createLanguage({
     //#region Italic
     italic: r =>
-        P.regexp(/<i>([\s\S]+?)<\/i>/, 1)
+        P.regexp(/(\*|_)([a-zA-Z0-9]+?[\s\S]*?)\1/, 2)
         .map(x => createTree('italic', P.alt(
             r.bold,
             r.strike,
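For context: judging by the P.createLanguage and P.regexp calls, the parser appears to be built on the Parsimmon combinator library (an assumption, not stated in the diff). Parsimmon's regexp(re, group) yields the given capture group rather than the whole match, which explains the second argument changing from 1 to 2: in the old pattern group 1 held the text, while in the new pattern group 1 is the delimiter and group 2 is the text. A minimal sketch under that assumption:

import * as P from 'parsimmon';

// Yields capture group 2 (the delimited text) when the regex matches.
const italic = P.regexp(/(\*|_)([a-zA-Z0-9]+?[\s\S]*?)\1/, 2);

console.log(italic.tryParse('*foo*')); // => 'foo'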
test/mfm.ts (43 changed lines)
@@ -32,8 +32,8 @@ describe('createTree', () => {
         leaf('left', { a: 2 }),
         leaf('right', { b: 'hi' })
-    ], {
-        c: 4
-    });
+    ], {
+        c: 4
+    });
     assert.deepStrictEqual(t, {
         node: {
             type: 'tree',
@@ -288,7 +288,7 @@ describe('MFM', () => {
         leaf('mention', { acct: '@a', canonical: '@a', username: 'a', host: null })
     ]);

-    const tokens4 = analyze('@\n@v\n@veryverylongusername' /* \n@toolongtobeasamention */ );
+    const tokens4 = analyze('@\n@v\n@veryverylongusername' /* \n@toolongtobeasamention */);
     assert.deepStrictEqual(tokens4, [
         text('@\n'),
         leaf('mention', { acct: '@v', canonical: '@v', username: 'v', host: null }),
@@ -883,15 +883,46 @@ describe('MFM', () => {
         });

         describe('italic', () => {
-            it('simple', () => {
-                const tokens = analyze('<i>foo</i>');
+            it('underscore', () => {
+                const tokens = analyze('_foo_');
                 assert.deepStrictEqual(tokens, [
                     tree('italic', [
                         text('foo')
                     ], {}),
                 ]);
             });
-        });
+
+            it('simple with asterix', () => {
+                const tokens = analyze('*foo*');
+                assert.deepStrictEqual(tokens, [
+                    tree('italic', [
+                        text('foo')
+                    ], {}),
+                ]);
+            });
+
+            it('exlude emotes', () => {
+                const tokens = analyze('*.*');
+                assert.deepStrictEqual(tokens, [
+                    text("*.*"),
+                ]);
+            });
+
+            it('mixed', () => {
+                const tokens = analyze('_foo*');
+                assert.deepStrictEqual(tokens, [
+                    text('_foo*'),
+                ]);
+            });
+
+            it('mixed', () => {
+                const tokens = analyze('*foo_');
+                assert.deepStrictEqual(tokens, [
+                    text('*foo_'),
+                ]);
+            });
+        },
+        );
     });

     describe('toHtml', () => {