// Mirror of https://github.com/kubernetes-sigs/descheduler.git
// (vendored Go source; synced 2026-01-27 22:14:52 +01:00; 1302 lines, 26 KiB)
package parser
|
|
|
|
import (
|
|
"bytes"
|
|
"regexp"
|
|
"strconv"
|
|
|
|
"github.com/gomarkdown/markdown/ast"
|
|
)
|
|
|
|
// Parsing of inline elements
|
|
|
|
var (
	// urlRe matches either an absolute http(s)/ftp URL or an absolute path
	// (leading '/'), followed by one or more URL-safe characters. It is a
	// plain string so it can be spliced into anchorRe below.
	urlRe = `((https?|ftp):\/\/|\/)[-A-Za-z0-9+&@#\/%?=~_|!:,.;\(\)]+`
	// anchorRe matches, anchored at the start of input, a complete
	// <a href="URL" [title="..."]>URL</a> tag whose href and body both
	// satisfy urlRe.
	anchorRe = regexp.MustCompile(`^(<a\shref="` + urlRe + `"(\stitle="[^"<>]+")?\s?>` + urlRe + `<\/a>)`)

	// htmlEntityRe matches short named HTML entities such as &amp; or &nbsp;.
	// TODO: improve this regexp to catch all possible entities:
	htmlEntityRe = regexp.MustCompile(`&[a-z]{2,5};`)
)
|
|
|
|
// Inline parses text within a block.
|
|
// Each function returns the number of consumed chars.
|
|
func (p *Parser) Inline(currBlock ast.Node, data []byte) {
|
|
// handlers might call us recursively: enforce a maximum depth
|
|
if p.nesting >= p.maxNesting || len(data) == 0 {
|
|
return
|
|
}
|
|
p.nesting++
|
|
beg, end := 0, 0
|
|
|
|
n := len(data)
|
|
for end < n {
|
|
handler := p.inlineCallback[data[end]]
|
|
if handler == nil {
|
|
end++
|
|
continue
|
|
}
|
|
consumed, node := handler(p, data, end)
|
|
if consumed == 0 {
|
|
// no action from the callback
|
|
end++
|
|
continue
|
|
}
|
|
// copy inactive chars into the output
|
|
ast.AppendChild(currBlock, newTextNode(data[beg:end]))
|
|
if node != nil {
|
|
ast.AppendChild(currBlock, node)
|
|
}
|
|
beg = end + consumed
|
|
end = beg
|
|
}
|
|
|
|
if beg < n {
|
|
if data[end-1] == '\n' {
|
|
end--
|
|
}
|
|
ast.AppendChild(currBlock, newTextNode(data[beg:end]))
|
|
}
|
|
p.nesting--
|
|
}
|
|
|
|
// single and double emphasis parsing
//
// emphasis is the inline handler for '*', '_' and '~' at data[offset:].
// It dispatches on the length of the opening delimiter run: one char is
// single emphasis (or subscript for '~' under SuperSubscript), two chars
// is double emphasis/strikethrough, three chars is triple emphasis.
// Returns the number of bytes consumed and the parsed node, or (0, nil)
// when no emphasis starts here.
func emphasis(p *Parser, data []byte, offset int) (int, ast.Node) {
	data = data[offset:]
	c := data[0] // the delimiter character itself

	n := len(data)
	// single-delimiter case: data[1] is not a repeat of c
	if n > 2 && data[1] != c {
		// whitespace cannot follow an opening emphasis;
		// strikethrough only takes two characters '~~'
		if isSpace(data[1]) {
			return 0, nil
		}
		if p.extensions&SuperSubscript != 0 && c == '~' {
			// potential subscript, no spaces, except when escaped, helperEmphasis does
			// not check that for us, so walk the bytes and check.
			ret := skipUntilChar(data[1:], 0, c)
			if ret == 0 {
				// no closing '~' found
				return 0, nil
			}
			ret++ // we started with data[1:] above.
			for i := 1; i < ret; i++ {
				if isSpace(data[i]) && !isEscape(data, i) {
					return 0, nil
				}
			}
			sub := &ast.Subscript{}
			sub.Literal = data[1:ret] // content between the tildes
			// ret+1 consumes the opening '~', the content, and the closing '~'
			return ret + 1, sub
		}
		ret, node := helperEmphasis(p, data[1:], c)
		if ret == 0 {
			return 0, nil
		}

		// +1 accounts for the opening delimiter skipped via data[1:]
		return ret + 1, node
	}

	// double-delimiter case: exactly two leading c's
	if n > 3 && data[1] == c && data[2] != c {
		if isSpace(data[2]) {
			return 0, nil
		}
		ret, node := helperDoubleEmphasis(p, data[2:], c)
		if ret == 0 {
			return 0, nil
		}

		// +2 accounts for the two opening delimiters skipped via data[2:]
		return ret + 2, node
	}

	// triple-delimiter case: exactly three leading c's
	if n > 4 && data[1] == c && data[2] == c && data[3] != c {
		// '~~~' has no triple form; whitespace may not follow the opener
		if c == '~' || isSpace(data[3]) {
			return 0, nil
		}
		ret, node := helperTripleEmphasis(p, data, 3, c)
		if ret == 0 {
			return 0, nil
		}

		return ret + 3, node
	}

	return 0, nil
}
|
|
|
|
func codeSpan(p *Parser, data []byte, offset int) (int, ast.Node) {
|
|
data = data[offset:]
|
|
|
|
// count the number of backticks in the delimiter
|
|
nb := skipChar(data, 0, '`')
|
|
|
|
// find the next delimiter
|
|
i, end := 0, 0
|
|
for end = nb; end < len(data) && i < nb; end++ {
|
|
if data[end] == '`' {
|
|
i++
|
|
} else {
|
|
i = 0
|
|
}
|
|
}
|
|
|
|
// no matching delimiter?
|
|
if i < nb && end >= len(data) {
|
|
return 0, nil
|
|
}
|
|
|
|
// trim outside whitespace
|
|
fBegin := nb
|
|
for fBegin < end && data[fBegin] == ' ' {
|
|
fBegin++
|
|
}
|
|
|
|
fEnd := end - nb
|
|
for fEnd > fBegin && data[fEnd-1] == ' ' {
|
|
fEnd--
|
|
}
|
|
|
|
// render the code span
|
|
if fBegin != fEnd {
|
|
code := &ast.Code{}
|
|
code.Literal = data[fBegin:fEnd]
|
|
return end, code
|
|
}
|
|
|
|
return end, nil
|
|
}
|
|
|
|
// newline preceded by two spaces becomes <br>
|
|
func maybeLineBreak(p *Parser, data []byte, offset int) (int, ast.Node) {
|
|
origOffset := offset
|
|
offset = skipChar(data, offset, ' ')
|
|
|
|
if offset < len(data) && data[offset] == '\n' {
|
|
if offset-origOffset >= 2 {
|
|
return offset - origOffset + 1, &ast.Hardbreak{}
|
|
}
|
|
return offset - origOffset, nil
|
|
}
|
|
return 0, nil
|
|
}
|
|
|
|
// newline without two spaces works when HardLineBreak is enabled
|
|
func lineBreak(p *Parser, data []byte, offset int) (int, ast.Node) {
|
|
if p.extensions&HardLineBreak != 0 {
|
|
return 1, &ast.Hardbreak{}
|
|
}
|
|
return 0, nil
|
|
}
|
|
|
|
// linkType distinguishes the '['-introduced constructs that link() parses.
type linkType int

const (
	linkNormal           linkType = iota // [text](url) or [text][ref]
	linkImg                              // ![alt](url)
	linkDeferredFootnote                 // [^refId], definition elsewhere
	linkInlineFootnote                   // ^[text]
	linkCitation                         // [@citation] (Mmark extension)
)
|
|
|
|
func isReferenceStyleLink(data []byte, pos int, t linkType) bool {
|
|
if t == linkDeferredFootnote {
|
|
return false
|
|
}
|
|
return pos < len(data)-1 && data[pos] == '[' && data[pos+1] != '^'
|
|
}
|
|
|
|
func maybeImage(p *Parser, data []byte, offset int) (int, ast.Node) {
|
|
if offset < len(data)-1 && data[offset+1] == '[' {
|
|
return link(p, data, offset)
|
|
}
|
|
return 0, nil
|
|
}
|
|
|
|
func maybeInlineFootnoteOrSuper(p *Parser, data []byte, offset int) (int, ast.Node) {
|
|
if offset < len(data)-1 && data[offset+1] == '[' {
|
|
return link(p, data, offset)
|
|
}
|
|
|
|
if p.extensions&SuperSubscript != 0 {
|
|
ret := skipUntilChar(data[offset:], 1, '^')
|
|
if ret == 0 {
|
|
return 0, nil
|
|
}
|
|
for i := offset; i < offset+ret; i++ {
|
|
if isSpace(data[i]) && !isEscape(data, i) {
|
|
return 0, nil
|
|
}
|
|
}
|
|
sup := &ast.Superscript{}
|
|
sup.Literal = data[offset+1 : offset+ret]
|
|
return ret + 1, sup
|
|
}
|
|
|
|
return 0, nil
|
|
}
|
|
|
|
// '[': parse a link or an image or a footnote or a citation
|
|
func link(p *Parser, data []byte, offset int) (int, ast.Node) {
|
|
// no links allowed inside regular links, footnote, and deferred footnotes
|
|
if p.insideLink && (offset > 0 && data[offset-1] == '[' || len(data)-1 > offset && data[offset+1] == '^') {
|
|
return 0, nil
|
|
}
|
|
|
|
var t linkType
|
|
switch {
|
|
// special case: ![^text] == deferred footnote (that follows something with
|
|
// an exclamation point)
|
|
case p.extensions&Footnotes != 0 && len(data)-1 > offset && data[offset+1] == '^':
|
|
t = linkDeferredFootnote
|
|
// ![alt] == image
|
|
case offset >= 0 && data[offset] == '!':
|
|
t = linkImg
|
|
offset++
|
|
// [@citation], [@-citation], [@?citation], [@!citation]
|
|
case p.extensions&Mmark != 0 && len(data)-1 > offset && data[offset+1] == '@':
|
|
t = linkCitation
|
|
// [text] == regular link
|
|
// ^[text] == inline footnote
|
|
// [^refId] == deferred footnote
|
|
case p.extensions&Footnotes != 0:
|
|
if offset >= 0 && data[offset] == '^' {
|
|
t = linkInlineFootnote
|
|
offset++
|
|
} else if len(data)-1 > offset && data[offset+1] == '^' {
|
|
t = linkDeferredFootnote
|
|
}
|
|
default:
|
|
t = linkNormal
|
|
}
|
|
|
|
data = data[offset:]
|
|
|
|
if t == linkCitation {
|
|
return citation(p, data, 0)
|
|
}
|
|
|
|
var (
|
|
i = 1
|
|
noteID int
|
|
title, link, linkID, altContent []byte
|
|
textHasNl = false
|
|
)
|
|
|
|
if t == linkDeferredFootnote {
|
|
i++
|
|
}
|
|
|
|
// look for the matching closing bracket
|
|
for level := 1; level > 0 && i < len(data); i++ {
|
|
switch {
|
|
case data[i] == '\n':
|
|
textHasNl = true
|
|
|
|
case data[i-1] == '\\':
|
|
continue
|
|
|
|
case data[i] == '[':
|
|
level++
|
|
|
|
case data[i] == ']':
|
|
level--
|
|
if level <= 0 {
|
|
i-- // compensate for extra i++ in for loop
|
|
}
|
|
}
|
|
}
|
|
|
|
if i >= len(data) {
|
|
return 0, nil
|
|
}
|
|
|
|
txtE := i
|
|
i++
|
|
var footnoteNode ast.Node
|
|
|
|
// skip any amount of whitespace or newline
|
|
// (this is much more lax than original markdown syntax)
|
|
i = skipSpace(data, i)
|
|
|
|
// inline style link
|
|
switch {
|
|
case i < len(data) && data[i] == '(':
|
|
// skip initial whitespace
|
|
i++
|
|
|
|
i = skipSpace(data, i)
|
|
|
|
linkB := i
|
|
brace := 0
|
|
|
|
// look for link end: ' " )
|
|
findlinkend:
|
|
for i < len(data) {
|
|
switch {
|
|
case data[i] == '\\':
|
|
i += 2
|
|
|
|
case data[i] == '(':
|
|
brace++
|
|
i++
|
|
|
|
case data[i] == ')':
|
|
if brace <= 0 {
|
|
break findlinkend
|
|
}
|
|
brace--
|
|
i++
|
|
|
|
case data[i] == '\'' || data[i] == '"':
|
|
break findlinkend
|
|
|
|
default:
|
|
i++
|
|
}
|
|
}
|
|
|
|
if i >= len(data) {
|
|
return 0, nil
|
|
}
|
|
linkE := i
|
|
|
|
// look for title end if present
|
|
titleB, titleE := 0, 0
|
|
if data[i] == '\'' || data[i] == '"' {
|
|
i++
|
|
titleB = i
|
|
titleEndCharFound := false
|
|
|
|
findtitleend:
|
|
for i < len(data) {
|
|
switch {
|
|
case data[i] == '\\':
|
|
i++
|
|
|
|
case data[i] == data[titleB-1]: // matching title delimiter
|
|
titleEndCharFound = true
|
|
|
|
case titleEndCharFound && data[i] == ')':
|
|
break findtitleend
|
|
}
|
|
i++
|
|
}
|
|
|
|
if i >= len(data) {
|
|
return 0, nil
|
|
}
|
|
|
|
// skip whitespace after title
|
|
titleE = i - 1
|
|
for titleE > titleB && isSpace(data[titleE]) {
|
|
titleE--
|
|
}
|
|
|
|
// check for closing quote presence
|
|
if data[titleE] != '\'' && data[titleE] != '"' {
|
|
titleB, titleE = 0, 0
|
|
linkE = i
|
|
}
|
|
}
|
|
|
|
// remove whitespace at the end of the link
|
|
for linkE > linkB && isSpace(data[linkE-1]) {
|
|
linkE--
|
|
}
|
|
|
|
// remove optional angle brackets around the link
|
|
if data[linkB] == '<' {
|
|
linkB++
|
|
}
|
|
if data[linkE-1] == '>' {
|
|
linkE--
|
|
}
|
|
|
|
// build escaped link and title
|
|
if linkE > linkB {
|
|
link = data[linkB:linkE]
|
|
}
|
|
|
|
if titleE > titleB {
|
|
title = data[titleB:titleE]
|
|
}
|
|
|
|
i++
|
|
|
|
// reference style link
|
|
case isReferenceStyleLink(data, i, t):
|
|
var id []byte
|
|
altContentConsidered := false
|
|
|
|
// look for the id
|
|
i++
|
|
linkB := i
|
|
i = skipUntilChar(data, i, ']')
|
|
|
|
if i >= len(data) {
|
|
return 0, nil
|
|
}
|
|
linkE := i
|
|
|
|
// find the reference
|
|
if linkB == linkE {
|
|
if textHasNl {
|
|
var b bytes.Buffer
|
|
|
|
for j := 1; j < txtE; j++ {
|
|
switch {
|
|
case data[j] != '\n':
|
|
b.WriteByte(data[j])
|
|
case data[j-1] != ' ':
|
|
b.WriteByte(' ')
|
|
}
|
|
}
|
|
|
|
id = b.Bytes()
|
|
} else {
|
|
id = data[1:txtE]
|
|
altContentConsidered = true
|
|
}
|
|
} else {
|
|
id = data[linkB:linkE]
|
|
}
|
|
|
|
// find the reference with matching id
|
|
lr, ok := p.getRef(string(id))
|
|
if !ok {
|
|
return 0, nil
|
|
}
|
|
|
|
// keep link and title from reference
|
|
linkID = id
|
|
link = lr.link
|
|
title = lr.title
|
|
if altContentConsidered {
|
|
altContent = lr.text
|
|
}
|
|
i++
|
|
|
|
// shortcut reference style link or reference or inline footnote
|
|
default:
|
|
var id []byte
|
|
|
|
// craft the id
|
|
if textHasNl {
|
|
var b bytes.Buffer
|
|
|
|
for j := 1; j < txtE; j++ {
|
|
switch {
|
|
case data[j] != '\n':
|
|
b.WriteByte(data[j])
|
|
case data[j-1] != ' ':
|
|
b.WriteByte(' ')
|
|
}
|
|
}
|
|
|
|
id = b.Bytes()
|
|
} else {
|
|
if t == linkDeferredFootnote {
|
|
id = data[2:txtE] // get rid of the ^
|
|
} else {
|
|
id = data[1:txtE]
|
|
}
|
|
}
|
|
|
|
footnoteNode = &ast.ListItem{}
|
|
if t == linkInlineFootnote {
|
|
// create a new reference
|
|
noteID = len(p.notes) + 1
|
|
|
|
var fragment []byte
|
|
if len(id) > 0 {
|
|
if len(id) < 16 {
|
|
fragment = make([]byte, len(id))
|
|
} else {
|
|
fragment = make([]byte, 16)
|
|
}
|
|
copy(fragment, slugify(id))
|
|
} else {
|
|
fragment = append([]byte("footnote-"), []byte(strconv.Itoa(noteID))...)
|
|
}
|
|
|
|
ref := &reference{
|
|
noteID: noteID,
|
|
hasBlock: false,
|
|
link: fragment,
|
|
title: id,
|
|
footnote: footnoteNode,
|
|
}
|
|
|
|
p.notes = append(p.notes, ref)
|
|
p.refsRecord[string(ref.link)] = struct{}{}
|
|
|
|
link = ref.link
|
|
title = ref.title
|
|
} else {
|
|
// find the reference with matching id
|
|
lr, ok := p.getRef(string(id))
|
|
if !ok {
|
|
return 0, nil
|
|
}
|
|
|
|
if t == linkDeferredFootnote && !p.isFootnote(lr) {
|
|
lr.noteID = len(p.notes) + 1
|
|
lr.footnote = footnoteNode
|
|
p.notes = append(p.notes, lr)
|
|
p.refsRecord[string(lr.link)] = struct{}{}
|
|
}
|
|
|
|
// keep link and title from reference
|
|
link = lr.link
|
|
// if inline footnote, title == footnote contents
|
|
title = lr.title
|
|
noteID = lr.noteID
|
|
if len(lr.text) > 0 {
|
|
altContent = lr.text
|
|
}
|
|
}
|
|
|
|
// rewind the whitespace
|
|
i = txtE + 1
|
|
}
|
|
|
|
var uLink []byte
|
|
if t == linkNormal || t == linkImg {
|
|
if len(link) > 0 {
|
|
var uLinkBuf bytes.Buffer
|
|
unescapeText(&uLinkBuf, link)
|
|
uLink = uLinkBuf.Bytes()
|
|
}
|
|
|
|
// links need something to click on and somewhere to go
|
|
if len(uLink) == 0 || (t == linkNormal && txtE <= 1) {
|
|
return 0, nil
|
|
}
|
|
}
|
|
|
|
// call the relevant rendering function
|
|
switch t {
|
|
case linkNormal:
|
|
link := &ast.Link{
|
|
Destination: normalizeURI(uLink),
|
|
Title: title,
|
|
DeferredID: linkID,
|
|
}
|
|
if len(altContent) > 0 {
|
|
ast.AppendChild(link, newTextNode(altContent))
|
|
} else {
|
|
// links cannot contain other links, so turn off link parsing
|
|
// temporarily and recurse
|
|
insideLink := p.insideLink
|
|
p.insideLink = true
|
|
p.Inline(link, data[1:txtE])
|
|
p.insideLink = insideLink
|
|
}
|
|
return i, link
|
|
|
|
case linkImg:
|
|
image := &ast.Image{
|
|
Destination: uLink,
|
|
Title: title,
|
|
}
|
|
ast.AppendChild(image, newTextNode(data[1:txtE]))
|
|
return i + 1, image
|
|
|
|
case linkInlineFootnote, linkDeferredFootnote:
|
|
link := &ast.Link{
|
|
Destination: link,
|
|
|