Add a definition list extension, some refactoring

This commit is contained in:
yuin 2019-05-02 22:22:05 +09:00
parent 7acda36819
commit d4d7acb277
26 changed files with 467 additions and 139 deletions

View file

@ -26,7 +26,11 @@ I need a markdown parser for Go that meets following conditions:
[golang-commonmark](https://gitlab.com/golang-commonmark/markdown) may be a good choice, but it seems copy of the [markdown-it](https://github.com/markdown-it) .
[blackfriday.v2](https://github.com/russross/blackfriday/tree/v2) is a fast and widely used implementation, but it is not CommonMark compliant and can not be extended from outside of the package since it's AST is not interfaces but structs.
[blackfriday.v2](https://github.com/russross/blackfriday/tree/v2) is a fast and widely used implementation, but it is not CommonMark compliant and cannot be extended from outside of the package since its AST is not interfaces but structs.
Furthermore, its behavior differs with other implementations in some cases especially of lists. ([Deep nested lists don't output correctly #329](https://github.com/russross/blackfriday/issues/329), [List block cannot have a second line #244](https://github.com/russross/blackfriday/issues/244), etc).
This behavior sometimes causes problems. If you migrate your markdown text to blackfriday based wikis from Github, many lists will immediately be broken.
As mentioned above, CommonMark is too complicated and hard to implement, so Markdown parsers based on CommonMark barely exist.
@ -68,9 +72,9 @@ Parser and Renderer options
| Functional option | Type | Description |
| ----------------- | ---- | ----------- |
| `parser.WithBlockParsers` | List of `util.PrioritizedSlice` whose elements are `parser.BlockParser` | Parsers for parsing block level elements. |
| `parser.WithInlineParsers` | List of `util.PrioritizedSlice` whose elements are `parser.InlineParser` | Parsers for parsing inline level elements. |
| `parser.WithParagraphTransformers` | List of `util.PrioritizedSlice` whose elements are `parser.ParagraphTransformer` | Transformers for transforming paragraph nodes. |
| `parser.WithBlockParsers` | A `util.PrioritizedSlice` whose elements are `parser.BlockParser` | Parsers for parsing block level elements. |
| `parser.WithInlineParsers` | A `util.PrioritizedSlice` whose elements are `parser.InlineParser` | Parsers for parsing inline level elements. |
| `parser.WithParagraphTransformers` | A `util.PrioritizedSlice` whose elements are `parser.ParagraphTransformer` | Transformers for transforming paragraph nodes. |
| `parser.WithHeadingID` | `-` | Enables custom heading ids( `{#custom-id}` ) and auto heading ids. |
| `parser.WithFilterTags` | `...string` | HTML tag names forbidden in HTML blocks and Raw HTMLs. |
@ -92,6 +96,8 @@ Parser and Renderer options
- `extension.GFM`
- This extension enables Table, Strikethrough, Linkify and TaskList.
In addition, this extension sets some tags to `parser.FilterTags` .
- `extension.DefinitionList`
- [PHP Markdown Extra Definition lists](https://michelf.ca/projects/php-markdown/extra/#def-list)
Create extensions
--------------------

View file

@ -140,11 +140,11 @@ type Node interface {
// IsRaw returns true if contents should be rendered as 'raw' contents.
IsRaw() bool
// SetAttribute sets given value to the attributes.
// SetAttribute sets the given value to the attributes.
SetAttribute(name, value []byte)
// Attribute returns a (attribute value, true) if an attribute
// associated with given name is found, otherwise
// associated with the given name is found, otherwise
// (nil, false)
Attribute(name []byte) ([]byte, bool)

View file

@ -127,7 +127,7 @@ func NewParagraph() *Paragraph {
}
}
// IsParagraph returns true if given node implements the Paragraph interface,
// IsParagraph returns true if the given node implements the Paragraph interface,
// otherwise false.
func IsParagraph(node Node) bool {
_, ok := node.(*Paragraph)
@ -305,7 +305,7 @@ func (l *List) IsOrdered() bool {
}
// CanContinue returns true if this list can continue with
// given mark and a list type, otherwise false.
// the given mark and a list type, otherwise false.
func (l *List) CanContinue(marker byte, isOrdered bool) bool {
return marker == l.Marker && isOrdered == l.IsOrdered()
}

View file

@ -108,7 +108,7 @@ func (n *Text) SetHardLineBreak(v bool) {
}
// Merge merges a Node n into this node.
// Merge returns true if given node has been merged, otherwise false.
// Merge returns true if the given node has been merged, otherwise false.
func (n *Text) Merge(node Node, source []byte) bool {
t, ok := node.(*Text)
if !ok {
@ -148,7 +148,7 @@ func NewText() *Text {
}
}
// NewTextSegment returns a new Text node with given source potision.
// NewTextSegment returns a new Text node with the given source position.
func NewTextSegment(v textm.Segment) *Text {
return &Text{
BaseInline: BaseInline{},
@ -156,7 +156,7 @@ func NewTextSegment(v textm.Segment) *Text {
}
}
// NewRawTextSegment returns a new Text node with given source position.
// NewRawTextSegment returns a new Text node with the given source position.
// The new node should be rendered as raw contents.
func NewRawTextSegment(v textm.Segment) *Text {
t := &Text{
@ -256,7 +256,7 @@ func (n *Emphasis) Kind() NodeKind {
return KindEmphasis
}
// NewEmphasis returns a new Emphasis node with given level.
// NewEmphasis returns a new Emphasis node with the given level.
func NewEmphasis(level int) *Emphasis {
return &Emphasis{
BaseInline: BaseInline{},

View file

@ -0,0 +1,83 @@
package ast
import (
gast "github.com/yuin/goldmark/ast"
)
// A DefinitionList struct represents a definition list of Markdown
// (PHP Markdown Extra) text. Its children are the DefinitionTerm and
// DefinitionDescription nodes produced by the definition list parsers.
type DefinitionList struct {
gast.BaseBlock
// Offset is the indent width that continuation lines of this list must
// reach; it is recalculated by the parser for each ':' line.
Offset int
// TemporaryParagraph temporarily holds the paragraph that precedes the
// first ':' line; its lines are converted into DefinitionTerm nodes when
// the first description is opened, after which this field is set to nil.
TemporaryParagraph *gast.Paragraph
}
// Dump implements Node.Dump.
func (n *DefinitionList) Dump(source []byte, level int) {
gast.DumpHelper(n, source, level, nil, nil)
}
// KindDefinitionList is a NodeKind of the DefinitionList node.
var KindDefinitionList = gast.NewNodeKind("DefinitionList")
// Kind implements Node.Kind.
func (n *DefinitionList) Kind() gast.NodeKind {
return KindDefinitionList
}
// NewDefinitionList returns a new DefinitionList node with the given
// indent offset and the (possibly nil) preceding paragraph.
func NewDefinitionList(offset int, para *gast.Paragraph) *DefinitionList {
return &DefinitionList{
Offset: offset,
TemporaryParagraph: para,
}
}
// A DefinitionTerm struct represents a definition list term of Markdown
// (PHP Markdown Extra) text. One term is created per line of the paragraph
// that precedes a ':' description line.
type DefinitionTerm struct {
gast.BaseBlock
}
// Dump implements Node.Dump.
func (n *DefinitionTerm) Dump(source []byte, level int) {
gast.DumpHelper(n, source, level, nil, nil)
}
// KindDefinitionTerm is a NodeKind of the DefinitionTerm node.
var KindDefinitionTerm = gast.NewNodeKind("DefinitionTerm")
// Kind implements Node.Kind.
func (n *DefinitionTerm) Kind() gast.NodeKind {
return KindDefinitionTerm
}
// NewDefinitionTerm returns a new DefinitionTerm node.
func NewDefinitionTerm() *DefinitionTerm {
return &DefinitionTerm{}
}
// A DefinitionDescription struct represents a definition list description of
// Markdown (PHP Markdown Extra) text, i.e. the content following a ':' line.
type DefinitionDescription struct {
gast.BaseBlock
// IsTight reports whether this description was not preceded by a blank
// line; tight descriptions are rendered without a trailing newline after
// the opening <dd> tag (their paragraphs are unwrapped by the parser).
IsTight bool
}
// Dump implements Node.Dump.
func (n *DefinitionDescription) Dump(source []byte, level int) {
gast.DumpHelper(n, source, level, nil, nil)
}
// KindDefinitionDescription is a NodeKind of the DefinitionDescription node.
var KindDefinitionDescription = gast.NewNodeKind("DefinitionDescription")
// Kind implements Node.Kind.
func (n *DefinitionDescription) Kind() gast.NodeKind {
return KindDefinitionDescription
}
// NewDefinitionDescription returns a new DefinitionDescription node.
func NewDefinitionDescription() *DefinitionDescription {
return &DefinitionDescription{}
}

View file

@ -0,0 +1,230 @@
package extension
import (
"github.com/yuin/goldmark"
gast "github.com/yuin/goldmark/ast"
"github.com/yuin/goldmark/extension/ast"
"github.com/yuin/goldmark/parser"
"github.com/yuin/goldmark/renderer"
"github.com/yuin/goldmark/renderer/html"
"github.com/yuin/goldmark/text"
"github.com/yuin/goldmark/util"
)
// definitionListParser is a stateless parser.BlockParser for PHP Markdown
// Extra definition lists; a single shared instance is reused.
type definitionListParser struct {
}
// defaultDefinitionListParser is the shared singleton; safe to share because
// the parser holds no per-parse state (all state lives in the AST nodes).
var defaultDefinitionListParser = &definitionListParser{}
// NewDefinitionListParser returns a new parser.BlockParser that
// can parse PHP Markdown Extra definition lists.
func NewDefinitionListParser() parser.BlockParser {
return defaultDefinitionListParser
}
// Open opens a DefinitionList when the current line starts with ':' followed
// by at least one space and the previous sibling is either a paragraph (whose
// lines become the terms) or an existing DefinitionList (another description
// for the same list). Returns (nil, NoChildren) when this line is not a
// definition description marker.
func (b *definitionListParser) Open(parent gast.Node, reader text.Reader, pc parser.Context) (gast.Node, parser.State) {
	if _, ok := parent.(*ast.DefinitionList); ok {
		// Already inside a definition list; do not open a nested one here.
		return nil, parser.NoChildren
	}
	line, _ := reader.PeekLine()
	pos := pc.BlockOffset()
	// BlockOffset may be out of range (e.g. negative on a blank line);
	// guard the index so a malformed line cannot panic the parser.
	if pos < 0 || pos >= len(line) || line[pos] != ':' {
		return nil, parser.NoChildren
	}
	last := parent.LastChild()
	// Need 1 or more spaces after ':'.
	w, _ := util.IndentWidth(line[pos+1:], pos+1)
	if w < 1 {
		return nil, parser.NoChildren
	}
	if w >= 8 { // content starts with an indented code block; only eat 4 spaces
		w = 5
	}
	w += pos + 1 /* 1 = ':' */
	para, lastIsParagraph := last.(*gast.Paragraph)
	var list *ast.DefinitionList
	var ok bool
	if lastIsParagraph {
		list, ok = last.PreviousSibling().(*ast.DefinitionList)
		if ok { // not the first item: reuse the existing list
			list.Offset = w
			list.TemporaryParagraph = para
		} else { // first item: the paragraph's lines become the terms
			list = ast.NewDefinitionList(w, para)
		}
	} else if list, ok = last.(*ast.DefinitionList); ok { // multiple descriptions
		list.Offset = w
		list.TemporaryParagraph = nil
	} else {
		return nil, parser.NoChildren
	}
	return list, parser.HasChildren
}
// Continue keeps the DefinitionList open across blank lines and across any
// line indented at least as far as the list's offset; a dedented non-blank
// line closes the list.
func (b *definitionListParser) Continue(node gast.Node, reader text.Reader, pc parser.Context) parser.State {
	line, _ := reader.PeekLine()
	// Blank lines never terminate a definition list by themselves.
	if util.IsBlank(line) {
		return parser.Continue | parser.HasChildren
	}
	list, _ := node.(*ast.DefinitionList)
	lineOffset := reader.LineOffset()
	indent, _ := util.IndentWidth(line, lineOffset)
	if indent < list.Offset {
		// Dedented below the required indent: the list ends here.
		return parser.Close
	}
	// Consume the list indent so child blocks parse relative to it.
	advance, padding := util.IndentPosition(line, lineOffset, list.Offset)
	reader.AdvanceAndSetPadding(advance, padding)
	return parser.Continue | parser.HasChildren
}
// Close implements parser.BlockParser; the list node needs no finalization.
func (b *definitionListParser) Close(node gast.Node, reader text.Reader, pc parser.Context) {
// nothing to do
}
// CanInterruptParagraph returns true: a ':' line immediately after a
// paragraph turns that paragraph into the list's terms.
func (b *definitionListParser) CanInterruptParagraph() bool {
return true
}
// CanAcceptIndentedLine implements parser.BlockParser.
func (b *definitionListParser) CanAcceptIndentedLine() bool {
return false
}
// definitionDescriptionParser is a stateless parser.BlockParser for the
// individual ':'-prefixed descriptions inside a DefinitionList.
type definitionDescriptionParser struct {
}
// defaultDefinitionDescriptionParser is the shared singleton instance.
var defaultDefinitionDescriptionParser = &definitionDescriptionParser{}
// NewDefinitionDescriptionParser returns a new parser.BlockParser that
// can parse definition descriptions starting with ':'.
func NewDefinitionDescriptionParser() parser.BlockParser {
return defaultDefinitionDescriptionParser
}
// Open opens a DefinitionDescription under the parent DefinitionList. On the
// first description of a list it also converts each line of the list's
// TemporaryParagraph into a DefinitionTerm node and removes the paragraph.
func (b *definitionDescriptionParser) Open(parent gast.Node, reader text.Reader, pc parser.Context) (gast.Node, parser.State) {
	line, _ := reader.PeekLine()
	pos := pc.BlockOffset()
	// Guard against out-of-range offsets (e.g. blank lines) before indexing.
	if pos < 0 || pos >= len(line) || line[pos] != ':' {
		return nil, parser.NoChildren
	}
	// This parser is only meaningful directly under a DefinitionList; bail
	// out instead of dereferencing a nil list if the parent is anything else.
	list, _ := parent.(*ast.DefinitionList)
	if list == nil {
		return nil, parser.NoChildren
	}
	para := list.TemporaryParagraph
	list.TemporaryParagraph = nil
	if para != nil {
		// First description: each paragraph line becomes one term.
		lines := para.Lines()
		l := lines.Len()
		for i := 0; i < l; i++ {
			term := ast.NewDefinitionTerm()
			segment := lines.At(i)
			term.Lines().Append(segment.TrimRightSpace(reader.Source()))
			list.AppendChild(list, term)
		}
		para.Parent().RemoveChild(para.Parent(), para)
	}
	// Skip the ':' and the following indent up to the list's offset.
	cpos, padding := util.IndentPosition(line[pos+1:], pos+1, list.Offset-pos-1)
	reader.AdvanceAndSetPadding(cpos, padding)
	return ast.NewDefinitionDescription(), parser.HasChildren
}
// Continue implements parser.BlockParser. The enclosing definitionListParser
// detects the end of a description, so in practice this method is never the
// one to close the node; it unconditionally keeps the block open.
func (b *definitionDescriptionParser) Continue(node gast.Node, reader text.Reader, pc parser.Context) parser.State {
return parser.Continue | parser.HasChildren
}
// Close marks the description as tight when no blank line preceded it and,
// for tight descriptions, unwraps child paragraphs into plain text blocks so
// the renderer emits no <p> tags inside <dd>.
func (b *definitionDescriptionParser) Close(node gast.Node, reader text.Reader, pc parser.Context) {
	desc := node.(*ast.DefinitionDescription)
	desc.IsTight = !desc.HasBlankPreviousLines()
	if !desc.IsTight {
		return
	}
	// Capture the next sibling BEFORE ReplaceChild mutates the tree: if
	// ReplaceChild clears the detached paragraph's sibling links, calling
	// NextSibling() on it afterwards would terminate the loop early and
	// leave later paragraphs wrapped.
	for gc := desc.FirstChild(); gc != nil; {
		next := gc.NextSibling()
		if paragraph, ok := gc.(*gast.Paragraph); ok {
			textBlock := gast.NewTextBlock()
			textBlock.SetLines(paragraph.Lines())
			desc.ReplaceChild(desc, paragraph, textBlock)
		}
		gc = next
	}
}
// CanInterruptParagraph returns true: a ':' description line may directly
// follow the paragraph that holds the terms.
func (b *definitionDescriptionParser) CanInterruptParagraph() bool {
return true
}
// CanAcceptIndentedLine implements parser.BlockParser.
func (b *definitionDescriptionParser) CanAcceptIndentedLine() bool {
return false
}
// DefinitionListHTMLRenderer is a renderer.NodeRenderer implementation that
// renders DefinitionList, DefinitionTerm and DefinitionDescription nodes
// as <dl>, <dt> and <dd> elements.
type DefinitionListHTMLRenderer struct {
html.Config
}
// NewDefinitionListHTMLRenderer returns a new DefinitionListHTMLRenderer
// configured with the given html options.
func NewDefinitionListHTMLRenderer(opts ...html.Option) renderer.NodeRenderer {
r := &DefinitionListHTMLRenderer{
Config: html.NewConfig(),
}
for _, opt := range opts {
opt.SetHTMLOption(&r.Config)
}
return r
}
// RegisterFuncs implements renderer.NodeRenderer.RegisterFuncs, registering
// one render function per definition list node kind.
func (r *DefinitionListHTMLRenderer) RegisterFuncs(reg renderer.NodeRendererFuncRegisterer) {
reg.Register(ast.KindDefinitionList, r.renderDefinitionList)
reg.Register(ast.KindDefinitionTerm, r.renderDefinitionTerm)
reg.Register(ast.KindDefinitionDescription, r.renderDefinitionDescription)
}
// renderDefinitionList writes the opening or closing <dl> tag.
func (r *DefinitionListHTMLRenderer) renderDefinitionList(w util.BufWriter, source []byte, n gast.Node, entering bool) (gast.WalkStatus, error) {
	tag := "</dl>\n"
	if entering {
		tag = "<dl>\n"
	}
	_, _ = w.WriteString(tag)
	return gast.WalkContinue, nil
}
// renderDefinitionTerm writes the opening or closing <dt> tag.
func (r *DefinitionListHTMLRenderer) renderDefinitionTerm(w util.BufWriter, source []byte, n gast.Node, entering bool) (gast.WalkStatus, error) {
	tag := "</dt>\n"
	if entering {
		tag = "<dt>"
	}
	_, _ = w.WriteString(tag)
	return gast.WalkContinue, nil
}
// renderDefinitionDescription writes the opening or closing <dd> tag. Tight
// descriptions keep their content on the same line as the opening tag.
func (r *DefinitionListHTMLRenderer) renderDefinitionDescription(w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
	if !entering {
		_, _ = w.WriteString("</dd>\n")
		return gast.WalkContinue, nil
	}
	if node.(*ast.DefinitionDescription).IsTight {
		_, _ = w.WriteString("<dd>")
	} else {
		_, _ = w.WriteString("<dd>\n")
	}
	return gast.WalkContinue, nil
}
// definitionList implements goldmark's extension interface for the
// definition list syntax.
type definitionList struct {
}
// DefinitionList is an extension that allows you to use PHP Markdown Extra definition lists.
var DefinitionList = &definitionList{}
// Extend registers the definition list block parsers and the HTML renderer
// on the given Markdown object.
func (e *definitionList) Extend(m goldmark.Markdown) {
m.Parser().AddOption(parser.WithBlockParsers(
// NOTE(review): the list parser is registered just ahead of the
// description parser (101 vs 102) — presumably the list node must be
// opened before descriptions attach to it; confirm ordering semantics
// of util.Prioritized.
util.Prioritized(NewDefinitionListParser(), 101),
util.Prioritized(NewDefinitionDescriptionParser(), 102),
))
m.Renderer().AddOption(renderer.WithNodeRenderers(
util.Prioritized(NewDefinitionListHTMLRenderer(), 500),
))
}

View file

@ -32,16 +32,16 @@ func NewTableParagraphTransformer() parser.ParagraphTransformer {
return defaultTableParagraphTransformer
}
func (b *tableParagraphTransformer) Transform(node *gast.Paragraph, pc parser.Context) {
func (b *tableParagraphTransformer) Transform(node *gast.Paragraph, reader text.Reader, pc parser.Context) {
lines := node.Lines()
if lines.Len() < 2 {
return
}
alignments := b.parseDelimiter(lines.At(1), pc)
alignments := b.parseDelimiter(lines.At(1), reader)
if alignments == nil {
return
}
header := b.parseRow(lines.At(0), alignments, pc)
header := b.parseRow(lines.At(0), alignments, reader)
if header == nil || len(alignments) != header.ChildCount() {
return
}
@ -50,7 +50,7 @@ func (b *tableParagraphTransformer) Transform(node *gast.Paragraph, pc parser.Co
table.AppendChild(table, ast.NewTableHeader(header))
if lines.Len() > 2 {
for i := 2; i < lines.Len(); i++ {
table.AppendChild(table, b.parseRow(lines.At(i), alignments, pc))
table.AppendChild(table, b.parseRow(lines.At(i), alignments, reader))
}
}
node.Parent().InsertBefore(node.Parent(), node, table)
@ -58,8 +58,9 @@ func (b *tableParagraphTransformer) Transform(node *gast.Paragraph, pc parser.Co
return
}
func (b *tableParagraphTransformer) parseRow(segment text.Segment, alignments []ast.Alignment, pc parser.Context) *ast.TableRow {
line := segment.Value(pc.Source())
func (b *tableParagraphTransformer) parseRow(segment text.Segment, alignments []ast.Alignment, reader text.Reader) *ast.TableRow {
source := reader.Source()
line := segment.Value(source)
pos := 0
pos += util.TrimLeftSpaceLength(line)
limit := len(line)
@ -78,8 +79,8 @@ func (b *tableParagraphTransformer) parseRow(segment text.Segment, alignments []
}
node := ast.NewTableCell()
segment := text.NewSegment(segment.Start+pos, segment.Start+pos+closure)
segment = segment.TrimLeftSpace(pc.Source())
segment = segment.TrimRightSpace(pc.Source())
segment = segment.TrimLeftSpace(source)
segment = segment.TrimRightSpace(source)
node.Lines().Append(segment)
node.Alignment = alignments[i]
row.AppendChild(row, node)
@ -88,8 +89,8 @@ func (b *tableParagraphTransformer) parseRow(segment text.Segment, alignments []
return row
}
func (b *tableParagraphTransformer) parseDelimiter(segment text.Segment, pc parser.Context) []ast.Alignment {
line := segment.Value(pc.Source())
func (b *tableParagraphTransformer) parseDelimiter(segment text.Segment, reader text.Reader) []ast.Alignment {
line := segment.Value(reader.Source())
if !tableDelimRegexp.Match(line) {
return nil
}

View file

@ -103,11 +103,11 @@ func (b *atxHeadingParser) Continue(node ast.Node, reader text.Reader, pc Contex
return Close
}
func (b *atxHeadingParser) Close(node ast.Node, pc Context) {
func (b *atxHeadingParser) Close(node ast.Node, reader text.Reader, pc Context) {
if !b.HeadingID {
return
}
parseOrGenerateHeadingID(node.(*ast.Heading), pc)
parseOrGenerateHeadingID(node.(*ast.Heading), reader, pc)
}
func (b *atxHeadingParser) CanInterruptParagraph() bool {
@ -122,7 +122,7 @@ var headingIDRegexp = regexp.MustCompile(`^(.*[^\\])({#([^}]+)}\s*)\n?$`)
var headingIDMap = NewContextKey()
var attrNameID = []byte("id")
func parseOrGenerateHeadingID(node *ast.Heading, pc Context) {
func parseOrGenerateHeadingID(node *ast.Heading, reader text.Reader, pc Context) {
existsv := pc.Get(headingIDMap)
var exists map[string]bool
if existsv == nil {
@ -133,7 +133,7 @@ func parseOrGenerateHeadingID(node *ast.Heading, pc Context) {
}
lastIndex := node.Lines().Len() - 1
lastLine := node.Lines().At(lastIndex)
line := lastLine.Value(pc.Source())
line := lastLine.Value(reader.Source())
m := headingIDRegexp.FindSubmatchIndex(line)
var headingID []byte
if m != nil {

View file

@ -52,7 +52,7 @@ func (b *blockquoteParser) Continue(node ast.Node, reader text.Reader, pc Contex
return Close
}
func (b *blockquoteParser) Close(node ast.Node, pc Context) {
func (b *blockquoteParser) Close(node ast.Node, reader text.Reader, pc Context) {
// nothing to do
}

View file

@ -50,11 +50,11 @@ func (b *codeBlockParser) Continue(node ast.Node, reader text.Reader, pc Context
return Continue | NoChildren
}
func (b *codeBlockParser) Close(node ast.Node, pc Context) {
func (b *codeBlockParser) Close(node ast.Node, reader text.Reader, pc Context) {
// trim trailing blank lines
lines := node.Lines()
length := lines.Len() - 1
source := pc.Source()
source := reader.Source()
for {
line := lines.At(length)
if util.IsBlank(line.Value(source)) {

View file

@ -58,15 +58,15 @@ func (s *codeSpanParser) Parse(parent ast.Node, block text.Reader, pc Context) a
block.AdvanceLine()
}
end:
if !node.IsBlank(pc.Source()) {
if !node.IsBlank(block.Source()) {
// trim first halfspace and last halfspace
segment := node.FirstChild().(*ast.Text).Segment
shouldTrimmed := true
if !(!segment.IsEmpty() && pc.Source()[segment.Start] == ' ') {
if !(!segment.IsEmpty() && block.Source()[segment.Start] == ' ') {
shouldTrimmed = false
}
segment = node.LastChild().(*ast.Text).Segment
if !(!segment.IsEmpty() && pc.Source()[segment.Stop-1] == ' ') {
if !(!segment.IsEmpty() && block.Source()[segment.Stop-1] == ' ') {
shouldTrimmed = false
}
if shouldTrimmed {

View file

@ -83,7 +83,7 @@ func (b *fencedCodeBlockParser) Continue(node ast.Node, reader text.Reader, pc C
return Continue | NoChildren
}
func (b *fencedCodeBlockParser) Close(node ast.Node, pc Context) {
func (b *fencedCodeBlockParser) Close(node ast.Node, reader text.Reader, pc Context) {
pc.Set(fencedCodeBlockInfoKey, nil)
}

View file

@ -265,7 +265,7 @@ func (b *htmlBlockParser) Continue(node ast.Node, reader text.Reader, pc Context
return Continue | NoChildren
}
func (b *htmlBlockParser) Close(node ast.Node, pc Context) {
func (b *htmlBlockParser) Close(node ast.Node, reader text.Reader, pc Context) {
// nothing to do
}

View file

@ -358,7 +358,7 @@ func parseLinkTitle(block text.Reader) ([]byte, bool) {
return line[1 : pos-1], true
}
func (s *linkParser) CloseBlock(parent ast.Node, pc Context) {
func (s *linkParser) CloseBlock(parent ast.Node, block text.Reader, pc Context) {
tlist := pc.Get(linkLabelStateKey)
if tlist == nil {
return

View file

@ -13,9 +13,9 @@ type linkReferenceParagraphTransformer struct {
// that parses and extracts link reference from paragraphs.
var LinkReferenceParagraphTransformer = &linkReferenceParagraphTransformer{}
func (p *linkReferenceParagraphTransformer) Transform(node *ast.Paragraph, pc Context) {
func (p *linkReferenceParagraphTransformer) Transform(node *ast.Paragraph, reader text.Reader, pc Context) {
lines := node.Lines()
block := text.NewBlockReader(pc.Source(), lines)
block := text.NewBlockReader(reader.Source(), lines)
removes := [][2]int{}
for {
start, end := parseLinkReferenceDefinition(block, pc)

View file

@ -199,7 +199,7 @@ func (b *listParser) Continue(node ast.Node, reader text.Reader, pc Context) Sta
return Continue | HasChildren
}
func (b *listParser) Close(node ast.Node, pc Context) {
func (b *listParser) Close(node ast.Node, reader text.Reader, pc Context) {
list := node.(*ast.List)
for c := node.FirstChild(); c != nil && list.IsTight; c = c.NextSibling() {

View file

@ -68,7 +68,7 @@ func (b *listItemParser) Continue(node ast.Node, reader text.Reader, pc Context)
return Continue | HasChildren
}
func (b *listItemParser) Close(node ast.Node, pc Context) {
func (b *listItemParser) Close(node ast.Node, reader text.Reader, pc Context) {
// nothing to do
}

View file

@ -39,13 +39,13 @@ func (b *paragraphParser) Continue(node ast.Node, reader text.Reader, pc Context
return Continue | NoChildren
}
func (b *paragraphParser) Close(node ast.Node, pc Context) {
func (b *paragraphParser) Close(node ast.Node, reader text.Reader, pc Context) {
lines := node.Lines()
if lines.Len() != 0 {
// trim trailing spaces
length := lines.Len()
lastLine := node.Lines().At(length - 1)
node.Lines().Set(length-1, lastLine.TrimRightSpace(pc.Source()))
node.Lines().Set(length-1, lastLine.TrimRightSpace(reader.Source()))
}
if lines.Len() == 0 {
node.Parent().RemoveChild(node.Parent(), node)

View file

@ -71,20 +71,17 @@ type Context interface {
// String implements Stringer.
String() string
// Source returns a source of Markdown text.
Source() []byte
// Get returns a value associated with given key.
// Get returns a value associated with the given key.
Get(ContextKey) interface{}
// Set sets given value to the context.
// Set sets the given value to the context.
Set(ContextKey, interface{})
// AddReference adds given reference to this context.
// AddReference adds the given reference to this context.
AddReference(Reference)
// Reference returns (a reference, true) if a reference associated with
// given label exists, otherwise (nil, false).
// the given label exists, otherwise (nil, false).
Reference(label string) (Reference, bool)
// References returns a list of references.
@ -104,11 +101,11 @@ type Context interface {
// LastDelimiter returns a last delimiter of the current delimiter list.
LastDelimiter() *Delimiter
// PushDelimiter appends given delimiter to the tail of the current
// PushDelimiter appends the given delimiter to the tail of the current
// delimiter list.
PushDelimiter(delimiter *Delimiter)
// RemoveDelimiter removes given delimiter from the current delimiter list.
// RemoveDelimiter removes the given delimiter from the current delimiter list.
RemoveDelimiter(d *Delimiter)
// ClearDelimiters clears the current delimiter list.
@ -126,7 +123,6 @@ type Context interface {
type parseContext struct {
store []interface{}
source []byte
refs map[string]Reference
blockOffset int
delimiters *Delimiter
@ -135,10 +131,9 @@ type parseContext struct {
}
// NewContext returns a new Context.
func NewContext(source []byte) Context {
func NewContext() Context {
return &parseContext{
store: make([]interface{}, ContextKeyMax+1),
source: source,
refs: map[string]Reference{},
blockOffset: 0,
delimiters: nil,
@ -163,10 +158,6 @@ func (p *parseContext) SetBlockOffset(v int) {
p.blockOffset = v
}
func (p *parseContext) Source() []byte {
return p.source
}
func (p *parseContext) LastDelimiter() *Delimiter {
return p.lastDelimiter
}
@ -322,16 +313,16 @@ type OptionName string
// A Parser interface parses Markdown text into AST nodes.
type Parser interface {
// Parse parses given Markdown text into AST nodes.
// Parse parses the given Markdown text into AST nodes.
Parse(reader text.Reader, opts ...ParseOption) ast.Node
// AddOption adds given option to thie parser.
// AddOption adds the given option to the parser.
AddOption(Option)
}
// A SetOptioner interface sets given option to the object.
// A SetOptioner interface sets the given option to the object.
type SetOptioner interface {
// SetOption sets given option to the object.
// SetOption sets the given option to the object.
// Unacceptable options may be passed.
// Thus implementations must ignore unacceptable options.
SetOption(name OptionName, value interface{})
@ -364,14 +355,14 @@ type BlockParser interface {
Continue(node ast.Node, reader text.Reader, pc Context) State
// Close will be called when the parser returns Close.
Close(node ast.Node, pc Context)
Close(node ast.Node, reader text.Reader, pc Context)
// CanInterruptParagraph returns true if the parser can interrupt paragraphs,
// otherwise false.
CanInterruptParagraph() bool
// CanAcceptIndentedLine returns true if the parser can open new node when
// given line is being indented more than 3 spaces.
// the given line is being indented more than 3 spaces.
CanAcceptIndentedLine() bool
}
@ -384,7 +375,7 @@ type InlineParser interface {
// a head of line
Trigger() []byte
// Parse parse given block into an inline node.
// Parse parse the given block into an inline node.
//
// Parse can parse beyond the current line.
// If Parse has been able to parse the current line, it must advance a reader
@ -396,20 +387,20 @@ type InlineParser interface {
// called when block is closed in the inline parsing.
type CloseBlocker interface {
// CloseBlock will be called when a block is closed.
CloseBlock(parent ast.Node, pc Context)
CloseBlock(parent ast.Node, block text.Reader, pc Context)
}
// A ParagraphTransformer transforms parsed Paragraph nodes.
// For example, link references are searched in parsed Paragraphs.
type ParagraphTransformer interface {
// Transform transforms given paragraph.
Transform(node *ast.Paragraph, pc Context)
// Transform transforms the given paragraph.
Transform(node *ast.Paragraph, reader text.Reader, pc Context)
}
// ASTTransformer transforms entire Markdown document AST tree.
type ASTTransformer interface {
// Transform transforms given AST tree.
Transform(node *ast.Document, pc Context)
// Transform transforms the given AST tree.
Transform(node *ast.Document, reader text.Reader, pc Context)
}
// DefaultBlockParsers returns a new list of default BlockParsers.
@ -683,7 +674,7 @@ func (p *parser) Parse(reader text.Reader, opts ...ParseOption) ast.Node {
opt(c)
}
if c.Context == nil {
c.Context = NewContext(reader.Source())
c.Context = NewContext()
}
pc := c.Context
root := ast.NewDocument()
@ -693,29 +684,29 @@ func (p *parser) Parse(reader text.Reader, opts ...ParseOption) ast.Node {
p.parseBlock(blockReader, node, pc)
})
for _, at := range p.astTransformers {
at.Transform(root, pc)
at.Transform(root, reader, pc)
}
//root.Dump(reader.Source(), 0)
return root
}
func (p *parser) transformParagraph(node *ast.Paragraph, pc Context) {
func (p *parser) transformParagraph(node *ast.Paragraph, reader text.Reader, pc Context) {
for _, pt := range p.paragraphTransformers {
pt.Transform(node, pc)
pt.Transform(node, reader, pc)
if node.Parent() == nil {
break
}
}
}
func (p *parser) closeBlocks(from, to int, pc Context) {
func (p *parser) closeBlocks(from, to int, reader text.Reader, pc Context) {
blocks := pc.OpenedBlocks()
for i := from; i >= to; i-- {
node := blocks[i].Node
blocks[i].Parser.Close(blocks[i].Node, pc)
blocks[i].Parser.Close(blocks[i].Node, reader, pc)
paragraph, ok := node.(*ast.Paragraph)
if ok && node.Parent() != nil {
p.transformParagraph(paragraph, pc)
p.transformParagraph(paragraph, reader, pc)
}
}
if from == len(blocks)-1 {
@ -774,7 +765,7 @@ retry:
node.SetBlankPreviousLines(blankLine)
if last != nil && last.Parent() == nil {
lastPos := len(pc.OpenedBlocks()) - 1
p.closeBlocks(lastPos, lastPos, pc)
p.closeBlocks(lastPos, lastPos, reader, pc)
}
parent.AppendChild(parent, node)
result = newBlocksOpened
@ -806,15 +797,18 @@ func isBlankLine(lineNum, level int, stats []lineStat) ([]lineStat, bool) {
ret := false
for i := len(stats) - 1 - level; i >= 0; i-- {
s := stats[i]
if s.lineNum == lineNum && s.level == level {
ret = s.isBlank
continue
if s.lineNum == lineNum {
if s.level < level && s.isBlank {
return stats[i:], true
} else if s.level == level {
return stats[i:], s.isBlank
}
}
if s.lineNum < lineNum {
return stats[i:], ret
}
}
return stats[0:0], ret
return stats, ret
}
func (p *parser) parseBlocks(parent ast.Node, reader text.Reader, pc Context) {
@ -826,14 +820,17 @@ func (p *parser) parseBlocks(parent ast.Node, reader text.Reader, pc Context) {
if !ok {
return
}
// first, we try to open blocks
if p.openBlocks(parent, lines != 0, reader, pc) != newBlocksOpened {
return
}
lineNum, _ := reader.Position()
l := len(pc.OpenedBlocks())
for i := 0; i < l; i++ {
blankLines = append(blankLines, lineStat{lineNum - 1, i, lines != 0})
if lines != 0 {
l := len(pc.OpenedBlocks())
for i := 0; i < l; i++ {
blankLines = append(blankLines, lineStat{lineNum - 1, i, lines != 0})
}
}
blankLines, isBlank = isBlankLine(lineNum-1, 0, blankLines)
// first, we try to open blocks
if p.openBlocks(parent, isBlank, reader, pc) != newBlocksOpened {
return
}
reader.AdvanceLine()
for { // process opened blocks line by line
@ -847,7 +844,7 @@ func (p *parser) parseBlocks(parent ast.Node, reader text.Reader, pc Context) {
be := openedBlocks[i]
line, _ := reader.PeekLine()
if line == nil {
p.closeBlocks(lastIndex, 0, pc)
p.closeBlocks(lastIndex, 0, reader, pc)
reader.AdvanceLine()
return
}
@ -876,7 +873,7 @@ func (p *parser) parseBlocks(parent ast.Node, reader text.Reader, pc Context) {
}
result := p.openBlocks(thisParent, isBlank, reader, pc)
if result != paragraphContinuation {
p.closeBlocks(lastIndex, i, pc)
p.closeBlocks(lastIndex, i, reader, pc)
}
break
}
@ -988,7 +985,7 @@ func (p *parser) parseBlock(block text.BlockReader, parent ast.Node, pc Context)
ProcessDelimiters(nil, pc)
for _, ip := range p.closeBlockers {
ip.CloseBlock(parent, pc)
ip.CloseBlock(parent, block, pc)
}
}

View file

@ -71,7 +71,7 @@ func (b *setextHeadingParser) Continue(node ast.Node, reader text.Reader, pc Con
return Close
}
func (b *setextHeadingParser) Close(node ast.Node, pc Context) {
func (b *setextHeadingParser) Close(node ast.Node, reader text.Reader, pc Context) {
heading := node.(*ast.Heading)
segment := node.Lines().At(0)
heading.Lines().Clear()
@ -79,7 +79,7 @@ func (b *setextHeadingParser) Close(node ast.Node, pc Context) {
pc.Set(temporaryParagraphKey, nil)
if tmp.Lines().Len() == 0 {
next := heading.NextSibling()
segment = segment.TrimLeftSpace(pc.Source())
segment = segment.TrimLeftSpace(reader.Source())
if next == nil || !ast.IsParagraph(next) {
para := ast.NewParagraph()
para.Lines().Append(segment)
@ -97,7 +97,7 @@ func (b *setextHeadingParser) Close(node ast.Node, pc Context) {
if !b.HeadingID {
return
}
parseOrGenerateHeadingID(heading, pc)
parseOrGenerateHeadingID(heading, reader, pc)
}
func (b *setextHeadingParser) CanInterruptParagraph() bool {

View file

@ -58,7 +58,7 @@ func (b *themanticBreakParser) Continue(node ast.Node, reader text.Reader, pc Co
return Close
}
func (b *themanticBreakParser) Close(node ast.Node, pc Context) {
// Close implements the block parser close hook for thematic breaks.
// A thematic break is fully determined at open time, so there is no
// post-processing to perform here.
func (b *themanticBreakParser) Close(node ast.Node, reader text.Reader, pc Context) {
	// nothing to do
}

View file

@ -62,7 +62,7 @@ func (o *withWriter) SetHTMLOption(c *Config) {
c.Writer = o.value
}
// WithWriter is a functional option that allows you to set given writer to
// WithWriter is a functional option that allows you to set the given writer to
// the renderer.
func WithWriter(writer Writer) interface {
renderer.Option
@ -493,11 +493,11 @@ func (r *Renderer) renderText(w util.BufWriter, source []byte, node ast.Node, en
// A Writer interface writes textual contents to a writer.
type Writer interface {
	// Write writes the given source to writer with resolving references and unescaping
	// backslash escaped characters.
	Write(writer util.BufWriter, source []byte)

	// RawWrite writes the given source to writer without resolving references and
	// unescaping backslash escaped characters.
	RawWrite(writer util.BufWriter, source []byte)
}
@ -617,7 +617,7 @@ var bVb = []byte("vbscript:")
var bFile = []byte("file:")
var bData = []byte("data:")
// IsDangerousURL returns true if given url seems to be a potentially dangerous URL,
// IsDangerousURL returns true if the given url seems to be a potentially dangerous URL,
// otherwise false.
func IsDangerousURL(url []byte) bool {
if bytes.HasPrefix(url, bDataImage) && len(url) >= 11 {

View file

@ -1,4 +1,4 @@
// Package renderer renders given AST to certain formats.
// Package renderer renders the given AST to certain formats.
package renderer
import (
@ -130,7 +130,7 @@ func (r *renderer) Register(kind ast.NodeKind, v NodeRendererFunc) {
}
}
// Render renders given AST node to given writer with given Renderer.
// Render renders the given AST node to the given writer with the given Renderer.
func (r *renderer) Render(w io.Writer, source []byte, n ast.Node) error {
r.initSync.Do(func() {
r.options = r.config.Options

View file

@ -19,6 +19,9 @@ type Reader interface {
// Source returns a source of the reader.
Source() []byte
// ResetPosition resets positions.
ResetPosition()
// Peek returns a byte at current position without advancing the internal pointer.
Peek() byte
@ -28,7 +31,7 @@ type Reader interface {
// PrecendingCharacter returns a character just before current internal pointer.
PrecendingCharacter() rune
// Value returns a value of given segment.
// Value returns a value of the given segment.
Value(Segment) []byte
// LineOffset returns a distance from the line head to current position.
@ -82,13 +85,17 @@ func NewReader(source []byte) Reader {
r := &reader{
source: source,
sourceLength: len(source),
line: -1,
head: 0,
}
r.AdvanceLine()
r.ResetPosition()
return r
}
// ResetPosition rewinds this reader to the beginning of the source:
// it clears the line and head state, then advances onto the first line
// so the reader is immediately usable.
func (r *reader) ResetPosition() {
	r.line = -1
	r.head = 0
	// AdvanceLine must run after the fields are cleared; it positions the
	// reader on the first line of the source.
	r.AdvanceLine()
}
// Source returns the underlying source text this reader reads from.
func (r *reader) Source() []byte {
	return r.source
}
@ -226,6 +233,7 @@ func (r *reader) FindSubMatch(reg *regexp.Regexp) [][]byte {
// A BlockReader interface is a reader that is optimized for Blocks.
type BlockReader interface {
Reader
// Reset resets current state and sets new segments to the reader.
Reset(segment *Segments)
}
@ -250,10 +258,7 @@ func NewBlockReader(source []byte, segments *Segments) BlockReader {
return r
}
// Reset resets current state and sets new segments to the reader.
func (r *blockReader) Reset(segments *Segments) {
r.segments = segments
r.segmentsLength = segments.Len()
func (r *blockReader) ResetPosition() {
r.line = -1
r.head = 0
r.last = 0
@ -267,6 +272,12 @@ func (r *blockReader) Reset(segments *Segments) {
r.AdvanceLine()
}
// Reset resets current state and sets new segments to the reader.
func (r *blockReader) Reset(segments *Segments) {
	r.segments = segments
	r.segmentsLength = segments.Len()
	// The position must be rewound after swapping segments so that
	// subsequent reads start from the head of the new content.
	r.ResetPosition()
}
// Source returns the underlying source text this block reader reads from.
func (r *blockReader) Source() []byte {
	return r.source
}

View file

@ -29,7 +29,7 @@ func NewSegment(start, stop int) Segment {
}
}
// NewSegmentPadding returns a new Segment with given padding.
// NewSegmentPadding returns a new Segment with the given padding.
func NewSegmentPadding(start, stop, n int) Segment {
return Segment{
Start: start,
@ -53,7 +53,7 @@ func (t *Segment) Len() int {
return t.Stop - t.Start + t.Padding
}
// Between returns a segment between this segment and given segment.
// Between returns a segment between this segment and the given segment.
func (t *Segment) Between(other Segment) Segment {
if t.Stop != other.Stop {
panic("invalid state")
@ -90,7 +90,7 @@ func (t *Segment) TrimLeftSpace(buffer []byte) Segment {
}
// TrimLeftSpaceWidth returns a new segment by slicing off leading space
// characters until given width.
// characters until the given width.
func (t *Segment) TrimLeftSpaceWidth(width int, buffer []byte) Segment {
padding := t.Padding
for ; width > 0; width-- {
@ -133,7 +133,7 @@ func (t *Segment) WithStop(v int) Segment {
return NewSegmentPadding(t.Start, v, t.Padding)
}
// ConcatPadding concats the padding to given slice.
// ConcatPadding concats the padding to the given slice.
func (t *Segment) ConcatPadding(v []byte) []byte {
if t.Padding > 0 {
return append(v, bytes.Repeat(space, t.Padding)...)
@ -153,7 +153,7 @@ func NewSegments() *Segments {
}
}
// Append appends given segment after the tail of the collection.
// Append appends the given segment after the tail of the collection.
func (s *Segments) Append(t Segment) {
if s.values == nil {
s.values = make([]Segment, 0, 20)
@ -177,12 +177,12 @@ func (s *Segments) Len() int {
return len(s.values)
}
// At returns a segment at the given index.
// It panics if i is out of range for the underlying slice.
func (s *Segments) At(i int) Segment {
	return s.values[i]
}
// Set sets the given Segment at the given index, replacing the existing value.
// It panics if i is out of range for the underlying slice.
func (s *Segments) Set(i int, v Segment) {
	s.values[i] = v
}
@ -202,7 +202,7 @@ func (s *Segments) Clear() {
s.values = nil
}
// Unshift inserts given Segment at the head of the collection.
// Unshift inserts the given Segment at the head of the collection.
func (s *Segments) Unshift(v Segment) {
s.values = append(s.values[0:1], s.values[0:]...)
s.values[0] = v

View file

@ -36,7 +36,7 @@ func (b *CopyOnWriteBuffer) Write(value []byte) {
b.buffer = append(b.buffer, value...)
}
// WriteByte writes given byte to the buffer.
// WriteByte writes the given byte to the buffer.
func (b *CopyOnWriteBuffer) WriteByte(c byte) {
if !b.copied {
b.buffer = make([]byte, 0, len(b.buffer)+20)
@ -55,7 +55,7 @@ func (b *CopyOnWriteBuffer) IsCopied() bool {
return b.copied
}
// ReadWhile read given source while pred is true.
// ReadWhile read the given source while pred is true.
func ReadWhile(source []byte, index [2]int, pred func(byte) bool) (int, bool) {
j := index[0]
ok := false
@ -70,7 +70,7 @@ func ReadWhile(source []byte, index [2]int, pred func(byte) bool) (int, bool) {
return j, ok
}
// IsBlank returns true if given string is all space characters.
// IsBlank returns true if the given string is all space characters.
func IsBlank(bs []byte) bool {
for _, b := range bs {
if IsSpace(b) {
@ -81,7 +81,7 @@ func IsBlank(bs []byte) bool {
return true
}
// DedentPosition dedents lines by given width.
// DedentPosition dedents lines by the given width.
func DedentPosition(bs []byte, width int) (pos, padding int) {
if width == 0 {
return
@ -114,12 +114,12 @@ func VisualizeSpaces(bs []byte) []byte {
return bs
}
// TabWidth calculates the visual width of a tab character that starts at
// the given position, assuming tab stops every 4 columns.
func TabWidth(currentPos int) int {
	// Distance from currentPos to the next multiple-of-4 tab stop.
	offsetInStop := currentPos % 4
	return 4 - offsetInStop
}
// IndentPosition searches an indent position with given width for given line.
// IndentPosition searches an indent position with the given width for the given line.
// If the line contains tab characters, paddings may be not zero.
// currentPos==0 and width==2:
//
@ -148,7 +148,7 @@ func IndentPosition(bs []byte, currentPos, width int) (pos, padding int) {
return -1, -1
}
// IndentWidth calculates an indent width for given line.
// IndentWidth calculates an indent width for the given line.
func IndentWidth(bs []byte, currentPos int) (width, pos int) {
l := len(bs)
for i := 0; i < l; i++ {
@ -183,7 +183,7 @@ func FirstNonSpacePosition(bs []byte) int {
return -1
}
// FindClosure returns a position that closes given opener.
// FindClosure returns a position that closes the given opener.
// If codeSpan is set true, it ignores characters in code spans.
// If allowNesting is set true, closures correspond to nested opener will be
// ignored.
@ -234,7 +234,7 @@ func FindClosure(bs []byte, opener, closure byte, codeSpan, allowNesting bool) i
return -1
}
// TrimLeft trims characters in given s from head of the source.
// TrimLeft trims characters in the given s from head of the source.
// bytes.TrimLeft offers the same functionality, but bytes.TrimLeft
// allocates new buffer for the result.
func TrimLeft(source, b []byte) []byte {
@ -255,7 +255,7 @@ func TrimLeft(source, b []byte) []byte {
return source[i:]
}
// TrimRight trims characters in given s from tail of the source.
// TrimRight trims characters in the given s from tail of the source.
func TrimRight(source, b []byte) []byte {
i := len(source) - 1
for ; i >= 0; i-- {
@ -294,19 +294,19 @@ func TrimRightSpaceLength(source []byte) int {
return TrimRightLength(source, spaces)
}
// TrimLeftSpace returns a subslice of the given string by slicing off all leading
// space characters. It delegates to TrimLeft with the package-level space set,
// so no new buffer is allocated.
func TrimLeftSpace(source []byte) []byte {
	return TrimLeft(source, spaces)
}
// TrimRightSpace returns a subslice of the given string by slicing off all trailing
// space characters. It delegates to TrimRight with the package-level space set,
// so no new buffer is allocated.
func TrimRightSpace(source []byte) []byte {
	return TrimRight(source, spaces)
}
// ReplaceSpaces replaces sequence of spaces with given repl.
// ReplaceSpaces replaces sequence of spaces with the given repl.
func ReplaceSpaces(source []byte, repl byte) []byte {
var ret []byte
start := -1
@ -350,7 +350,7 @@ func ToRune(source []byte, pos int) rune {
return r
}
// ToValidRune returns 0xFFFD if given rune is invalid, otherwise v.
// ToValidRune returns 0xFFFD if the given rune is invalid, otherwise v.
func ToValidRune(v rune) rune {
if v == 0 || !utf8.ValidRune(v) {
return rune(0xFFFD)
@ -369,7 +369,7 @@ func ToLinkReference(v []byte) string {
var htmlEscapeTable = [256][]byte{nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, []byte("&quot;"), nil, nil, nil, []byte("&amp;"), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, []byte("&lt;"), nil, []byte("&gt;"), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil}
// EscapeHTMLByte returns HTML escaped bytes if the given byte should be escaped,
// otherwise nil.
// The lookup is driven by the package-level htmlEscapeTable, which maps the
// characters '"', '&', '<' and '>' to their HTML entities.
func EscapeHTMLByte(b byte) []byte {
	return htmlEscapeTable[b]
}
var htmlSpace = []byte("%20")
// URLEscape escapes given URL.
// URLEscape escapes the given URL.
// If resolveReference is set true:
// 1. unescape punctuations
// 2. resolve numeric references
@ -723,27 +723,27 @@ var urlEscapeTable = [256]int8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
var utf8lenTable = [256]int8{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 99, 99, 99, 99, 99, 99, 99, 99}
// IsPunct returns true if the given character is a punctuation, otherwise false.
// Membership is decided by a constant-time lookup in the package-level punctTable.
func IsPunct(c byte) bool {
	return punctTable[c] == 1
}
// IsSpace returns true if the given character is a space, otherwise false.
// Membership is decided by a constant-time lookup in the package-level spaceTable.
func IsSpace(c byte) bool {
	return spaceTable[c] == 1
}
// IsNumeric returns true if the given character is an ASCII decimal digit,
// otherwise false.
func IsNumeric(c byte) bool {
	return '0' <= c && c <= '9'
}
// IsHexDecimal returns true if the given character is a hexadecimal digit
// ([0-9a-fA-F]), otherwise false.
func IsHexDecimal(c byte) bool {
	switch {
	case c >= '0' && c <= '9':
		return true
	case c >= 'a' && c <= 'f':
		return true
	case c >= 'A' && c <= 'F':
		return true
	}
	return false
}
// IsAlphaNumeric returns true if the given character is an ASCII letter or
// a decimal digit, otherwise false.
func IsAlphaNumeric(c byte) bool {
	switch {
	case 'a' <= c && c <= 'z':
		return true
	case 'A' <= c && c <= 'Z':
		return true
	case '0' <= c && c <= '9':
		return true
	}
	return false
}
@ -777,7 +777,7 @@ func (s PrioritizedSlice) Sort() {
})
}
// Remove removes given value from this slice.
// Remove removes the given value from this slice.
func (s PrioritizedSlice) Remove(v interface{}) PrioritizedSlice {
i := 0
found := false