package chroma

import (
	"fmt"
	"strings"
)

var (
	// defaultOptions are used when Tokenise is called with nil options.
	defaultOptions = &TokeniseOptions{
		State:    "root",
		EnsureLF: true,
	}
)

// Config for a lexer.
type Config struct {
	// Name of the lexer.
	Name string `xml:"name,omitempty"`

	// Shortcuts for the lexer
	Aliases []string `xml:"alias,omitempty"`

	// File name globs
	Filenames []string `xml:"filename,omitempty"`

	// Secondary file name globs
	AliasFilenames []string `xml:"alias_filename,omitempty"`

	// MIME types
	MimeTypes []string `xml:"mime_type,omitempty"`

	// Regex matching is case-insensitive.
	CaseInsensitive bool `xml:"case_insensitive,omitempty"`

	// Regex matches all characters.
	DotAll bool `xml:"dot_all,omitempty"`

	// Regex does not match across lines ($ matches EOL).
	//
	// Defaults to multiline.
	NotMultiline bool `xml:"not_multiline,omitempty"`

	// Don't strip leading and trailing newlines from the input.
	// DontStripNL bool

	// Strip all leading and trailing whitespace from the input
	// StripAll bool

	// Make sure that the input ends with a newline. This
	// is required for some lexers that consume input linewise.
	EnsureNL bool `xml:"ensure_nl,omitempty"`

	// If given and greater than 0, expand tabs in the input.
	// TabSize int

	// Priority of lexer.
	//
	// If this is 0 it will be treated as a default of 1.
	Priority float32 `xml:"priority,omitempty"`

	// Analyse is a list of regexes to match against the input.
	//
	// If a match is found, the score is returned if single attribute is set to true,
	// otherwise the sum of all the score of matching patterns will be
	// used as the final score.
	Analyse *AnalyseConfig `xml:"analyse,omitempty"`
}

// AnalyseConfig defines the list of regexes analysers.
type AnalyseConfig struct {
	Regexes []RegexConfig `xml:"regex,omitempty"`

	// If true, the first matching score is returned.
	First bool `xml:"first,attr"`
}

// RegexConfig defines a single regex pattern and its score in case of match.
type RegexConfig struct {
	Pattern string  `xml:"pattern,attr"`
	Score   float32 `xml:"score,attr"`
}

// Token output to formatter.
type Token struct {
	Type  TokenType `json:"type"`
	Value string    `json:"value"`
}

// String returns the raw token value.
func (t *Token) String() string { return t.Value }

// GoString implements fmt.GoStringer for %#v formatting.
func (t *Token) GoString() string { return fmt.Sprintf("&Token{%s, %q}", t.Type, t.Value) }

// Clone returns a clone of the Token.
func (t *Token) Clone() Token {
	return *t
}

// EOF is returned by lexers at the end of input.
var EOF Token

// TokeniseOptions contains options for tokenisers.
type TokeniseOptions struct {
	// State to start tokenisation in. Defaults to "root".
	State string
	// Nested tokenisation.
	Nested bool

	// If true, all EOLs are converted into LF
	// by replacing CRLF and CR
	EnsureLF bool
}

// A Lexer for tokenising source code.
type Lexer interface {
	// Config describing the features of the Lexer.
	Config() *Config
	// Tokenise returns an Iterator over tokens in text.
	Tokenise(options *TokeniseOptions, text string) (Iterator, error)
	// SetRegistry sets the registry this Lexer is associated with.
	//
	// The registry should be used by the Lexer if it needs to look up other
	// lexers.
	SetRegistry(registry *LexerRegistry) Lexer
	// SetAnalyser sets a function the Lexer should use for scoring how
	// likely a fragment of text is to match this lexer, between 0.0 and 1.0.
	// A value of 1 indicates high confidence.
	//
	// Lexers may ignore this if they implement their own analysers.
	SetAnalyser(analyser func(text string) float32) Lexer
	// AnalyseText scores how likely a fragment of text is to match
	// this lexer, between 0.0 and 1.0. A value of 1 indicates high confidence.
	AnalyseText(text string) float32
}

// Lexers is a slice of lexers sortable by name.
type Lexers []Lexer

func (l Lexers) Len() int      { return len(l) }
func (l Lexers) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
func (l Lexers) Less(i, j int) bool {
	// Case-insensitive ordering by lexer name.
	return strings.ToLower(l[i].Config().Name) < strings.ToLower(l[j].Config().Name)
}

// PrioritisedLexers is a slice of lexers sortable by priority.
type PrioritisedLexers []Lexer

func (l PrioritisedLexers) Len() int      { return len(l) }
func (l PrioritisedLexers) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
func (l PrioritisedLexers) Less(i, j int) bool {
	// A zero priority is treated as the default of 1 (see Config.Priority).
	ip := l[i].Config().Priority
	if ip == 0 {
		ip = 1
	}
	jp := l[j].Config().Priority
	if jp == 0 {
		jp = 1
	}
	// Higher priority sorts first.
	return ip > jp
}

// Analyser determines how appropriate this lexer is for the given text.
type Analyser interface {
	AnalyseText(text string) float32
}
The pages are generated with Golds v0.8.2. (GOOS=linux GOARCH=amd64)
Golds is a Go 101 project developed by Tapir Liu.
PR and bug reports are welcome and can be submitted to the issue list.
Please follow @zigo_101 (reachable from the left QR code) to get the latest news of Golds.