// Copyright (c) 2016, Daniel Martí <mvdan@mvdan.cc>
// See LICENSE for licensing information

package syntax

import (
	"bytes"
	"io"
	"unicode/utf8"
)

// bytes that form or start a token
func regOps(r rune) bool {
	switch r {
	case ';', '"', '\'', '(', ')', '$', '|', '&', '>', '<', '`':
		return true
	}
	return false
}

// tokenize these inside parameter expansions
func paramOps(r rune) bool {
	switch r {
	case '}', '#', '!', ':', '-', '+', '=', '?', '%', '[', ']', '/', '^',
		',', '@', '*':
		return true
	}
	return false
}

// these start a parameter expansion name
func paramNameOp(r rune) bool {
	switch r {
	case '}', ':', '+', '=', '%', '[', ']', '/', '^', ',':
		return false
	}
	return true
}

// tokenize these inside arithmetic expansions
func arithmOps(r rune) bool {
	switch r {
	case '+', '-', '!', '~', '*', '/', '%', '(', ')', '^', '<', '>', ':', '=',
		',', '?', '|', '&', '[', ']', '#':
		return true
	}
	return false
}

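// bquoteEscaped reports whether a backslash inside backquotes escapes the
// byte that follows it, which is the case for '$', '`', and '\'.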
func bquoteEscaped(b byte) bool {
	switch b {
	case '$', '`', '\\':
		return true
	}
	return false
}

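// escNewl is a sentinel rune value, one past utf8.RuneSelf, that the lexer
// uses to represent an escaped newline: a backslash immediately followed by
// a newline (or CRLF), which continues the current line without contributing
// to any token.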
const escNewl rune = utf8.RuneSelf + 1

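// rune advances the lexer to the next rune in the input and returns it,
// keeping track of the current line and column. It returns utf8.RuneSelf once
// the input is exhausted and escNewl for escaped newlines; null bytes are
// skipped, like bash does.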
func (p *Parser) rune() rune {
	if p.r == '\n' || p.r == escNewl {
		// p.r instead of b so that newline
		// character positions don't have col 0.
		if p.line++; p.line > lineMax {
			p.lineOverflow = true
		}
		p.col = 0
		p.colOverflow = false
	}
	if p.col += p.w; p.col > colMax {
		p.colOverflow = true
	}
	bquotes := 0
retry:
	if p.bsp < len(p.bs) {
		if b := p.bs[p.bsp]; b < utf8.RuneSelf {
			p.bsp++
			if b == '\x00' {
				// Ignore null bytes while parsing, like bash.
				goto retry
			}
			if b == '\\' {
				if p.r == '\\' {
				} else if p.peekByte('\n') {
					p.bsp++
					p.w, p.r = 1, escNewl
					return escNewl
				} else if p.peekBytes("\r\n") {
					p.bsp += 2
					p.w, p.r = 2, escNewl
					return escNewl
				}
				if p.openBquotes > 0 && bquotes < p.openBquotes &&
					p.bsp < len(p.bs) && bquoteEscaped(p.bs[p.bsp]) {
					bquotes++
					goto retry
				}
			}
			if b == '`' {
				p.lastBquoteEsc = bquotes
			}
			if p.litBs != nil {
				p.litBs = append(p.litBs, b)
			}
			p.w, p.r = 1, rune(b)
			return p.r
		}
		if !utf8.FullRune(p.bs[p.bsp:]) {
			// we need more bytes to read a full non-ascii rune
			p.fill()
		}
		var w int
		p.r, w = utf8.DecodeRune(p.bs[p.bsp:])
		if p.litBs != nil {
			p.litBs = append(p.litBs, p.bs[p.bsp:p.bsp+w]...)
		}
		p.bsp += w
		if p.r == utf8.RuneError && w == 1 {
			p.posErr(p.nextPos(), "invalid UTF-8 encoding")
		}
		p.w = w
	} else {
		if p.r == utf8.RuneSelf {
		} else if p.fill(); p.bs == nil {
			p.bsp++
			p.r = utf8.RuneSelf
			p.w = 1
		} else {
			goto retry
		}
	}
	return p.r
}

// fill reads more bytes from the input src into readBuf. Any bytes that
// had not yet been used at the end of the buffer are slid into the
// beginning of the buffer.
func (p *Parser) fill() {
	p.offs += p.bsp
	left := len(p.bs) - p.bsp
	copy(p.readBuf[:left], p.readBuf[p.bsp:])
readAgain:
	n, err := 0, p.readErr
	if err == nil {
		n, err = p.src.Read(p.readBuf[left:])
		p.readErr = err
	}
	if n == 0 {
		if err == nil {
			goto readAgain
		}
		// don't use p.errPass as we don't want to overwrite p.tok
		if err != io.EOF {
			p.err = err
		}
		if left > 0 {
			p.bs = p.readBuf[:left]
		} else {
			p.bs = nil
		}
	} else {
		p.bs = p.readBuf[:left+n]
	}
	p.bsp = 0
}

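// nextKeepSpaces is the variant of next used in the quoting states where
// whitespace is part of the literal, such as double quotes, heredoc bodies,
// and parameter expansions.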
func (p *Parser) nextKeepSpaces() {
	r := p.r
	if p.quote != hdocBody && p.quote != hdocBodyTabs {
		// Heredocs handle escaped newlines in a special way, but others
		// do not.
		for r == escNewl {
			r = p.rune()
		}
	}
	p.pos = p.nextPos()
	switch p.quote {
	case paramExpRepl:
		switch r {
		case '}', '/':
			p.tok = p.paramToken(r)
		case '`', '"', '$', '\'':
			p.tok = p.regToken(r)
		default:
			p.advanceLitOther(r)
		}
	case dblQuotes:
		switch r {
		case '`', '"', '$':
			p.tok = p.dqToken(r)
		default:
			p.advanceLitDquote(r)
		}
	case hdocBody, hdocBodyTabs:
		switch r {
		case '`', '$':
			p.tok = p.dqToken(r)
		default:
			p.advanceLitHdoc(r)
		}
	default: // paramExpExp:
		switch r {
		case '}':
			p.tok = p.paramToken(r)
		case '`', '"', '$', '\'':
			p.tok = p.regToken(r)
		default:
			p.advanceLitOther(r)
		}
	}
	if p.err != nil && p.tok != _EOF {
		p.tok = _EOF
	}
}

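// next advances the lexer to the next token, setting p.tok and, for literals,
// p.val. It skips whitespace and merges consecutive newlines into a single
// _Newl token.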
func (p *Parser) next() {
	if p.r == utf8.RuneSelf {
		p.tok = _EOF
		return
	}
	p.spaced = false
	if p.quote&allKeepSpaces != 0 {
		p.nextKeepSpaces()
		return
	}
	r := p.r
	for r == escNewl {
		r = p.rune()
	}
skipSpace:
	for {
		switch r {
		case utf8.RuneSelf:
			p.tok = _EOF
			return
		case escNewl:
			r = p.rune()
		case ' ', '\t', '\r':
			p.spaced = true
			r = p.rune()
		case '\n':
			if p.tok == _Newl {
				// merge consecutive newline tokens
				r = p.rune()
				continue
			}
			p.spaced = true
			p.tok = _Newl
			if p.quote != hdocWord && len(p.heredocs) > p.buriedHdocs {
				p.doHeredocs()
			}
			return
		default:
			break skipSpace
		}
	}
	if p.stopAt != nil && (p.spaced || p.tok == illegalTok || p.stopToken()) {
		w := utf8.RuneLen(r)
		if bytes.HasPrefix(p.bs[p.bsp-w:], p.stopAt) {
			p.r = utf8.RuneSelf
			p.w = 1
			p.tok = _EOF
			return
		}
	}
	p.pos = p.nextPos()
	switch {
	case p.quote&allRegTokens != 0:
		switch r {
		case ';', '"', '\'', '(', ')', '$', '|', '&', '>', '<', '`':
			p.tok = p.regToken(r)
		case '#':
			// If we're parsing $foo#bar, ${foo}#bar, 'foo'#bar, or "foo"#bar,
			// #bar is a continuation of the same word, not a comment.
			// TODO: support $(foo)#bar and `foo`#bar as well, which is slightly tricky,
			// as we can't easily tell them apart from (foo)#bar and `#bar`,
			// where #bar should remain a comment.
			if !p.spaced {
				switch p.tok {
				case _LitWord, rightBrace, sglQuote, dblQuote:
					p.advanceLitNone(r)
					return
				}
			}
			r = p.rune()
			p.newLit(r)
		runeLoop:
			for {
				switch r {
				case '\n', utf8.RuneSelf:
					break runeLoop
				case escNewl:
					p.litBs = append(p.litBs, '\\', '\n')
					break runeLoop
				case '`':
					if p.backquoteEnd() {
						break runeLoop
					}
				}
				r = p.rune()
			}
			if p.keepComments {
				*p.curComs = append(*p.curComs, Comment{
					Hash: p.pos,
					Text: p.endLit(),
				})
			} else {
				p.litBs = nil
			}
			p.next()
		case '[', '=':
			if p.quote == arrayElems {
				p.tok = p.paramToken(r)
			} else {
				p.advanceLitNone(r)
			}
		case '?', '*', '+', '@', '!':
			if p.extendedGlob() {
				switch r {
				case '?':
					p.tok = globQuest
				case '*':
					p.tok = globStar
				case '+':
					p.tok = globPlus
				case '@':
					p.tok = globAt
				default: // '!'
					p.tok = globExcl
				}
				p.rune()
				p.rune()
			} else {
				p.advanceLitNone(r)
			}
		default:
			p.advanceLitNone(r)
		}
	case p.quote&allArithmExpr != 0 && arithmOps(r):
		p.tok = p.arithmToken(r)
	case p.quote&allParamExp != 0 && paramOps(r):
		p.tok = p.paramToken(r)
	case p.quote == testExprRegexp:
		if !p.rxFirstPart && p.spaced {
			p.quote = noState
			goto skipSpace
		}
		p.rxFirstPart = false
		switch r {
		case ';', '"', '\'', '$', '&', '>', '<', '`':
			p.tok = p.regToken(r)
		case ')':
			if p.rxOpenParens > 0 {
				// continuation of open paren
				p.advanceLitRe(r)
			} else {
				p.tok = rightParen
				p.quote = noState
				p.rune() // we are tokenizing manually
			}
		default: // including '(', '|'
			p.advanceLitRe(r)
		}
	case regOps(r):
		p.tok = p.regToken(r)
	default:
		p.advanceLitOther(r)
	}
	if p.err != nil && p.tok != _EOF {
		p.tok = _EOF
	}
}

// extendedGlob determines whether we're parsing a Bash extended globbing expression.
// For example, whether `*` or `@` are followed by `(` to form `@(foo)`.
func (p *Parser) extendedGlob() bool {
	if p.val == "function" {
		return false
	}
	if p.peekByte('(') {
		// NOTE: empty pattern list is a valid globbing syntax like `@()`,
		// but we'll operate on the "likelihood" that it is a function;
		// only tokenize if it's a non-empty pattern list.
		// We do this after peeking for just one byte, so that the input `echo *`
		// followed by a newline does not hang an interactive shell parser until
		// another byte is input.
		return !p.peekBytes("()")
	}
	return false
}

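// peekBytes reports whether the input at the current position starts with s,
// reading more bytes from the source if needed.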
func (p *Parser) peekBytes(s string) bool {
	// TODO: This should loop for slow readers, e.g. those providing one byte at
	// a time. Use a loop and test it with testing/iotest.OneByteReader.
	if p.bsp+len(s) > len(p.bs) {
		p.fill()
	}
	return p.bsp+len(s) <= len(p.bs) && bytes.HasPrefix(p.bs[p.bsp:], []byte(s))
}

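// peekByte reports whether the next byte in the input is b, without
// consuming it.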
func (p *Parser) peekByte(b byte) bool {
	if p.bsp == len(p.bs) {
		p.fill()
	}
	return p.bsp < len(p.bs) && p.bs[p.bsp] == b
}

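// regToken lexes an operator token in the regular (unquoted) state, starting
// at r and consuming any further runes that belong to the operator. For
// example, '&' followed by another '&' results in andAnd.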
func (p *Parser) regToken(r rune) token {
	switch r {
	case '\'':
		if p.openBquotes > 0 {
			// bury openBquotes
			p.buriedBquotes = p.openBquotes
			p.openBquotes = 0
		}
		p.rune()
		return sglQuote
	case '"':
		p.rune()
		return dblQuote
	case '`':
		// Don't call p.rune, as we need to work out p.openBquotes to
		// properly handle backslashes in the lexer.
		return bckQuote
	case '&':
		switch p.rune() {
		case '&':
			p.rune()
			return andAnd
		case '>':
			if p.rune() == '>' {
				p.rune()
				return appAll
			}
			return rdrAll
		}
		return and
	case '|':
		switch p.rune() {
		case '|':
			p.rune()
			return orOr
		case '&':
			if p.lang == LangPOSIX {
				break
			}
			p.rune()
			return orAnd
		}
		return or
	case '$':
		switch p.rune() {
		case '\'':
			if p.lang == LangPOSIX {
				break
			}
			p.rune()
			return dollSglQuote
		case '"':
			if p.lang == LangPOSIX {
				break
			}
			p.rune()
			return dollDblQuote
		case '{':
			p.rune()
			return dollBrace
		case '[':
			if !p.lang.isBash() || p.quote == paramExpName {
				// latter to not tokenise ${$[@]} as $[
				break
			}
			p.rune()
			return dollBrack
		case '(':
			if p.rune() == '(' {
				p.rune()
				return dollDblParen
			}
			return dollParen
		}
		return dollar
	case '(':
		if p.rune() == '(' && p.lang != LangPOSIX && p.quote != testExpr {
			p.rune()
			return dblLeftParen
		}
		return leftParen
	case ')':
		p.rune()
		return rightParen
	case ';':
		switch p.rune() {
		case ';':
			if p.rune() == '&' && p.lang.isBash() {
				p.rune()
				return dblSemiAnd
			}
			return dblSemicolon
		case '&':
			if p.lang == LangPOSIX {
				break
			}
			p.rune()
			return semiAnd
		case '|':
			if p.lang != LangMirBSDKorn {
				break
			}
			p.rune()
			return semiOr
		}
		return semicolon
	case '<':
		switch p.rune() {
		case '<':
			if r = p.rune(); r == '-' {
				p.rune()
				return dashHdoc
			} else if r == '<' {
				p.rune()
				return wordHdoc
			}
			return hdoc
		case '>':
			p.rune()
			return rdrInOut
		case '&':
			p.rune()
			return dplIn
		case '(':
			if !p.lang.isBash() {
				break
			}
			p.rune()
			return cmdIn
		}
		return rdrIn
	default: // '>'
		switch p.rune() {
		case '>':
			p.rune()
			return appOut
		case '&':
			p.rune()
			return dplOut
		case '|':
			p.rune()
			return clbOut
		case '(':
			if !p.lang.isBash() {
				break
			}
			p.rune()
			return cmdOut
		}
		return rdrOut
	}
}

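// dqToken lexes an operator token inside double quotes, where only
// backquotes, dollar expansions, and the closing quote are special.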
func (p *Parser) dqToken(r rune) token {
	switch r {
	case '"':
		p.rune()
		return dblQuote
	case '`':
		// Don't call p.rune, as we need to work out p.openBquotes to
		// properly handle backslashes in the lexer.
		return bckQuote
	default: // '$'
		switch p.rune() {
		case '{':
			p.rune()
			return dollBrace
		case '[':
			if !p.lang.isBash() {
				break
			}
			p.rune()
			return dollBrack
		case '(':
			if p.rune() == '(' {
				p.rune()
				return dollDblParen
			}
			return dollParen
		}
		return dollar
	}
}

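// paramToken lexes an operator token inside a parameter expansion, such as
// the ":-" in ${foo:-bar}.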
func (p *Parser) paramToken(r rune) token {
	switch r {
	case '}':
		p.rune()
		return rightBrace
	case ':':
		switch p.rune() {
		case '+':
			p.rune()
			return colPlus
		case '-':
			p.rune()
			return colMinus
		case '?':
			p.rune()
			return colQuest
		case '=':
			p.rune()
			return colAssgn
		}
		return colon
	case '+':
		p.rune()
		return plus
	case '-':
		p.rune()
		return minus
	case '?':
		p.rune()
		return quest
	case '=':
		p.rune()
		return assgn
	case '%':
		if p.rune() == '%' {
			p.rune()
			return dblPerc
		}
		return perc
	case '#':
		if p.rune() == '#' {
			p.rune()
			return dblHash
		}
		return hash
	case '!':
		p.rune()
		return exclMark
	case '[':
		p.rune()
		return leftBrack
	case ']':
		p.rune()
		return rightBrack
	case '/':
		if p.rune() == '/' && p.quote != paramExpRepl {
			p.rune()
			return dblSlash
		}
		return slash
	case '^':
		if p.rune() == '^' {
			p.rune()
			return dblCaret
		}
		return caret
	case ',':
		if p.rune() == ',' {
			p.rune()
			return dblComma
		}
		return comma
	case '@':
		p.rune()
		return at
	default: // '*'
		p.rune()
		return star
	}
}

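// arithmToken lexes an operator token inside an arithmetic expression, such
// as "**" (power) or "+=" (addAssgn).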
func (p *Parser) arithmToken(r rune) token {
	switch r {
	case '!':
		if p.rune() == '=' {
			p.rune()
			return nequal
		}
		return exclMark
	case '=':
		if p.rune() == '=' {
			p.rune()
			return equal
		}
		return assgn
	case '~':
		p.rune()
		return tilde
	case '(':
		p.rune()
		return leftParen
	case ')':
		p.rune()
		return rightParen
	case '&':
		switch p.rune() {
		case '&':
			p.rune()
			return andAnd
		case '=':
			p.rune()
			return andAssgn
		}
		return and
	case '|':
		switch p.rune() {
		case '|':
			p.rune()
			return orOr
		case '=':
			p.rune()
			return orAssgn
		}
		return or
	case '<':
		switch p.rune() {
		case '<':
			if p.rune() == '=' {
				p.rune()
				return shlAssgn
			}
			return hdoc
		case '=':
			p.rune()
			return lequal
		}
		return rdrIn
	case '>':
		switch p.rune() {
		case '>':
			if p.rune() == '=' {
				p.rune()
				return shrAssgn
			}
			return appOut
		case '=':
			p.rune()
			return gequal
		}
		return rdrOut
	case '+':
		switch p.rune() {
		case '+':
			p.rune()
			return addAdd
		case '=':
			p.rune()
			return addAssgn
		}
		return plus
	case '-':
		switch p.rune() {
		case '-':
			p.rune()
			return subSub
		case '=':
			p.rune()
			return subAssgn
		}
		return minus
	case '%':
		if p.rune() == '=' {
			p.rune()
			return remAssgn
		}
		return perc
	case '*':
		switch p.rune() {
		case '*':
			p.rune()
			return power
		case '=':
			p.rune()
			return mulAssgn
		}
		return star
	case '/':
		if p.rune() == '=' {
			p.rune()
			return quoAssgn
		}
		return slash
	case '^':
		if p.rune() == '=' {
			p.rune()
			return xorAssgn
		}
		return caret
	case '[':
		p.rune()
		return leftBrack
	case ']':
		p.rune()
		return rightBrack
	case ',':
		p.rune()
		return comma
	case '?':
		p.rune()
		return quest
	case ':':
		p.rune()
		return colon
	default: // '#'
		p.rune()
		return hash
	}
}

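// newLit starts recording the bytes of a literal into p.litBs, beginning with
// the current rune r.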
func (p *Parser) newLit(r rune) {
	switch {
	case r < utf8.RuneSelf:
		p.litBs = p.litBuf[:1]
		p.litBs[0] = byte(r)
	case r > escNewl:
		w := utf8.RuneLen(r)
		p.litBs = append(p.litBuf[:0], p.bs[p.bsp-w:p.bsp]...)
	default:
		// don't let r == utf8.RuneSelf go to the second case as RuneLen
		// would return -1
		p.litBs = p.litBuf[:0]
	}
}

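// endLit stops recording the current literal and returns the recorded bytes
// as a string, excluding the rune that ended it.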
func (p *Parser) endLit() (s string) {
	if p.r == utf8.RuneSelf || p.r == escNewl {
		s = string(p.litBs)
	} else {
		s = string(p.litBs[:len(p.litBs)-p.w])
	}
	p.litBs = nil
	return
}

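// isLitRedir reports whether the recorded literal can prefix a redirection
// operator, i.e. whether it is a file descriptor number like 2 or a braced
// name like {fd}.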
func (p *Parser) isLitRedir() bool {
	lit := p.litBs[:len(p.litBs)-1]
	if lit[0] == '{' && lit[len(lit)-1] == '}' {
		return ValidName(string(lit[1 : len(lit)-1]))
	}
	for _, b := range lit {
		switch b {
		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
		default:
			return false
		}
	}
	return true
}

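// advanceNameCont reads the rest of a name, such as a variable name, where r
// is already known to be a letter or underscore.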
func (p *Parser) advanceNameCont(r rune) {
	// we know that r is a letter or underscore
loop:
	for p.newLit(r); r != utf8.RuneSelf; r = p.rune() {
		switch {
		case 'a' <= r && r <= 'z':
		case 'A' <= r && r <= 'Z':
		case r == '_':
		case '0' <= r && r <= '9':
		case r == escNewl:
		default:
			break loop
		}
	}
	p.tok, p.val = _LitWord, p.endLit()
}

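// advanceLitOther reads a literal in the non-regular quoting states, such as
// inside parameter expansions or arithmetic expressions, stopping at the
// runes that are special in the current state.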
func (p *Parser) advanceLitOther(r rune) {
	tok := _LitWord
loop:
	for p.newLit(r); r != utf8.RuneSelf; r = p.rune() {
		switch r {
		case '\\': // escaped byte follows
			p.rune()
		case '\'', '"', '`', '$':
			tok = _Lit
			break loop
		case '}':
			if p.quote&allParamExp != 0 {
				break loop
			}
		case '/':
			if p.quote != paramExpExp {
				break loop
			}
		case ':', '=', '%', '^', ',', '?', '!', '~', '*':
			if p.quote&allArithmExpr != 0 || p.quote == paramExpName {
				break loop
			}
		case '[', ']':
			if p.lang != LangPOSIX && p.quote&allArithmExpr != 0 {
				break loop
			}
			fallthrough
		case '#', '@':
			if p.quote&allParamReg != 0 {
				break loop
			}
		case '+', '-', ' ', '\t', ';', '&', '>', '<', '|', '(', ')', '\n', '\r':
			if p.quote&allKeepSpaces == 0 {
				break loop
			}
		}
	}
	p.tok, p.val = tok, p.endLit()
}

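// advanceLitNone reads a literal in the regular (unquoted) state, stopping at
// whitespace, operators, or the start of an expansion.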
func (p *Parser) advanceLitNone(r rune) {
	p.eqlOffs = -1
	tok := _LitWord
loop:
	for p.newLit(r); r != utf8.RuneSelf; r = p.rune() {
		switch r {
		case ' ', '\t', '\n', '\r', '&', '|', ';', '(', ')':
			break loop
		case '\\': // escaped byte follows
			p.rune()
		case '>', '<':
			if p.peekByte('(') {
				tok = _Lit
			} else if p.isLitRedir() {
				tok = _LitRedir
			}
			break loop
		case '`':
			if p.quote != subCmdBckquo {
				tok = _Lit
			}
			break loop
		case '"', '\'', '$':
			tok = _Lit
			break loop
		case '?', '*', '+', '@', '!':
			if p.extendedGlob() {
				tok = _Lit
				break loop
			}
		case '=':
			if p.eqlOffs < 0 {
				p.eqlOffs = len(p.litBs) - 1
			}
		case '[':
			if p.lang != LangPOSIX && len(p.litBs) > 1 && p.litBs[0] != '[' {
				tok = _Lit
				break loop
			}
		}
	}
	p.tok, p.val = tok, p.endLit()
}

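// advanceLitDquote reads a literal inside double quotes, stopping at the
// closing quote or at the start of an expansion.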
func (p *Parser) advanceLitDquote(r rune) {
	tok := _LitWord
loop:
	for p.newLit(r); r != utf8.RuneSelf; r = p.rune() {
		switch r {
		case '"':
			break loop
		case '\\': // escaped byte follows
			p.rune()
		case escNewl, '`', '$':
			tok = _Lit
			break loop
		}
	}
	p.tok, p.val = tok, p.endLit()
}

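// advanceLitHdoc reads a line of a heredoc body, ending the heredoc when a
// line matches the stop word.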
func (p *Parser) advanceLitHdoc(r rune) {
	// Unlike the rest of nextKeepSpaces quote states, we handle escaped
	// newlines here. If lastTok==_Lit, then we know we're following an
	// escaped newline, so the first line can't end the heredoc.
	lastTok := p.tok
	for r == escNewl {
		r = p.rune()
		lastTok = _Lit
	}
	p.pos = p.nextPos()

	p.tok = _Lit
	p.newLit(r)
	if p.quote == hdocBodyTabs {
		for r == '\t' {
			r = p.rune()
		}
	}
	lStart := len(p.litBs) - 1
	stop := p.hdocStops[len(p.hdocStops)-1]
	for ; ; r = p.rune() {
		switch r {
		case escNewl, '$':
			p.val = p.endLit()
			return
		case '\\': // escaped byte follows
			p.rune()
		case '`':
			if !p.backquoteEnd() {
				p.val = p.endLit()
				return
			}
			fallthrough
		case '\n', utf8.RuneSelf:
			if p.parsingDoc {
				if r == utf8.RuneSelf {
					p.tok = _LitWord
					p.val = p.endLit()
					return
				}
			} else if lStart == 0 && lastTok == _Lit {
				// This line starts right after an escaped
				// newline, so it should never end the heredoc.
			} else if lStart >= 0 {
				// Compare the current line with the stop word.
				line := p.litBs[lStart:]
				if r != utf8.RuneSelf && len(line) > 0 {
					line = line[:len(line)-1] // minus trailing character
				}
				if bytes.Equal(line, stop) {
					p.tok = _LitWord
					p.val = p.endLit()[:lStart]
					if p.val == "" {
						p.tok = _Newl
					}
					p.hdocStops[len(p.hdocStops)-1] = nil
					return
				}
			}
			if r != '\n' {
				return // hit an unexpected EOF or closing backquote
			}
			if p.quote == hdocBodyTabs {
				for p.peekByte('\t') {
					p.rune()
				}
			}
			lStart = len(p.litBs)
		}
	}
}

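// quotedHdocWord reads an entire heredoc body as a single literal word, used
// when no expansions need to be parsed within it. It returns nil at the end
// of input or when the body is empty.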
func (p *Parser) quotedHdocWord() *Word {
	r := p.r
	p.newLit(r)
	pos := p.nextPos()
	stop := p.hdocStops[len(p.hdocStops)-1]
	for ; ; r = p.rune() {
		if r == utf8.RuneSelf {
			return nil
		}
		if p.quote == hdocBodyTabs {
			for r == '\t' {
				r = p.rune()
			}
		}
		lStart := len(p.litBs) - 1
	runeLoop:
		for {
			switch r {
			case utf8.RuneSelf, '\n':
				break runeLoop
			case '`':
				if p.backquoteEnd() {
					break runeLoop
				}
			case escNewl:
				p.litBs = append(p.litBs, '\\', '\n')
				break runeLoop
			}
			r = p.rune()
		}
		if lStart < 0 {
			continue
		}
		// Compare the current line with the stop word.
		line := p.litBs[lStart:]
		if r != utf8.RuneSelf && len(line) > 0 {
			line = line[:len(line)-1] // minus \n
		}
		if bytes.Equal(line, stop) {
			p.hdocStops[len(p.hdocStops)-1] = nil
			val := p.endLit()[:lStart]
			if val == "" {
				return nil
			}
			return p.wordOne(p.lit(pos, val))
		}
	}
}

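// advanceLitRe reads a regular expression literal on the right-hand side of
// a "=~" binary test expression, keeping track of open parentheses.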
func (p *Parser) advanceLitRe(r rune) {
	for p.newLit(r); ; r = p.rune() {
		switch r {
		case '\\':
			p.rune()
		case '(':
			p.rxOpenParens++
		case ')':
			if p.rxOpenParens--; p.rxOpenParens < 0 {
				p.tok, p.val = _LitWord, p.endLit()
				p.quote = noState
				return
			}
		case ' ', '\t', '\r', '\n', ';', '&', '>', '<':
			if p.rxOpenParens <= 0 {
				p.tok, p.val = _LitWord, p.endLit()
				p.quote = noState
				return
			}
		case '"', '\'', '$', '`':
			p.tok, p.val = _Lit, p.endLit()
			return
		case utf8.RuneSelf:
			p.tok, p.val = _LitWord, p.endLit()
			p.quote = noState
			return
		}
	}
}

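// testUnaryOp returns the unary test operator that val represents, such as
// TsRegFile for "-f", or 0 if val is not one.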
func testUnaryOp(val string) UnTestOperator {
	switch val {
	case "!":
		return TsNot
	case "-e", "-a":
		return TsExists
	case "-f":
		return TsRegFile
	case "-d":
		return TsDirect
	case "-c":
		return TsCharSp
	case "-b":
		return TsBlckSp
	case "-p":
		return TsNmPipe
	case "-S":
		return TsSocket
	case "-L", "-h":
		return TsSmbLink
	case "-k":
		return TsSticky
	case "-g":
		return TsGIDSet
	case "-u":
		return TsUIDSet
	case "-G":
		return TsGrpOwn
	case "-O":
		return TsUsrOwn
	case "-N":
		return TsModif
	case "-r":
		return TsRead
	case "-w":
		return TsWrite
	case "-x":
		return TsExec
	case "-s":
		return TsNoEmpty
	case "-t":
		return TsFdTerm
	case "-z":
		return TsEmpStr
	case "-n":
		return TsNempStr
	case "-o":
		return TsOptSet
	case "-v":
		return TsVarSet
	case "-R":
		return TsRefVar
	default:
		return 0
	}
}

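// testBinaryOp returns the binary test operator that val represents, such as
// TsReMatch for "=~", or 0 if val is not one.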
func testBinaryOp(val string) BinTestOperator {
	switch val {
	case "=":
		return TsMatchShort
	case "==":
		return TsMatch
	case "!=":
		return TsNoMatch
	case "=~":
		return TsReMatch
	case "-nt":
		return TsNewer
	case "-ot":
		return TsOlder
	case "-ef":
		return TsDevIno
	case "-eq":
		return TsEql
	case "-ne":
		return TsNeq
	case "-le":
		return TsLeq
	case "-ge":
		return TsGeq
	case "-lt":
		return TsLss
	case "-gt":
		return TsGtr
	default:
		return 0
	}
}