author	Robert Griesemer <gri@golang.org>	2010-12-07 10:08:00 -0800
committer	Robert Griesemer <gri@golang.org>	2010-12-07 10:08:00 -0800
commit	14eb03f614c9b5ae78f09b01be3ba948ce2d17d5 (patch)
tree	c793f4f3387bf870fabe9a1e76701e881e3df125
parent	6aa85d1cbea011fed57292cc4ff99e5a62de47af (diff)
go/scanner: remove Tokenize() - was only used in tests
R=r
CC=golang-dev
https://golang.org/cl/3415042
-rw-r--r--	src/pkg/go/scanner/scanner.go	32
-rw-r--r--	src/pkg/go/scanner/scanner_test.go	87
2 files changed, 60 insertions(+), 59 deletions(-)
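
Callers that depended on the removed Tokenize can keep an equivalent helper in their own code. The sketch below simply reproduces the deleted function outside the package, written against the scanner API as of this commit (Init taking a *token.FileSet, Scan returning the literal as a []byte); the package name tokutil and the helper name tokenize are placeholders:

	package tokutil

	import (
		"go/scanner"
		"go/token"
	)

	// tokenize mirrors the removed scanner.Tokenize: it calls f with the
	// position, token, and literal of each token until f returns false
	// (usually when the token is token.EOF) and returns the number of
	// errors encountered.
	func tokenize(fset *token.FileSet, filename string, src []byte, err scanner.ErrorHandler, mode uint,
		f func(pos token.Pos, tok token.Token, lit []byte) bool) int {
		var s scanner.Scanner
		s.Init(fset, filename, src, err, mode)
		for f(s.Scan()) {
			// all per-token work happens in f
		}
		return s.ErrorCount
	}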
diff --git a/src/pkg/go/scanner/scanner.go b/src/pkg/go/scanner/scanner.go
index b2d9d7c25d..cb50fa1ea9 100644
--- a/src/pkg/go/scanner/scanner.go
+++ b/src/pkg/go/scanner/scanner.go
@@ -4,7 +4,18 @@
// A scanner for Go source text. Takes a []byte as source which can
// then be tokenized through repeated calls to the Scan function.
-// For a sample use of a scanner, see the implementation of Tokenize.
+// Typical use:
+//
+//	var s Scanner
+//	fset := token.NewFileSet() // position information is relative to fset
+//	s.Init(fset, filename, src, nil /* no error handler */, 0)
+//	for {
+//		pos, tok, lit := s.Scan()
+//		if tok == token.EOF {
+//			break
+//		}
+//		// do something here with pos, tok, and lit
+//	}
//
package scanner
@@ -19,8 +30,7 @@ import (
// A Scanner holds the scanner's internal state while processing
// a given text. It can be allocated as part of another data
-// structure but must be initialized via Init before use. For
-// a sample use, see the implementation of Tokenize.
+// structure but must be initialized via Init before use.
//
type Scanner struct {
	// immutable state
@@ -692,19 +702,3 @@ scanAgain:
	}
	return S.file.Pos(offs), tok, S.src[offs:S.offset]
}
-
-
-// Tokenize calls a function f with the token position, token value, and token
-// text for each token in the source src. The other parameters have the same
-// meaning as for the Init function. Tokenize keeps scanning until f returns
-// false (usually when the token value is token.EOF). The result is the number
-// of errors encountered.
-//
-func Tokenize(set *token.FileSet, filename string, src []byte, err ErrorHandler, mode uint, f func(pos token.Pos, tok token.Token, lit []byte) bool) int {
-	var s Scanner
-	s.Init(set, filename, src, err, mode)
-	for f(s.Scan()) {
-		// action happens in f
-	}
-	return s.ErrorCount
-}
diff --git a/src/pkg/go/scanner/scanner_test.go b/src/pkg/go/scanner/scanner_test.go
index 845dd73f77..edaeb1fd2e 100644
--- a/src/pkg/go/scanner/scanner_test.go
+++ b/src/pkg/go/scanner/scanner_test.go
@@ -227,42 +227,46 @@ func TestScan(t *testing.T) {
	whitespace_linecount := newlineCount(whitespace)
	// verify scan
+	var s Scanner
+	s.Init(fset, "", []byte(src), &testErrorHandler{t}, ScanComments)
	index := 0
	epos := token.Position{"", 0, 1, 1} // expected position
-	nerrors := Tokenize(fset, "", []byte(src), &testErrorHandler{t}, ScanComments,
-		func(pos token.Pos, tok token.Token, litb []byte) bool {
-			e := elt{token.EOF, "", special}
-			if index < len(tokens) {
-				e = tokens[index]
-			}
-			lit := string(litb)
-			if tok == token.EOF {
-				lit = "<EOF>"
-				epos.Line = src_linecount
-				epos.Column = 1
-			}
-			checkPos(t, lit, pos, epos)
-			if tok != e.tok {
-				t.Errorf("bad token for %q: got %s, expected %s", lit, tok.String(), e.tok.String())
-			}
-			if e.tok.IsLiteral() && lit != e.lit {
-				t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, e.lit)
-			}
-			if tokenclass(tok) != e.class {
-				t.Errorf("bad class for %q: got %d, expected %d", lit, tokenclass(tok), e.class)
-			}
-			epos.Offset += len(lit) + len(whitespace)
-			epos.Line += newlineCount(lit) + whitespace_linecount
-			if tok == token.COMMENT && litb[1] == '/' {
-				// correct for unaccounted '\n' in //-style comment
-				epos.Offset++
-				epos.Line++
-			}
-			index++
-			return tok != token.EOF
-		})
-	if nerrors != 0 {
-		t.Errorf("found %d errors", nerrors)
+	for {
+		pos, tok, litb := s.Scan()
+		e := elt{token.EOF, "", special}
+		if index < len(tokens) {
+			e = tokens[index]
+		}
+		lit := string(litb)
+		if tok == token.EOF {
+			lit = "<EOF>"
+			epos.Line = src_linecount
+			epos.Column = 1
+		}
+		checkPos(t, lit, pos, epos)
+		if tok != e.tok {
+			t.Errorf("bad token for %q: got %s, expected %s", lit, tok.String(), e.tok.String())
+		}
+		if e.tok.IsLiteral() && lit != e.lit {
+			t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, e.lit)
+		}
+		if tokenclass(tok) != e.class {
+			t.Errorf("bad class for %q: got %d, expected %d", lit, tokenclass(tok), e.class)
+		}
+		epos.Offset += len(lit) + len(whitespace)
+		epos.Line += newlineCount(lit) + whitespace_linecount
+		if tok == token.COMMENT && litb[1] == '/' {
+			// correct for unaccounted '\n' in //-style comment
+			epos.Offset++
+			epos.Line++
+		}
+		index++
+		if tok == token.EOF {
+			break
+		}
+	}
+	if s.ErrorCount != 0 {
+		t.Errorf("found %d errors", s.ErrorCount)
	}
}
@@ -551,10 +555,13 @@ func TestStdErrorHander(t *testing.T) {
"@ @ @" // original file, line 1 again
v := new(ErrorVector)
- nerrors := Tokenize(fset, "File1", []byte(src), v, 0,
- func(pos token.Pos, tok token.Token, litb []byte) bool {
- return tok != token.EOF
- })
+ var s Scanner
+ s.Init(fset, "File1", []byte(src), v, 0)
+ for {
+ if _, tok, _ := s.Scan(); tok == token.EOF {
+ break
+ }
+ }
list := v.GetErrorList(Raw)
if len(list) != 9 {
@@ -574,8 +581,8 @@ func TestStdErrorHander(t *testing.T) {
		PrintError(os.Stderr, list)
	}
-	if v.ErrorCount() != nerrors {
-		t.Errorf("found %d errors, expected %d", v.ErrorCount(), nerrors)
+	if v.ErrorCount() != s.ErrorCount {
+		t.Errorf("found %d errors, expected %d", v.ErrorCount(), s.ErrorCount)
	}
}
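
For comparison, the Scan loop now documented at the top of scanner.go runs as a standalone program. A minimal sketch against the API at this commit (later versions of go/scanner change the Init and Scan signatures); the file name "example.go" and the input source are made-up placeholders:

	package main

	import (
		"fmt"
		"go/scanner"
		"go/token"
	)

	func main() {
		src := []byte("package p\nfunc f() {}\n") // hypothetical input
		fset := token.NewFileSet() // position information is relative to fset
		var s scanner.Scanner
		s.Init(fset, "example.go", src, nil /* no error handler */, 0)
		for {
			pos, tok, lit := s.Scan()
			if tok == token.EOF {
				break
			}
			// resolve the compact token.Pos back into file/line/column
			fmt.Printf("%s\t%s\t%q\n", fset.Position(pos), tok, lit)
		}
	}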