diff options
author | Russ Cox <rsc@golang.org> | 2017-11-20 09:21:00 -0500 |
---|---|---|
committer | Russ Cox <rsc@golang.org> | 2017-11-20 09:21:00 -0500 |
commit | cda3c6f91d7c3cdd370cbc4c34f34fd597d028bd (patch) | |
tree | e70ceeaa27261783f88e5c1ca935b3ff60078f90 | |
parent | adc1f587ac20d76434aa140413afc537a8aaabc7 (diff) | |
parent | 2ea7d3461bb41d0ae12b56ee52d43314bcdb97f9 (diff) | |
download | go-cda3c6f91d7c3cdd370cbc4c34f34fd597d028bd.tar.gz go-cda3c6f91d7c3cdd370cbc4c34f34fd597d028bd.zip |
[dev.boringcrypto] all: merge go1.9.2 into dev.boringcrypto
Change-Id: I695e804ad8bbb6d90a28108bcf8623fc2bfab659
65 files changed, 1754 insertions, 648 deletions
diff --git a/doc/devel/release.html b/doc/devel/release.html index 8304522449..eac2ddd3ef 100644 --- a/doc/devel/release.html +++ b/doc/devel/release.html @@ -30,6 +30,25 @@ Go 1.9 is a major release of Go. Read the <a href="/doc/go1.9">Go 1.9 Release Notes</a> for more information. </p> +<h3 id="go1.9.minor">Minor revisions</h3> + +<p> +go1.9.1 (released 2017/10/04) includes two security fixes. +See the <a href="https://github.com/golang/go/issues?q=milestone%3AGo1.9.1">Go +1.9.1 milestone</a> on our issue tracker for details. +</p> + +<p> +go1.9.2 (released 2017/10/25) includes fixes to the compiler, linker, runtime, +documentation, <code>go</code> command, +and the <code>crypto/x509</code>, <code>database/sql</code>, <code>log</code>, +and <code>net/smtp</code> packages. +It includes a fix to a bug introduced in Go 1.9.1 that broke <code>go</code> <code>get</code> +of non-Git repositories under certain conditions. +See the <a href="https://github.com/golang/go/issues?q=milestone%3AGo1.9.2">Go +1.9.2 milestone</a> on our issue tracker for details. +</p> + <h2 id="go1.8">go1.8 (released 2017/02/16)</h2> <p> @@ -63,6 +82,13 @@ See the <a href="https://github.com/golang/go/issues?q=milestone%3AGo1.8.3">Go 1.8.3 milestone</a> on our issue tracker for details. </p> +<p> +go1.8.4 (released 2017/10/04) includes two security fixes. +It contains the same fixes as Go 1.9.1 and was released at the same time. +See the <a href="https://github.com/golang/go/issues?q=milestone%3AGo1.8.4">Go +1.8.4 milestone</a> on our issue tracker for details. +</p> + <h2 id="go1.7">go1.7 (released 2016/08/15)</h2> <p> diff --git a/doc/go1.9.html b/doc/go1.9.html index ce103cdb9a..222e0e9ba0 100644 --- a/doc/go1.9.html +++ b/doc/go1.9.html @@ -740,6 +740,11 @@ version of gccgo. and <a href="/pkg/context/#WithValue"><code>context.WithValue</code></a> instead. 
</li> + + <li><!-- CL 35490 --> + <a href="/pkg/net/http/#LocalAddrContextKey"><code>LocalAddrContextKey</code></a> now contains + the connection's actual network address instead of the interface address used by the listener. + </li> </ul> <p>Client & Transport changes:</p> diff --git a/misc/cgo/test/cgo_test.go b/misc/cgo/test/cgo_test.go index f7cf6f613c..9485e25bf4 100644 --- a/misc/cgo/test/cgo_test.go +++ b/misc/cgo/test/cgo_test.go @@ -80,5 +80,6 @@ func Test20369(t *testing.T) { test20369(t) } func Test18720(t *testing.T) { test18720(t) } func Test20266(t *testing.T) { test20266(t) } func Test20129(t *testing.T) { test20129(t) } +func Test21708(t *testing.T) { test21708(t) } func BenchmarkCgoCall(b *testing.B) { benchCgoCall(b) } diff --git a/misc/cgo/test/issue21668.go b/misc/cgo/test/issue21668.go new file mode 100644 index 0000000000..f15b9202ac --- /dev/null +++ b/misc/cgo/test/issue21668.go @@ -0,0 +1,13 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Fail to guess the kind of the constant "x". +// No runtime test; just make sure it compiles. + +package cgotest + +// const int x = 42; +import "C" + +var issue21668_X = C.x diff --git a/misc/cgo/test/issue21708.go b/misc/cgo/test/issue21708.go new file mode 100644 index 0000000000..d413e3c57a --- /dev/null +++ b/misc/cgo/test/issue21708.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cgotest + +// #include <stdint.h> +// #define CAST_TO_INT64 (int64_t)(-1) +import "C" +import "testing" + +func test21708(t *testing.T) { + if got, want := C.CAST_TO_INT64, -1; got != want { + t.Errorf("C.CAST_TO_INT64 == %v, expected %v", got, want) + } +} diff --git a/src/cmd/cgo/gcc.go b/src/cmd/cgo/gcc.go index ff8b81354b..c104067a93 100644 --- a/src/cmd/cgo/gcc.go +++ b/src/cmd/cgo/gcc.go @@ -296,27 +296,21 @@ func (p *Package) guessKinds(f *File) []*Name { // For each name, we generate these lines, where xxx is the index in toSniff plus one. // // #line xxx "not-declared" - // void __cgo_f_xxx_1(void) { __typeof__(name) *__cgo_undefined__; } + // void __cgo_f_xxx_1(void) { __typeof__(name) *__cgo_undefined__1; } // #line xxx "not-type" - // void __cgo_f_xxx_2(void) { name *__cgo_undefined__; } + // void __cgo_f_xxx_2(void) { name *__cgo_undefined__2; } // #line xxx "not-int-const" - // void __cgo_f_xxx_3(void) { enum { __cgo_undefined__ = (name)*1 }; } + // void __cgo_f_xxx_3(void) { enum { __cgo_undefined__3 = (name)*1 }; } // #line xxx "not-num-const" - // void __cgo_f_xxx_4(void) { static const double x = (name); } + // void __cgo_f_xxx_4(void) { static const double __cgo_undefined__4 = (name); } // #line xxx "not-str-lit" - // void __cgo_f_xxx_5(void) { static const char x[] = (name); } - // #line xxx "not-signed-int-const" - // #if 0 < -(name) - // #line xxx "not-signed-int-const" - // #error found unsigned int - // #endif + // void __cgo_f_xxx_5(void) { static const char __cgo_undefined__5[] = (name); } // // If we see an error at not-declared:xxx, the corresponding name is not declared. // If we see an error at not-type:xxx, the corresponding name is a type. // If we see an error at not-int-const:xxx, the corresponding name is not an integer constant. // If we see an error at not-num-const:xxx, the corresponding name is not a number constant. // If we see an error at not-str-lit:xxx, the corresponding name is not a string literal. 
- // If we see an error at not-signed-int-const:xxx, the corresponding name is not a signed integer literal. // // The specific input forms are chosen so that they are valid C syntax regardless of // whether name denotes a type or an expression. @@ -327,26 +321,20 @@ func (p *Package) guessKinds(f *File) []*Name { for i, n := range names { fmt.Fprintf(&b, "#line %d \"not-declared\"\n"+ - "void __cgo_f_%d_1(void) { __typeof__(%s) *__cgo_undefined__; }\n"+ + "void __cgo_f_%d_1(void) { __typeof__(%s) *__cgo_undefined__1; }\n"+ "#line %d \"not-type\"\n"+ - "void __cgo_f_%d_2(void) { %s *__cgo_undefined__; }\n"+ + "void __cgo_f_%d_2(void) { %s *__cgo_undefined__2; }\n"+ "#line %d \"not-int-const\"\n"+ - "void __cgo_f_%d_3(void) { enum { __cgo_undefined__ = (%s)*1 }; }\n"+ + "void __cgo_f_%d_3(void) { enum { __cgo_undefined__3 = (%s)*1 }; }\n"+ "#line %d \"not-num-const\"\n"+ - "void __cgo_f_%d_4(void) { static const double x = (%s); }\n"+ + "void __cgo_f_%d_4(void) { static const double __cgo_undefined__4 = (%s); }\n"+ "#line %d \"not-str-lit\"\n"+ - "void __cgo_f_%d_5(void) { static const char s[] = (%s); }\n"+ - "#line %d \"not-signed-int-const\"\n"+ - "#if 0 < (%s)\n"+ - "#line %d \"not-signed-int-const\"\n"+ - "#error found unsigned int\n"+ - "#endif\n", + "void __cgo_f_%d_5(void) { static const char __cgo_undefined__5[] = (%s); }\n", i+1, i+1, n.C, i+1, i+1, n.C, i+1, i+1, n.C, i+1, i+1, n.C, i+1, i+1, n.C, - i+1, n.C, i+1, ) } fmt.Fprintf(&b, "#line 1 \"completed\"\n"+ @@ -365,7 +353,6 @@ func (p *Package) guessKinds(f *File) []*Name { notNumConst notStrLiteral notDeclared - notSignedIntConst ) sawUnmatchedErrors := false for _, line := range strings.Split(stderr, "\n") { @@ -419,8 +406,6 @@ func (p *Package) guessKinds(f *File) []*Name { sniff[i] |= notNumConst case "not-str-lit": sniff[i] |= notStrLiteral - case "not-signed-int-const": - sniff[i] |= notSignedIntConst default: if isError { sawUnmatchedErrors = true @@ -436,7 +421,7 @@ func (p *Package) 
guessKinds(f *File) []*Name { } for i, n := range names { - switch sniff[i] &^ notSignedIntConst { + switch sniff[i] { default: var tpos token.Pos for _, ref := range f.Ref { @@ -447,11 +432,7 @@ func (p *Package) guessKinds(f *File) []*Name { } error_(tpos, "could not determine kind of name for C.%s", fixGo(n.Go)) case notStrLiteral | notType: - if sniff[i]&notSignedIntConst != 0 { - n.Kind = "uconst" - } else { - n.Kind = "iconst" - } + n.Kind = "iconst" case notIntConst | notStrLiteral | notType: n.Kind = "fconst" case notIntConst | notNumConst | notType: @@ -496,7 +477,7 @@ func (p *Package) loadDWARF(f *File, names []*Name) { b.WriteString("#line 1 \"cgo-dwarf-inference\"\n") for i, n := range names { fmt.Fprintf(&b, "__typeof__(%s) *__cgo__%d;\n", n.C, i) - if n.Kind == "iconst" || n.Kind == "uconst" { + if n.Kind == "iconst" { fmt.Fprintf(&b, "enum { __cgo_enum__%d = %s };\n", i, n.C) } } @@ -505,7 +486,7 @@ func (p *Package) loadDWARF(f *File, names []*Name) { // so we can read them out of the object file. 
fmt.Fprintf(&b, "long long __cgodebug_ints[] = {\n") for _, n := range names { - if n.Kind == "iconst" || n.Kind == "uconst" { + if n.Kind == "iconst" { fmt.Fprintf(&b, "\t%s,\n", n.C) } else { fmt.Fprintf(&b, "\t0,\n") @@ -614,11 +595,11 @@ func (p *Package) loadDWARF(f *File, names []*Name) { switch n.Kind { case "iconst": if i < len(ints) { - n.Const = fmt.Sprintf("%#x", ints[i]) - } - case "uconst": - if i < len(ints) { - n.Const = fmt.Sprintf("%#x", uint64(ints[i])) + if _, ok := types[i].(*dwarf.UintType); ok { + n.Const = fmt.Sprintf("%#x", uint64(ints[i])) + } else { + n.Const = fmt.Sprintf("%#x", ints[i]) + } } case "fconst": if i < len(floats) { diff --git a/src/cmd/cgo/main.go b/src/cmd/cgo/main.go index 3dc3d141b7..3ad13ef9c7 100644 --- a/src/cmd/cgo/main.go +++ b/src/cmd/cgo/main.go @@ -88,7 +88,7 @@ type Name struct { Mangle string // name used in generated Go C string // name used in C Define string // #define expansion - Kind string // "iconst", "uconst", "fconst", "sconst", "type", "var", "fpvar", "func", "not-type" + Kind string // "iconst", "fconst", "sconst", "type", "var", "fpvar", "func", "not-type" Type *Type // the type of xxx FuncType *FuncType AddError bool @@ -100,7 +100,7 @@ func (n *Name) IsVar() bool { return n.Kind == "var" || n.Kind == "fpvar" } -// IsConst reports whether Kind is either "iconst", "uconst", "fconst" or "sconst" +// IsConst reports whether Kind is either "iconst", "fconst" or "sconst" func (n *Name) IsConst() bool { return strings.HasSuffix(n.Kind, "const") } diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 3977be1d73..54c48434cc 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -7,6 +7,7 @@ package gc import ( "fmt" "os" + "runtime" "strconv" "strings" "unicode/utf8" @@ -20,12 +21,16 @@ import ( func parseFiles(filenames []string) uint { var lines uint var noders []*noder + // Limit the number of simultaneously open files. 
+ sem := make(chan struct{}, runtime.GOMAXPROCS(0)+10) for _, filename := range filenames { p := &noder{err: make(chan syntax.Error)} noders = append(noders, p) go func(filename string) { + sem <- struct{}{} + defer func() { <-sem }() defer close(p.err) base := src.NewFileBase(filename, absFilename(filename)) @@ -36,7 +41,7 @@ func parseFiles(filenames []string) uint { } defer f.Close() - p.file, _ = syntax.Parse(base, f, p.error, p.pragma, syntax.CheckBranches) // errors are tracked via p.error + p.file, _ = syntax.Parse(base, f, p.error, p.pragma, fileh, syntax.CheckBranches) // errors are tracked via p.error }(filename) } @@ -65,6 +70,10 @@ func yyerrorpos(pos src.Pos, format string, args ...interface{}) { var pathPrefix string +func fileh(name string) string { + return objabi.AbsFile("", name, pathPrefix) +} + func absFilename(name string) string { return objabi.AbsFile(Ctxt.Pathname, name, pathPrefix) } @@ -401,7 +410,7 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) *Node { f.Func.Endlineno = lineno } else { if pure_go || strings.HasPrefix(f.funcname(), "init.") { - yyerrorl(f.Pos, "missing function body for %q", f.funcname()) + yyerrorl(f.Pos, "missing function body") } } diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 9c1b3ca69f..63e9622983 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -282,7 +282,7 @@ type state struct { type funcLine struct { f *obj.LSym - file string + base *src.PosBase line uint } @@ -3456,7 +3456,7 @@ func (s *state) check(cmp *ssa.Value, fn *obj.LSym) { bNext := s.f.NewBlock(ssa.BlockPlain) line := s.peekPos() pos := Ctxt.PosTable.Pos(line) - fl := funcLine{f: fn, file: pos.Filename(), line: pos.Line()} + fl := funcLine{f: fn, base: pos.Base(), line: pos.Line()} bPanic := s.panics[fl] if bPanic == nil { bPanic = s.f.NewBlock(ssa.BlockPlain) diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 
d79789c4fe..047acee05f 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -166,20 +166,22 @@ func Warnl(line src.XPos, fmt_ string, args ...interface{}) { func Fatalf(fmt_ string, args ...interface{}) { flusherrors() - fmt.Printf("%v: internal compiler error: ", linestr(lineno)) - fmt.Printf(fmt_, args...) - fmt.Printf("\n") - - // If this is a released compiler version, ask for a bug report. - if strings.HasPrefix(objabi.Version, "release") { + if Debug_panic != 0 || nsavederrors+nerrors == 0 { + fmt.Printf("%v: internal compiler error: ", linestr(lineno)) + fmt.Printf(fmt_, args...) fmt.Printf("\n") - fmt.Printf("Please file a bug report including a short program that triggers the error.\n") - fmt.Printf("https://golang.org/issue/new\n") - } else { - // Not a release; dump a stack trace, too. - fmt.Println() - os.Stdout.Write(debug.Stack()) - fmt.Println() + + // If this is a released compiler version, ask for a bug report. + if strings.HasPrefix(objabi.Version, "go") { + fmt.Printf("\n") + fmt.Printf("Please file a bug report including a short program that triggers the error.\n") + fmt.Printf("https://golang.org/issue/new\n") + } else { + // Not a release; dump a stack trace, too. + fmt.Println() + os.Stdout.Write(debug.Stack()) + fmt.Println() + } } hcrash() diff --git a/src/cmd/compile/internal/gc/testdata/gen/zeroGen.go b/src/cmd/compile/internal/gc/testdata/gen/zeroGen.go index fa70b16495..c764c369e6 100644 --- a/src/cmd/compile/internal/gc/testdata/gen/zeroGen.go +++ b/src/cmd/compile/internal/gc/testdata/gen/zeroGen.go @@ -19,6 +19,7 @@ import ( // will be written into the parent directory containing the tests. 
var sizes = [...]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 23, 24, 25, 31, 32, 33, 63, 64, 65, 1023, 1024, 1025} +var usizes = [...]int{8, 16, 24, 32, 64, 256} func main() { w := new(bytes.Buffer) @@ -61,12 +62,74 @@ func main() { fmt.Fprintf(w, "}\n") } + for _, s := range usizes { + // type for test + fmt.Fprintf(w, "type T%du1 struct {\n", s) + fmt.Fprintf(w, " b bool\n") + fmt.Fprintf(w, " val [%d]byte\n", s) + fmt.Fprintf(w, "}\n") + + fmt.Fprintf(w, "type T%du2 struct {\n", s) + fmt.Fprintf(w, " i uint16\n") + fmt.Fprintf(w, " val [%d]byte\n", s) + fmt.Fprintf(w, "}\n") + + // function being tested + fmt.Fprintf(w, "//go:noinline\n") + fmt.Fprintf(w, "func zero%du1_ssa(t *T%du1) {\n", s, s) + fmt.Fprintf(w, " t.val = [%d]byte{}\n", s) + fmt.Fprintf(w, "}\n") + + // function being tested + fmt.Fprintf(w, "//go:noinline\n") + fmt.Fprintf(w, "func zero%du2_ssa(t *T%du2) {\n", s, s) + fmt.Fprintf(w, " t.val = [%d]byte{}\n", s) + fmt.Fprintf(w, "}\n") + + // testing harness + fmt.Fprintf(w, "func testZero%du() {\n", s) + fmt.Fprintf(w, " a := T%du1{false, [%d]byte{", s, s) + for i := 0; i < s; i++ { + fmt.Fprintf(w, "255,") + } + fmt.Fprintf(w, "}}\n") + fmt.Fprintf(w, " zero%du1_ssa(&a)\n", s) + fmt.Fprintf(w, " want := T%du1{false, [%d]byte{", s, s) + for i := 0; i < s; i++ { + fmt.Fprintf(w, "0,") + } + fmt.Fprintf(w, "}}\n") + fmt.Fprintf(w, " if a != want {\n") + fmt.Fprintf(w, " fmt.Printf(\"zero%du2 got=%%v, want %%v\\n\", a, want)\n", s) + fmt.Fprintf(w, " failed=true\n") + fmt.Fprintf(w, " }\n") + fmt.Fprintf(w, " b := T%du2{15, [%d]byte{", s, s) + for i := 0; i < s; i++ { + fmt.Fprintf(w, "255,") + } + fmt.Fprintf(w, "}}\n") + fmt.Fprintf(w, " zero%du2_ssa(&b)\n", s) + fmt.Fprintf(w, " wantb := T%du2{15, [%d]byte{", s, s) + for i := 0; i < s; i++ { + fmt.Fprintf(w, "0,") + } + fmt.Fprintf(w, "}}\n") + fmt.Fprintf(w, " if b != wantb {\n") + fmt.Fprintf(w, " fmt.Printf(\"zero%du2 got=%%v, want %%v\\n\", b, wantb)\n", s) + fmt.Fprintf(w, " 
failed=true\n") + fmt.Fprintf(w, " }\n") + fmt.Fprintf(w, "}\n") + } + // boilerplate at end fmt.Fprintf(w, "var failed bool\n") fmt.Fprintf(w, "func main() {\n") for _, s := range sizes { fmt.Fprintf(w, " testZero%d()\n", s) } + for _, s := range usizes { + fmt.Fprintf(w, " testZero%du()\n", s) + } fmt.Fprintf(w, " if failed {\n") fmt.Fprintf(w, " panic(\"failed\")\n") fmt.Fprintf(w, " }\n") diff --git a/src/cmd/compile/internal/gc/testdata/zero.go b/src/cmd/compile/internal/gc/testdata/zero.go index f6354868cb..9d261aa401 100644 --- a/src/cmd/compile/internal/gc/testdata/zero.go +++ b/src/cmd/compile/internal/gc/testdata/zero.go @@ -505,6 +505,216 @@ func testZero1025() { } } +type T8u1 struct { + b bool + val [8]byte +} +type T8u2 struct { + i uint16 + val [8]byte +} + +//go:noinline +func zero8u1_ssa(t *T8u1) { + t.val = [8]byte{} +} + +//go:noinline +func zero8u2_ssa(t *T8u2) { + t.val = [8]byte{} +} +func testZero8u() { + a := T8u1{false, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + zero8u1_ssa(&a) + want := T8u1{false, [8]byte{0, 0, 0, 0, 0, 0, 0, 0}} + if a != want { + fmt.Printf("zero8u2 got=%v, want %v\n", a, want) + failed = true + } + b := T8u2{15, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + zero8u2_ssa(&b) + wantb := T8u2{15, [8]byte{0, 0, 0, 0, 0, 0, 0, 0}} + if b != wantb { + fmt.Printf("zero8u2 got=%v, want %v\n", b, wantb) + failed = true + } +} + +type T16u1 struct { + b bool + val [16]byte +} +type T16u2 struct { + i uint16 + val [16]byte +} + +//go:noinline +func zero16u1_ssa(t *T16u1) { + t.val = [16]byte{} +} + +//go:noinline +func zero16u2_ssa(t *T16u2) { + t.val = [16]byte{} +} +func testZero16u() { + a := T16u1{false, [16]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + zero16u1_ssa(&a) + want := T16u1{false, [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + if a != want { + fmt.Printf("zero16u2 got=%v, want %v\n", a, want) + failed = true + } + b := T16u2{15, [16]byte{255, 255, 
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + zero16u2_ssa(&b) + wantb := T16u2{15, [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + if b != wantb { + fmt.Printf("zero16u2 got=%v, want %v\n", b, wantb) + failed = true + } +} + +type T24u1 struct { + b bool + val [24]byte +} +type T24u2 struct { + i uint16 + val [24]byte +} + +//go:noinline +func zero24u1_ssa(t *T24u1) { + t.val = [24]byte{} +} + +//go:noinline +func zero24u2_ssa(t *T24u2) { + t.val = [24]byte{} +} +func testZero24u() { + a := T24u1{false, [24]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + zero24u1_ssa(&a) + want := T24u1{false, [24]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + if a != want { + fmt.Printf("zero24u2 got=%v, want %v\n", a, want) + failed = true + } + b := T24u2{15, [24]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + zero24u2_ssa(&b) + wantb := T24u2{15, [24]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + if b != wantb { + fmt.Printf("zero24u2 got=%v, want %v\n", b, wantb) + failed = true + } +} + +type T32u1 struct { + b bool + val [32]byte +} +type T32u2 struct { + i uint16 + val [32]byte +} + +//go:noinline +func zero32u1_ssa(t *T32u1) { + t.val = [32]byte{} +} + +//go:noinline +func zero32u2_ssa(t *T32u2) { + t.val = [32]byte{} +} +func testZero32u() { + a := T32u1{false, [32]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + zero32u1_ssa(&a) + want := T32u1{false, [32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + if a != want { + fmt.Printf("zero32u2 got=%v, want %v\n", a, want) + failed = true + } + b := T32u2{15, [32]byte{255, 255, 255, 255, 255, 
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + zero32u2_ssa(&b) + wantb := T32u2{15, [32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + if b != wantb { + fmt.Printf("zero32u2 got=%v, want %v\n", b, wantb) + failed = true + } +} + +type T64u1 struct { + b bool + val [64]byte +} +type T64u2 struct { + i uint16 + val [64]byte +} + +//go:noinline +func zero64u1_ssa(t *T64u1) { + t.val = [64]byte{} +} + +//go:noinline +func zero64u2_ssa(t *T64u2) { + t.val = [64]byte{} +} +func testZero64u() { + a := T64u1{false, [64]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + zero64u1_ssa(&a) + want := T64u1{false, [64]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + if a != want { + fmt.Printf("zero64u2 got=%v, want %v\n", a, want) + failed = true + } + b := T64u2{15, [64]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + zero64u2_ssa(&b) + wantb := T64u2{15, [64]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + if b != wantb { + fmt.Printf("zero64u2 got=%v, want %v\n", b, wantb) + failed = 
true + } +} + +type T256u1 struct { + b bool + val [256]byte +} +type T256u2 struct { + i uint16 + val [256]byte +} + +//go:noinline +func zero256u1_ssa(t *T256u1) { + t.val = [256]byte{} +} + +//go:noinline +func zero256u2_ssa(t *T256u2) { + t.val = [256]byte{} +} +func testZero256u() { + a := T256u1{false, [256]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + zero256u1_ssa(&a) + want := T256u1{false, [256]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + if a != want { + fmt.Printf("zero256u2 got=%v, want %v\n", a, want) + failed = true + } + b := T256u2{15, [256]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + zero256u2_ssa(&b) + wantb := T256u2{15, [256]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + if b != wantb { + fmt.Printf("zero256u2 got=%v, want %v\n", b, wantb) + failed = true + } +} + var failed bool func main() { @@ -533,6 +743,12 @@ func main() { testZero1023() testZero1024() testZero1025() + testZero8u() + testZero16u() + testZero24u() + testZero32u() + testZero64u() + testZero256u() if failed { panic("failed") } diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules index 1900f5e794..ff38be550e 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -1177,82 +1177,82 @@ (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) -> (MOVQstoreconstidx8 [c] {sym} ptr idx mem) // combine ADDQ into indexed loads and stores -(MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem) -(MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVWloadidx1 [c+d] {sym} ptr idx mem) -(MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVWloadidx2 [c+d] {sym} ptr idx mem) -(MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem) -(MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVLloadidx4 [c+d] {sym} ptr idx mem) -(MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVQloadidx1 [c+d] {sym} ptr idx mem) -(MOVQloadidx8 [c] {sym} 
(ADDQconst [d] ptr) idx mem) -> (MOVQloadidx8 [c+d] {sym} ptr idx mem) -(MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem) -(MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVSSloadidx4 [c+d] {sym} ptr idx mem) -(MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem) -(MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVSDloadidx8 [c+d] {sym} ptr idx mem) - -(MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) -(MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) -(MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVWstoreidx2 [c+d] {sym} ptr idx val mem) -(MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) -(MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVLstoreidx4 [c+d] {sym} ptr idx val mem) -(MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) -(MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVQstoreidx8 [c+d] {sym} ptr idx val mem) -(MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) -(MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem) -(MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) -(MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem) - -(MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem) -(MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVWloadidx1 [c+d] {sym} ptr idx mem) -(MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVWloadidx2 [c+2*d] {sym} ptr idx mem) -(MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> 
(MOVLloadidx1 [c+d] {sym} ptr idx mem) -(MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVLloadidx4 [c+4*d] {sym} ptr idx mem) -(MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVQloadidx1 [c+d] {sym} ptr idx mem) -(MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVQloadidx8 [c+8*d] {sym} ptr idx mem) -(MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem) -(MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem) -(MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem) -(MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem) - -(MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) -(MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) -(MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem) -(MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) -(MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem) -(MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) -(MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem) -(MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) -(MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem) -(MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) -(MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem) - -(MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) -> +(MOVBloadidx1 [c] 
{sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem) +(MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVWloadidx1 [c+d] {sym} ptr idx mem) +(MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVWloadidx2 [c+d] {sym} ptr idx mem) +(MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem) +(MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVLloadidx4 [c+d] {sym} ptr idx mem) +(MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVQloadidx1 [c+d] {sym} ptr idx mem) +(MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVQloadidx8 [c+d] {sym} ptr idx mem) +(MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem) +(MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVSSloadidx4 [c+d] {sym} ptr idx mem) +(MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem) +(MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVSDloadidx8 [c+d] {sym} ptr idx mem) + +(MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) +(MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) +(MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVWstoreidx2 [c+d] {sym} ptr idx val mem) +(MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) +(MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVLstoreidx4 [c+d] {sym} ptr idx val mem) +(MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) +(MOVQstoreidx8 [c] {sym} (ADDQconst [d] 
ptr) idx val mem) && is32Bit(c+d) -> (MOVQstoreidx8 [c+d] {sym} ptr idx val mem) +(MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) +(MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem) +(MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) +(MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem) + +(MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem) +(MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> (MOVWloadidx1 [c+d] {sym} ptr idx mem) +(MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+2*d) -> (MOVWloadidx2 [c+2*d] {sym} ptr idx mem) +(MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem) +(MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+4*d) -> (MOVLloadidx4 [c+4*d] {sym} ptr idx mem) +(MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> (MOVQloadidx1 [c+d] {sym} ptr idx mem) +(MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+8*d) -> (MOVQloadidx8 [c+8*d] {sym} ptr idx mem) +(MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem) +(MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+4*d) -> (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem) +(MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem) +(MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+8*d) -> (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem) + +(MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) +(MOVWstoreidx1 [c] {sym} ptr 
(ADDQconst [d] idx) val mem) && is32Bit(c+d) -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) +(MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+2*d) -> (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem) +(MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) +(MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+4*d) -> (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem) +(MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d) -> (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) +(MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+8*d) -> (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem) +(MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) +(MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+4*d) -> (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem) +(MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d) -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) +(MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+8*d) -> (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem) + +(MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) -> (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) -(MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) -> +(MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) -> (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) -(MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) -> +(MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) -> (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem) -(MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) -> +(MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) -> (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr 
idx mem) -(MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) -> +(MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) -> (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem) -(MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) -> +(MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) -> (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) -(MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem) -> +(MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) -> (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem) -(MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) -> +(MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(c) -> (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) -(MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) -> +(MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(c) -> (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) -(MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) -> +(MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(2*c) -> (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem) -(MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) -> +(MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(c) -> (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) -(MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) -> +(MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(4*c) -> (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem) -(MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) -> +(MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(c) -> (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) -(MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem) -> 
+(MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(8*c) -> (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem) // fold LEAQs together @@ -2301,22 +2301,22 @@ (ADDLconst [c] (LEAL [d] {s} x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x) (LEAL [c] {s} (ADDLconst [d] x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x) -(MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) -> +(MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) -> (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) -(MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) -> +(MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) -> (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) -(MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) -> +(MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) -> (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) -(MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) -> +(MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) -> (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) -(MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) -> +(MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) -> (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) -(MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) -> +(MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) -> (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) -(MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) -> +(MOVWstore [off1] {sym1} (LEAL [off2] {sym2} 
base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) -> (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) -(MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) -> +(MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) -> (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) -> @@ -2405,15 +2405,17 @@ (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x))) -> (BSFQ (ORQconst <t> [1<<16] x)) // Redundant sign/zero extensions -(MOVLQSX x:(MOVLQSX _)) -> x -(MOVLQSX x:(MOVWQSX _)) -> x -(MOVLQSX x:(MOVBQSX _)) -> x -(MOVWQSX x:(MOVWQSX _)) -> x -(MOVWQSX x:(MOVBQSX _)) -> x -(MOVBQSX x:(MOVBQSX _)) -> x -(MOVLQZX x:(MOVLQZX _)) -> x -(MOVLQZX x:(MOVWQZX _)) -> x -(MOVLQZX x:(MOVBQZX _)) -> x -(MOVWQZX x:(MOVWQZX _)) -> x -(MOVWQZX x:(MOVBQZX _)) -> x -(MOVBQZX x:(MOVBQZX _)) -> x +// Note: see issue 21963. We have to make sure we use the right type on +// the resulting extension (the outer type, not the inner type). +(MOVLQSX (MOVLQSX x)) -> (MOVLQSX x) +(MOVLQSX (MOVWQSX x)) -> (MOVWQSX x) +(MOVLQSX (MOVBQSX x)) -> (MOVBQSX x) +(MOVWQSX (MOVWQSX x)) -> (MOVWQSX x) +(MOVWQSX (MOVBQSX x)) -> (MOVBQSX x) +(MOVBQSX (MOVBQSX x)) -> (MOVBQSX x) +(MOVLQZX (MOVLQZX x)) -> (MOVLQZX x) +(MOVLQZX (MOVWQZX x)) -> (MOVWQZX x) +(MOVLQZX (MOVBQZX x)) -> (MOVBQZX x) +(MOVWQZX (MOVWQZX x)) -> (MOVWQZX x) +(MOVWQZX (MOVBQZX x)) -> (MOVBQZX x) +(MOVBQZX (MOVBQZX x)) -> (MOVBQZX x) diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go index c51cbd2238..c984cbfb12 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go @@ -20,6 +20,7 @@ import "strings" // - Unused portions of AuxInt (or the Val portion of ValAndOff) are // filled by sign-extending the used portion. 
Users of AuxInt which interpret // AuxInt as unsigned (e.g. shifts) must be careful. +// - All SymOff opcodes require their offset to fit in an int32. // Suffixes encode the bit width of various instructions. // Q (quad word) = 64 bit @@ -189,17 +190,17 @@ func init() { // binary ops {name: "ADDQ", argLength: 2, reg: gp21sp, asm: "ADDQ", commutative: true, clobberFlags: true}, // arg0 + arg1 {name: "ADDL", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true, clobberFlags: true}, // arg0 + arg1 - {name: "ADDQconst", argLength: 1, reg: gp11sp, asm: "ADDQ", aux: "Int64", typ: "UInt64", clobberFlags: true}, // arg0 + auxint + {name: "ADDQconst", argLength: 1, reg: gp11sp, asm: "ADDQ", aux: "Int32", typ: "UInt64", clobberFlags: true}, // arg0 + auxint {name: "ADDLconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int32", clobberFlags: true}, // arg0 + auxint {name: "SUBQ", argLength: 2, reg: gp21, asm: "SUBQ", resultInArg0: true, clobberFlags: true}, // arg0 - arg1 {name: "SUBL", argLength: 2, reg: gp21, asm: "SUBL", resultInArg0: true, clobberFlags: true}, // arg0 - arg1 - {name: "SUBQconst", argLength: 1, reg: gp11, asm: "SUBQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 - auxint + {name: "SUBQconst", argLength: 1, reg: gp11, asm: "SUBQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint {name: "SUBLconst", argLength: 1, reg: gp11, asm: "SUBL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint {name: "MULQ", argLength: 2, reg: gp21, asm: "IMULQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1 {name: "MULL", argLength: 2, reg: gp21, asm: "IMULL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1 - {name: "MULQconst", argLength: 1, reg: gp11, asm: "IMULQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 * auxint + {name: "MULQconst", argLength: 1, reg: gp11, asm: "IMULQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // 
arg0 * auxint {name: "MULLconst", argLength: 1, reg: gp11, asm: "IMULL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 * auxint {name: "HMULQ", argLength: 2, reg: gp21hmul, commutative: true, asm: "IMULQ", clobberFlags: true}, // (arg0 * arg1) >> width @@ -221,24 +222,24 @@ func init() { {name: "ANDQ", argLength: 2, reg: gp21, asm: "ANDQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1 {name: "ANDL", argLength: 2, reg: gp21, asm: "ANDL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1 - {name: "ANDQconst", argLength: 1, reg: gp11, asm: "ANDQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 & auxint + {name: "ANDQconst", argLength: 1, reg: gp11, asm: "ANDQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint {name: "ANDLconst", argLength: 1, reg: gp11, asm: "ANDL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint {name: "ORQ", argLength: 2, reg: gp21, asm: "ORQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 | arg1 {name: "ORL", argLength: 2, reg: gp21, asm: "ORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 | arg1 - {name: "ORQconst", argLength: 1, reg: gp11, asm: "ORQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 | auxint + {name: "ORQconst", argLength: 1, reg: gp11, asm: "ORQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint {name: "ORLconst", argLength: 1, reg: gp11, asm: "ORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint {name: "XORQ", argLength: 2, reg: gp21, asm: "XORQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 ^ arg1 {name: "XORL", argLength: 2, reg: gp21, asm: "XORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 ^ arg1 - {name: "XORQconst", argLength: 1, reg: gp11, asm: "XORQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint + {name: 
"XORQconst", argLength: 1, reg: gp11, asm: "XORQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint {name: "XORLconst", argLength: 1, reg: gp11, asm: "XORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint {name: "CMPQ", argLength: 2, reg: gp2flags, asm: "CMPQ", typ: "Flags"}, // arg0 compare to arg1 {name: "CMPL", argLength: 2, reg: gp2flags, asm: "CMPL", typ: "Flags"}, // arg0 compare to arg1 {name: "CMPW", argLength: 2, reg: gp2flags, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1 {name: "CMPB", argLength: 2, reg: gp2flags, asm: "CMPB", typ: "Flags"}, // arg0 compare to arg1 - {name: "CMPQconst", argLength: 1, reg: gp1flags, asm: "CMPQ", typ: "Flags", aux: "Int64"}, // arg0 compare to auxint + {name: "CMPQconst", argLength: 1, reg: gp1flags, asm: "CMPQ", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint {name: "CMPLconst", argLength: 1, reg: gp1flags, asm: "CMPL", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint {name: "CMPWconst", argLength: 1, reg: gp1flags, asm: "CMPW", typ: "Flags", aux: "Int16"}, // arg0 compare to auxint {name: "CMPBconst", argLength: 1, reg: gp1flags, asm: "CMPB", typ: "Flags", aux: "Int8"}, // arg0 compare to auxint @@ -255,7 +256,7 @@ func init() { {name: "TESTL", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTL", typ: "Flags"}, // (arg0 & arg1) compare to 0 {name: "TESTW", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTW", typ: "Flags"}, // (arg0 & arg1) compare to 0 {name: "TESTB", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTB", typ: "Flags"}, // (arg0 & arg1) compare to 0 - {name: "TESTQconst", argLength: 1, reg: gp1flags, asm: "TESTQ", typ: "Flags", aux: "Int64"}, // (arg0 & auxint) compare to 0 + {name: "TESTQconst", argLength: 1, reg: gp1flags, asm: "TESTQ", typ: "Flags", aux: "Int32"}, // (arg0 & auxint) compare to 0 {name: "TESTLconst", argLength: 1, reg: gp1flags, asm: "TESTL", typ: "Flags", aux: "Int32"}, // (arg0 & auxint) 
compare to 0 {name: "TESTWconst", argLength: 1, reg: gp1flags, asm: "TESTW", typ: "Flags", aux: "Int16"}, // (arg0 & auxint) compare to 0 {name: "TESTBconst", argLength: 1, reg: gp1flags, asm: "TESTB", typ: "Flags", aux: "Int8"}, // (arg0 & auxint) compare to 0 diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules index 81ac3c26af..45853b4b48 100644 --- a/src/cmd/compile/internal/ssa/gen/PPC64.rules +++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules @@ -526,49 +526,29 @@ (MOVBstorezero [6] destptr (MOVHstorezero [4] destptr (MOVWstorezero destptr mem))) -(Zero [8] destptr mem) -> - (MOVDstorezero destptr mem) -// Zero small numbers of words directly. -(Zero [12] destptr mem) -> +// MOVD for store with DS must have offsets that are multiple of 4 +(Zero [8] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 -> + (MOVDstorezero destptr mem) +(Zero [8] destptr mem) -> + (MOVWstorezero [4] destptr + (MOVWstorezero [0] destptr mem)) +// Handle these cases only if aligned properly, otherwise use general case below +(Zero [12] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 -> (MOVWstorezero [8] destptr (MOVDstorezero [0] destptr mem)) -(Zero [16] destptr mem) -> - (MOVDstorezero [8] destptr +(Zero [16] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 -> + (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem)) -(Zero [24] destptr mem) -> - (MOVDstorezero [16] destptr - (MOVDstorezero [8] destptr - (MOVDstorezero [0] destptr mem))) -(Zero [32] destptr mem) -> - (MOVDstorezero [24] destptr - (MOVDstorezero [16] destptr - (MOVDstorezero [8] destptr - (MOVDstorezero [0] destptr mem)))) - -(Zero [40] destptr mem) -> - (MOVDstorezero [32] destptr - (MOVDstorezero [24] destptr - (MOVDstorezero [16] destptr - (MOVDstorezero [8] destptr - (MOVDstorezero [0] destptr mem))))) - -(Zero [48] destptr mem) -> - (MOVDstorezero [40] destptr - (MOVDstorezero [32] destptr - (MOVDstorezero [24] destptr - (MOVDstorezero 
[16] destptr - (MOVDstorezero [8] destptr - (MOVDstorezero [0] destptr mem)))))) - -(Zero [56] destptr mem) -> - (MOVDstorezero [48] destptr - (MOVDstorezero [40] destptr - (MOVDstorezero [32] destptr - (MOVDstorezero [24] destptr - (MOVDstorezero [16] destptr - (MOVDstorezero [8] destptr - (MOVDstorezero [0] destptr mem))))))) +(Zero [24] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 -> + (MOVDstorezero [16] destptr + (MOVDstorezero [8] destptr + (MOVDstorezero [0] destptr mem))) +(Zero [32] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 -> + (MOVDstorezero [24] destptr + (MOVDstorezero [16] destptr + (MOVDstorezero [8] destptr + (MOVDstorezero [0] destptr mem)))) // Handle cases not handled above (Zero [s] ptr mem) -> (LoweredZero [s] ptr mem) diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index ae2dd5f550..763a1cbd4d 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -4824,7 +4824,7 @@ var opcodeTable = [...]opInfo{ }, { name: "ADDQconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, clobberFlags: true, asm: x86.AADDQ, @@ -4886,7 +4886,7 @@ var opcodeTable = [...]opInfo{ }, { name: "SUBQconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, resultInArg0: true, clobberFlags: true, @@ -4952,7 +4952,7 @@ var opcodeTable = [...]opInfo{ }, { name: "MULQconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, resultInArg0: true, clobberFlags: true, @@ -5232,7 +5232,7 @@ var opcodeTable = [...]opInfo{ }, { name: "ANDQconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, resultInArg0: true, clobberFlags: true, @@ -5298,7 +5298,7 @@ var opcodeTable = [...]opInfo{ }, { name: "ORQconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, resultInArg0: true, clobberFlags: true, @@ -5364,7 +5364,7 @@ var opcodeTable = [...]opInfo{ }, { name: "XORQconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, resultInArg0: true, clobberFlags: 
true, @@ -5440,7 +5440,7 @@ var opcodeTable = [...]opInfo{ }, { name: "CMPQconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, asm: x86.ACMPQ, reg: regInfo{ @@ -5598,7 +5598,7 @@ var opcodeTable = [...]opInfo{ }, { name: "TESTQconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, asm: x86.ATESTQ, reg: regInfo{ diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index f9a94cac36..9213616f83 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -4319,16 +4319,16 @@ func rewriteValueAMD64_OpAMD64MOVBQSX_0(v *Value) bool { v.AddArg(x) return true } - // match: (MOVBQSX x:(MOVBQSX _)) + // match: (MOVBQSX (MOVBQSX x)) // cond: - // result: x + // result: (MOVBQSX x) for { - x := v.Args[0] - if x.Op != OpAMD64MOVBQSX { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVBQSX { break } - v.reset(OpCopy) - v.Type = x.Type + x := v_0.Args[0] + v.reset(OpAMD64MOVBQSX) v.AddArg(x) return true } @@ -4536,16 +4536,16 @@ func rewriteValueAMD64_OpAMD64MOVBQZX_0(v *Value) bool { v.AddArg(x) return true } - // match: (MOVBQZX x:(MOVBQZX _)) + // match: (MOVBQZX (MOVBQZX x)) // cond: - // result: x + // result: (MOVBQZX x) for { - x := v.Args[0] - if x.Op != OpAMD64MOVBQZX { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVBQZX { break } - v.reset(OpCopy) - v.Type = x.Type + x := v_0.Args[0] + v.reset(OpAMD64MOVBQZX) v.AddArg(x) return true } @@ -4680,7 +4680,7 @@ func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool { return true } // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) - // cond: canMergeSym(sym1, sym2) + // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) for { off1 := v.AuxInt @@ -4694,7 +4694,7 @@ func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool { sym2 := v_0.Aux base := v_0.Args[0] mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) 
&& is32Bit(off1+off2)) { break } v.reset(OpAMD64MOVBload) @@ -4732,7 +4732,7 @@ func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool { // match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -4746,6 +4746,9 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVBloadidx1) v.AuxInt = c + d v.Aux = sym @@ -4755,7 +4758,7 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool { return true } // match: (MOVBloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -4769,6 +4772,9 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool { d := v_1.AuxInt ptr := v_1.Args[0] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVBloadidx1) v.AuxInt = c + d v.Aux = sym @@ -4778,7 +4784,7 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool { return true } // match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -4792,6 +4798,9 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool { d := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVBloadidx1) v.AuxInt = c + d v.Aux = sym @@ -4801,7 +4810,7 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool { return true } // match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -4815,6 +4824,9 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool { idx := v_0.Args[0] ptr := v.Args[1] mem := v.Args[2] 
+ if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVBloadidx1) v.AuxInt = c + d v.Aux = sym @@ -5440,7 +5452,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool { return true } // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) - // cond: canMergeSym(sym1, sym2) + // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) for { off1 := v.AuxInt @@ -5455,7 +5467,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool { base := v_0.Args[0] val := v.Args[1] mem := v.Args[2] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } v.reset(OpAMD64MOVBstore) @@ -5679,7 +5691,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v *Value) bool { // match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) - // cond: + // cond: ValAndOff(x).canAdd(c) // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { x := v.AuxInt @@ -5693,6 +5705,9 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(ValAndOff(x).canAdd(c)) { + break + } v.reset(OpAMD64MOVBstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym @@ -5702,7 +5717,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v *Value) bool { return true } // match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) - // cond: + // cond: ValAndOff(x).canAdd(c) // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { x := v.AuxInt @@ -5716,6 +5731,9 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v *Value) bool { c := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(ValAndOff(x).canAdd(c)) { + break + } v.reset(OpAMD64MOVBstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym @@ -5766,7 +5784,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { b := 
v.Block _ = b // match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -5781,6 +5799,9 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { idx := v.Args[1] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVBstoreidx1) v.AuxInt = c + d v.Aux = sym @@ -5791,7 +5812,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { return true } // match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -5806,6 +5827,9 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { idx := v_1.Args[0] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVBstoreidx1) v.AuxInt = c + d v.Aux = sym @@ -6368,42 +6392,42 @@ func rewriteValueAMD64_OpAMD64MOVLQSX_0(v *Value) bool { v.AddArg(x) return true } - // match: (MOVLQSX x:(MOVLQSX _)) + // match: (MOVLQSX (MOVLQSX x)) // cond: - // result: x + // result: (MOVLQSX x) for { - x := v.Args[0] - if x.Op != OpAMD64MOVLQSX { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVLQSX { break } - v.reset(OpCopy) - v.Type = x.Type + x := v_0.Args[0] + v.reset(OpAMD64MOVLQSX) v.AddArg(x) return true } - // match: (MOVLQSX x:(MOVWQSX _)) + // match: (MOVLQSX (MOVWQSX x)) // cond: - // result: x + // result: (MOVWQSX x) for { - x := v.Args[0] - if x.Op != OpAMD64MOVWQSX { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVWQSX { break } - v.reset(OpCopy) - v.Type = x.Type + x := v_0.Args[0] + v.reset(OpAMD64MOVWQSX) v.AddArg(x) return true } - // match: (MOVLQSX x:(MOVBQSX _)) + // match: (MOVLQSX (MOVBQSX x)) // cond: - // result: x + // result: (MOVBQSX x) for { - x := v.Args[0] - if x.Op != OpAMD64MOVBQSX { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVBQSX { break } - v.reset(OpCopy) - v.Type = x.Type + x := 
v_0.Args[0] + v.reset(OpAMD64MOVBQSX) v.AddArg(x) return true } @@ -6587,42 +6611,42 @@ func rewriteValueAMD64_OpAMD64MOVLQZX_0(v *Value) bool { v.AddArg(x) return true } - // match: (MOVLQZX x:(MOVLQZX _)) + // match: (MOVLQZX (MOVLQZX x)) // cond: - // result: x + // result: (MOVLQZX x) for { - x := v.Args[0] - if x.Op != OpAMD64MOVLQZX { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVLQZX { break } - v.reset(OpCopy) - v.Type = x.Type + x := v_0.Args[0] + v.reset(OpAMD64MOVLQZX) v.AddArg(x) return true } - // match: (MOVLQZX x:(MOVWQZX _)) + // match: (MOVLQZX (MOVWQZX x)) // cond: - // result: x + // result: (MOVWQZX x) for { - x := v.Args[0] - if x.Op != OpAMD64MOVWQZX { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVWQZX { break } - v.reset(OpCopy) - v.Type = x.Type + x := v_0.Args[0] + v.reset(OpAMD64MOVWQZX) v.AddArg(x) return true } - // match: (MOVLQZX x:(MOVBQZX _)) + // match: (MOVLQZX (MOVBQZX x)) // cond: - // result: x + // result: (MOVBQZX x) for { - x := v.Args[0] - if x.Op != OpAMD64MOVBQZX { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVBQZX { break } - v.reset(OpCopy) - v.Type = x.Type + x := v_0.Args[0] + v.reset(OpAMD64MOVBQZX) v.AddArg(x) return true } @@ -6837,7 +6861,7 @@ func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool { return true } // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) - // cond: canMergeSym(sym1, sym2) + // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) for { off1 := v.AuxInt @@ -6851,7 +6875,7 @@ func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool { sym2 := v_0.Aux base := v_0.Args[0] mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } v.reset(OpAMD64MOVLload) @@ -6939,7 +6963,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { return true } // match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) - // cond: + // cond: is32Bit(c+d) // result: 
(MOVLloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -6953,6 +6977,9 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVLloadidx1) v.AuxInt = c + d v.Aux = sym @@ -6962,7 +6989,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { return true } // match: (MOVLloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -6976,6 +7003,9 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { d := v_1.AuxInt ptr := v_1.Args[0] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVLloadidx1) v.AuxInt = c + d v.Aux = sym @@ -6985,7 +7015,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { return true } // match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -6999,6 +7029,9 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { d := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVLloadidx1) v.AuxInt = c + d v.Aux = sym @@ -7008,7 +7041,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { return true } // match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -7022,6 +7055,9 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { idx := v_0.Args[0] ptr := v.Args[1] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVLloadidx1) v.AuxInt = c + d v.Aux = sym @@ -7034,7 +7070,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v *Value) bool { // match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) - // 
cond: + // cond: is32Bit(c+d) // result: (MOVLloadidx4 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -7048,6 +7084,9 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVLloadidx4) v.AuxInt = c + d v.Aux = sym @@ -7057,7 +7096,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v *Value) bool { return true } // match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) - // cond: + // cond: is32Bit(c+4*d) // result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem) for { c := v.AuxInt @@ -7071,6 +7110,9 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v *Value) bool { d := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(is32Bit(c + 4*d)) { + break + } v.reset(OpAMD64MOVLloadidx4) v.AuxInt = c + 4*d v.Aux = sym @@ -7390,7 +7432,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool { // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) - // cond: canMergeSym(sym1, sym2) + // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) for { off1 := v.AuxInt @@ -7405,7 +7447,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool { base := v_0.Args[0] val := v.Args[1] mem := v.Args[2] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } v.reset(OpAMD64MOVLstore) @@ -7693,7 +7735,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool { return true } // match: (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) - // cond: + // cond: ValAndOff(x).canAdd(c) // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { x := v.AuxInt @@ -7707,6 +7749,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(ValAndOff(x).canAdd(c)) { + break + } 
v.reset(OpAMD64MOVLstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym @@ -7716,7 +7761,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool { return true } // match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) - // cond: + // cond: ValAndOff(x).canAdd(c) // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { x := v.AuxInt @@ -7730,6 +7775,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool { c := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(ValAndOff(x).canAdd(c)) { + break + } v.reset(OpAMD64MOVLstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym @@ -7785,7 +7833,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool { typ := &b.Func.Config.Types _ = typ // match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) - // cond: + // cond: ValAndOff(x).canAdd(c) // result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { x := v.AuxInt @@ -7799,6 +7847,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(ValAndOff(x).canAdd(c)) { + break + } v.reset(OpAMD64MOVLstoreconstidx4) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym @@ -7808,7 +7859,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool { return true } // match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) - // cond: + // cond: ValAndOff(x).canAdd(4*c) // result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem) for { x := v.AuxInt @@ -7822,6 +7873,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool { c := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(ValAndOff(x).canAdd(4 * c)) { + break + } v.reset(OpAMD64MOVLstoreconstidx4) v.AuxInt = ValAndOff(x).add(4 * c) v.Aux = sym @@ -7903,7 +7957,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool { return true } // match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val 
mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -7918,6 +7972,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool { idx := v.Args[1] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVLstoreidx1) v.AuxInt = c + d v.Aux = sym @@ -7928,7 +7985,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool { return true } // match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -7943,6 +8000,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool { idx := v_1.Args[0] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVLstoreidx1) v.AuxInt = c + d v.Aux = sym @@ -8063,7 +8123,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool { b := v.Block _ = b // match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -8078,6 +8138,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool { idx := v.Args[1] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVLstoreidx4) v.AuxInt = c + d v.Aux = sym @@ -8088,7 +8151,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool { return true } // match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) - // cond: + // cond: is32Bit(c+4*d) // result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -8103,6 +8166,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool { idx := v_1.Args[0] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + 4*d)) { + break + } v.reset(OpAMD64MOVLstoreidx4) v.AuxInt = c + 4*d v.Aux = sym @@ -8543,7 +8609,7 @@ func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { return true } // match: 
(MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) - // cond: canMergeSym(sym1, sym2) + // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) for { off1 := v.AuxInt @@ -8557,7 +8623,7 @@ func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { sym2 := v_0.Aux base := v_0.Args[0] mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } v.reset(OpAMD64MOVQload) @@ -8645,7 +8711,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { return true } // match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -8659,6 +8725,9 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVQloadidx1) v.AuxInt = c + d v.Aux = sym @@ -8668,7 +8737,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { return true } // match: (MOVQloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -8682,6 +8751,9 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { d := v_1.AuxInt ptr := v_1.Args[0] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVQloadidx1) v.AuxInt = c + d v.Aux = sym @@ -8691,7 +8763,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { return true } // match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -8705,6 +8777,9 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { d := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVQloadidx1) v.AuxInt = c + d v.Aux = sym @@ -8714,7 
+8789,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { return true } // match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -8728,6 +8803,9 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { idx := v_0.Args[0] ptr := v.Args[1] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVQloadidx1) v.AuxInt = c + d v.Aux = sym @@ -8740,7 +8818,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v *Value) bool { // match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVQloadidx8 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -8754,6 +8832,9 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVQloadidx8) v.AuxInt = c + d v.Aux = sym @@ -8763,7 +8844,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v *Value) bool { return true } // match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) - // cond: + // cond: is32Bit(c+8*d) // result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem) for { c := v.AuxInt @@ -8777,6 +8858,9 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v *Value) bool { d := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(is32Bit(c + 8*d)) { + break + } v.reset(OpAMD64MOVQloadidx8) v.AuxInt = c + 8*d v.Aux = sym @@ -8954,7 +9038,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool { return true } // match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) - // cond: canMergeSym(sym1, sym2) + // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) for { off1 := v.AuxInt @@ -8969,7 +9053,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool { base := v_0.Args[0] val := 
v.Args[1] mem := v.Args[2] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } v.reset(OpAMD64MOVQstore) @@ -9215,7 +9299,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool { return true } // match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) - // cond: + // cond: ValAndOff(x).canAdd(c) // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { x := v.AuxInt @@ -9229,6 +9313,9 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(ValAndOff(x).canAdd(c)) { + break + } v.reset(OpAMD64MOVQstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym @@ -9238,7 +9325,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool { return true } // match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) - // cond: + // cond: ValAndOff(x).canAdd(c) // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { x := v.AuxInt @@ -9252,6 +9339,9 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool { c := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(ValAndOff(x).canAdd(c)) { + break + } v.reset(OpAMD64MOVQstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym @@ -9264,7 +9354,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v *Value) bool { // match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem) - // cond: + // cond: ValAndOff(x).canAdd(c) // result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { x := v.AuxInt @@ -9278,6 +9368,9 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(ValAndOff(x).canAdd(c)) { + break + } v.reset(OpAMD64MOVQstoreconstidx8) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym @@ -9287,7 +9380,7 @@ func 
rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v *Value) bool { return true } // match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem) - // cond: + // cond: ValAndOff(x).canAdd(8*c) // result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem) for { x := v.AuxInt @@ -9301,6 +9394,9 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v *Value) bool { c := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(ValAndOff(x).canAdd(8 * c)) { + break + } v.reset(OpAMD64MOVQstoreconstidx8) v.AuxInt = ValAndOff(x).add(8 * c) v.Aux = sym @@ -9340,7 +9436,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool { return true } // match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -9355,6 +9451,9 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool { idx := v.Args[1] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVQstoreidx1) v.AuxInt = c + d v.Aux = sym @@ -9365,7 +9464,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool { return true } // match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -9380,6 +9479,9 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool { idx := v_1.Args[0] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVQstoreidx1) v.AuxInt = c + d v.Aux = sym @@ -9393,7 +9495,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v *Value) bool { // match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -9408,6 +9510,9 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v *Value) bool { idx := 
v.Args[1] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVQstoreidx8) v.AuxInt = c + d v.Aux = sym @@ -9418,7 +9523,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v *Value) bool { return true } // match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) - // cond: + // cond: is32Bit(c+8*d) // result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -9433,6 +9538,9 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v *Value) bool { idx := v_1.Args[0] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + 8*d)) { + break + } v.reset(OpAMD64MOVQstoreidx8) v.AuxInt = c + 8*d v.Aux = sym @@ -9605,7 +9713,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool { return true } // match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -9619,6 +9727,9 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVSDloadidx1) v.AuxInt = c + d v.Aux = sym @@ -9628,7 +9739,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool { return true } // match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -9642,6 +9753,9 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool { d := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVSDloadidx1) v.AuxInt = c + d v.Aux = sym @@ -9654,7 +9768,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v *Value) bool { // match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -9668,6 
+9782,9 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVSDloadidx8) v.AuxInt = c + d v.Aux = sym @@ -9677,7 +9794,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v *Value) bool { return true } // match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) - // cond: + // cond: is32Bit(c+8*d) // result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem) for { c := v.AuxInt @@ -9691,6 +9808,9 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v *Value) bool { d := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(is32Bit(c + 8*d)) { + break + } v.reset(OpAMD64MOVSDloadidx8) v.AuxInt = c + 8*d v.Aux = sym @@ -9874,7 +9994,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool { return true } // match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -9889,6 +10009,9 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool { idx := v.Args[1] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVSDstoreidx1) v.AuxInt = c + d v.Aux = sym @@ -9899,7 +10022,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool { return true } // match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -9914,6 +10037,9 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool { idx := v_1.Args[0] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVSDstoreidx1) v.AuxInt = c + d v.Aux = sym @@ -9927,7 +10053,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v *Value) bool { // match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) - // cond: 
+ // cond: is32Bit(c+d) // result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -9942,6 +10068,9 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v *Value) bool { idx := v.Args[1] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVSDstoreidx8) v.AuxInt = c + d v.Aux = sym @@ -9952,7 +10081,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v *Value) bool { return true } // match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) - // cond: + // cond: is32Bit(c+8*d) // result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -9967,6 +10096,9 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v *Value) bool { idx := v_1.Args[0] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + 8*d)) { + break + } v.reset(OpAMD64MOVSDstoreidx8) v.AuxInt = c + 8*d v.Aux = sym @@ -10139,7 +10271,7 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool { return true } // match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -10153,6 +10285,9 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVSSloadidx1) v.AuxInt = c + d v.Aux = sym @@ -10162,7 +10297,7 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool { return true } // match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -10176,6 +10311,9 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool { d := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVSSloadidx1) v.AuxInt = c + d v.Aux = sym @@ -10188,7 +10326,7 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool { } func 
rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v *Value) bool { // match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -10202,6 +10340,9 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVSSloadidx4) v.AuxInt = c + d v.Aux = sym @@ -10211,7 +10352,7 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v *Value) bool { return true } // match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) - // cond: + // cond: is32Bit(c+4*d) // result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem) for { c := v.AuxInt @@ -10225,6 +10366,9 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v *Value) bool { d := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(is32Bit(c + 4*d)) { + break + } v.reset(OpAMD64MOVSSloadidx4) v.AuxInt = c + 4*d v.Aux = sym @@ -10408,7 +10552,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool { return true } // match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -10423,6 +10567,9 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool { idx := v.Args[1] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVSSstoreidx1) v.AuxInt = c + d v.Aux = sym @@ -10433,7 +10580,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool { return true } // match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -10448,6 +10595,9 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool { idx := v_1.Args[0] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVSSstoreidx1) v.AuxInt = c 
+ d v.Aux = sym @@ -10461,7 +10611,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v *Value) bool { // match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -10476,6 +10626,9 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v *Value) bool { idx := v.Args[1] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVSSstoreidx4) v.AuxInt = c + d v.Aux = sym @@ -10486,7 +10639,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v *Value) bool { return true } // match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) - // cond: + // cond: is32Bit(c+4*d) // result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -10501,6 +10654,9 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v *Value) bool { idx := v_1.Args[0] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + 4*d)) { + break + } v.reset(OpAMD64MOVSSstoreidx4) v.AuxInt = c + 4*d v.Aux = sym @@ -10611,29 +10767,29 @@ func rewriteValueAMD64_OpAMD64MOVWQSX_0(v *Value) bool { v.AddArg(x) return true } - // match: (MOVWQSX x:(MOVWQSX _)) + // match: (MOVWQSX (MOVWQSX x)) // cond: - // result: x + // result: (MOVWQSX x) for { - x := v.Args[0] - if x.Op != OpAMD64MOVWQSX { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVWQSX { break } - v.reset(OpCopy) - v.Type = x.Type + x := v_0.Args[0] + v.reset(OpAMD64MOVWQSX) v.AddArg(x) return true } - // match: (MOVWQSX x:(MOVBQSX _)) + // match: (MOVWQSX (MOVBQSX x)) // cond: - // result: x + // result: (MOVBQSX x) for { - x := v.Args[0] - if x.Op != OpAMD64MOVBQSX { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVBQSX { break } - v.reset(OpCopy) - v.Type = x.Type + x := v_0.Args[0] + v.reset(OpAMD64MOVBQSX) v.AddArg(x) return true } @@ -10843,29 +10999,29 @@ func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) bool { v.AddArg(x) 
return true } - // match: (MOVWQZX x:(MOVWQZX _)) + // match: (MOVWQZX (MOVWQZX x)) // cond: - // result: x + // result: (MOVWQZX x) for { - x := v.Args[0] - if x.Op != OpAMD64MOVWQZX { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVWQZX { break } - v.reset(OpCopy) - v.Type = x.Type + x := v_0.Args[0] + v.reset(OpAMD64MOVWQZX) v.AddArg(x) return true } - // match: (MOVWQZX x:(MOVBQZX _)) + // match: (MOVWQZX (MOVBQZX x)) // cond: - // result: x + // result: (MOVBQZX x) for { - x := v.Args[0] - if x.Op != OpAMD64MOVBQZX { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVBQZX { break } - v.reset(OpCopy) - v.Type = x.Type + x := v_0.Args[0] + v.reset(OpAMD64MOVBQZX) v.AddArg(x) return true } @@ -11028,7 +11184,7 @@ func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool { return true } // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) - // cond: canMergeSym(sym1, sym2) + // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) for { off1 := v.AuxInt @@ -11042,7 +11198,7 @@ func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool { sym2 := v_0.Aux base := v_0.Args[0] mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } v.reset(OpAMD64MOVWload) @@ -11130,7 +11286,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { return true } // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -11144,6 +11300,9 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVWloadidx1) v.AuxInt = c + d v.Aux = sym @@ -11153,7 +11312,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { return true } // match: (MOVWloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) - // cond: + // cond: is32Bit(c+d) // result: 
(MOVWloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -11167,6 +11326,9 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { d := v_1.AuxInt ptr := v_1.Args[0] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVWloadidx1) v.AuxInt = c + d v.Aux = sym @@ -11176,7 +11338,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { return true } // match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -11190,6 +11352,9 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { d := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVWloadidx1) v.AuxInt = c + d v.Aux = sym @@ -11199,7 +11364,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { return true } // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -11213,6 +11378,9 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { idx := v_0.Args[0] ptr := v.Args[1] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVWloadidx1) v.AuxInt = c + d v.Aux = sym @@ -11225,7 +11393,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v *Value) bool { // match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVWloadidx2 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -11239,6 +11407,9 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVWloadidx2) v.AuxInt = c + d v.Aux = sym @@ -11248,7 +11419,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v *Value) bool { return true } // match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] 
idx) mem) - // cond: + // cond: is32Bit(c+2*d) // result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem) for { c := v.AuxInt @@ -11262,6 +11433,9 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v *Value) bool { d := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(is32Bit(c + 2*d)) { + break + } v.reset(OpAMD64MOVWloadidx2) v.AuxInt = c + 2*d v.Aux = sym @@ -11581,7 +11755,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool { // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) - // cond: canMergeSym(sym1, sym2) + // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) for { off1 := v.AuxInt @@ -11596,7 +11770,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool { base := v_0.Args[0] val := v.Args[1] mem := v.Args[2] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } v.reset(OpAMD64MOVWstore) @@ -11873,7 +12047,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v *Value) bool { return true } // match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) - // cond: + // cond: ValAndOff(x).canAdd(c) // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { x := v.AuxInt @@ -11887,6 +12061,9 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(ValAndOff(x).canAdd(c)) { + break + } v.reset(OpAMD64MOVWstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym @@ -11896,7 +12073,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v *Value) bool { return true } // match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) - // cond: + // cond: ValAndOff(x).canAdd(c) // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { x := v.AuxInt @@ -11910,6 +12087,9 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v *Value) bool 
{ c := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(ValAndOff(x).canAdd(c)) { + break + } v.reset(OpAMD64MOVWstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym @@ -11960,7 +12140,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v *Value) bool { b := v.Block _ = b // match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) - // cond: + // cond: ValAndOff(x).canAdd(c) // result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { x := v.AuxInt @@ -11974,6 +12154,9 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(ValAndOff(x).canAdd(c)) { + break + } v.reset(OpAMD64MOVWstoreconstidx2) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym @@ -11983,7 +12166,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v *Value) bool { return true } // match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) - // cond: + // cond: ValAndOff(x).canAdd(2*c) // result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem) for { x := v.AuxInt @@ -11997,6 +12180,9 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v *Value) bool { c := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(ValAndOff(x).canAdd(2 * c)) { + break + } v.reset(OpAMD64MOVWstoreconstidx2) v.AuxInt = ValAndOff(x).add(2 * c) v.Aux = sym @@ -12075,7 +12261,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool { return true } // match: (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -12090,6 +12276,9 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool { idx := v.Args[1] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVWstoreidx1) v.AuxInt = c + d v.Aux = sym @@ -12100,7 +12289,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool { return true } // match: (MOVWstoreidx1 [c] 
{sym} ptr (ADDQconst [d] idx) val mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -12115,6 +12304,9 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool { idx := v_1.Args[0] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVWstoreidx1) v.AuxInt = c + d v.Aux = sym @@ -12235,7 +12427,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool { b := v.Block _ = b // match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -12250,6 +12442,9 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool { idx := v.Args[1] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVWstoreidx2) v.AuxInt = c + d v.Aux = sym @@ -12260,7 +12455,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool { return true } // match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) - // cond: + // cond: is32Bit(c+2*d) // result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -12275,6 +12470,9 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool { idx := v_1.Args[0] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + 2*d)) { + break + } v.reset(OpAMD64MOVWstoreidx2) v.AuxInt = c + 2*d v.Aux = sym diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go index 20e354cb4a..75bf763d12 100644 --- a/src/cmd/compile/internal/ssa/rewritePPC64.go +++ b/src/cmd/compile/internal/ssa/rewritePPC64.go @@ -10727,35 +10727,39 @@ func rewriteValuePPC64_OpZero_0(v *Value) bool { v.AddArg(v0) return true } - // match: (Zero [8] destptr mem) - // cond: + // match: (Zero [8] {t} destptr mem) + // cond: t.(*types.Type).Alignment()%4 == 0 // result: (MOVDstorezero destptr mem) for { if v.AuxInt != 8 { break } + t := v.Aux _ = 
v.Args[1] destptr := v.Args[0] mem := v.Args[1] + if !(t.(*types.Type).Alignment()%4 == 0) { + break + } v.reset(OpPPC64MOVDstorezero) v.AddArg(destptr) v.AddArg(mem) return true } - // match: (Zero [12] destptr mem) + // match: (Zero [8] destptr mem) // cond: - // result: (MOVWstorezero [8] destptr (MOVDstorezero [0] destptr mem)) + // result: (MOVWstorezero [4] destptr (MOVWstorezero [0] destptr mem)) for { - if v.AuxInt != 12 { + if v.AuxInt != 8 { break } _ = v.Args[1] destptr := v.Args[0] mem := v.Args[1] v.reset(OpPPC64MOVWstorezero) - v.AuxInt = 8 + v.AuxInt = 4 v.AddArg(destptr) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) + v0 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, types.TypeMem) v0.AuxInt = 0 v0.AddArg(destptr) v0.AddArg(mem) @@ -10767,17 +10771,21 @@ func rewriteValuePPC64_OpZero_0(v *Value) bool { func rewriteValuePPC64_OpZero_10(v *Value) bool { b := v.Block _ = b - // match: (Zero [16] destptr mem) - // cond: - // result: (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem)) + // match: (Zero [12] {t} destptr mem) + // cond: t.(*types.Type).Alignment()%4 == 0 + // result: (MOVWstorezero [8] destptr (MOVDstorezero [0] destptr mem)) for { - if v.AuxInt != 16 { + if v.AuxInt != 12 { break } + t := v.Aux _ = v.Args[1] destptr := v.Args[0] mem := v.Args[1] - v.reset(OpPPC64MOVDstorezero) + if !(t.(*types.Type).Alignment()%4 == 0) { + break + } + v.reset(OpPPC64MOVWstorezero) v.AuxInt = 8 v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) @@ -10787,161 +10795,85 @@ func rewriteValuePPC64_OpZero_10(v *Value) bool { v.AddArg(v0) return true } - // match: (Zero [24] destptr mem) - // cond: - // result: (MOVDstorezero [16] destptr (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem))) + // match: (Zero [16] {t} destptr mem) + // cond: t.(*types.Type).Alignment()%4 == 0 + // result: (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem)) for { - if v.AuxInt != 24 { + if v.AuxInt != 16 { break } 
+ t := v.Aux _ = v.Args[1] destptr := v.Args[0] mem := v.Args[1] - v.reset(OpPPC64MOVDstorezero) - v.AuxInt = 16 - v.AddArg(destptr) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v0.AuxInt = 8 - v0.AddArg(destptr) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v1.AuxInt = 0 - v1.AddArg(destptr) - v1.AddArg(mem) - v0.AddArg(v1) - v.AddArg(v0) - return true - } - // match: (Zero [32] destptr mem) - // cond: - // result: (MOVDstorezero [24] destptr (MOVDstorezero [16] destptr (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem)))) - for { - if v.AuxInt != 32 { + if !(t.(*types.Type).Alignment()%4 == 0) { break } - _ = v.Args[1] - destptr := v.Args[0] - mem := v.Args[1] v.reset(OpPPC64MOVDstorezero) - v.AuxInt = 24 + v.AuxInt = 8 v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v0.AuxInt = 16 + v0.AuxInt = 0 v0.AddArg(destptr) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v1.AuxInt = 8 - v1.AddArg(destptr) - v2 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v2.AuxInt = 0 - v2.AddArg(destptr) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) + v0.AddArg(mem) v.AddArg(v0) return true } - // match: (Zero [40] destptr mem) - // cond: - // result: (MOVDstorezero [32] destptr (MOVDstorezero [24] destptr (MOVDstorezero [16] destptr (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem))))) + // match: (Zero [24] {t} destptr mem) + // cond: t.(*types.Type).Alignment()%4 == 0 + // result: (MOVDstorezero [16] destptr (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem))) for { - if v.AuxInt != 40 { + if v.AuxInt != 24 { break } + t := v.Aux _ = v.Args[1] destptr := v.Args[0] mem := v.Args[1] - v.reset(OpPPC64MOVDstorezero) - v.AuxInt = 32 - v.AddArg(destptr) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v0.AuxInt = 24 - v0.AddArg(destptr) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v1.AuxInt = 16 - v1.AddArg(destptr) - 
v2 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v2.AuxInt = 8 - v2.AddArg(destptr) - v3 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v3.AuxInt = 0 - v3.AddArg(destptr) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) - return true - } - // match: (Zero [48] destptr mem) - // cond: - // result: (MOVDstorezero [40] destptr (MOVDstorezero [32] destptr (MOVDstorezero [24] destptr (MOVDstorezero [16] destptr (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem)))))) - for { - if v.AuxInt != 48 { + if !(t.(*types.Type).Alignment()%4 == 0) { break } - _ = v.Args[1] - destptr := v.Args[0] - mem := v.Args[1] v.reset(OpPPC64MOVDstorezero) - v.AuxInt = 40 + v.AuxInt = 16 v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v0.AuxInt = 32 + v0.AuxInt = 8 v0.AddArg(destptr) v1 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v1.AuxInt = 24 + v1.AuxInt = 0 v1.AddArg(destptr) - v2 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v2.AuxInt = 16 - v2.AddArg(destptr) - v3 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v3.AuxInt = 8 - v3.AddArg(destptr) - v4 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v4.AuxInt = 0 - v4.AddArg(destptr) - v4.AddArg(mem) - v3.AddArg(v4) - v2.AddArg(v3) - v1.AddArg(v2) + v1.AddArg(mem) v0.AddArg(v1) v.AddArg(v0) return true } - // match: (Zero [56] destptr mem) - // cond: - // result: (MOVDstorezero [48] destptr (MOVDstorezero [40] destptr (MOVDstorezero [32] destptr (MOVDstorezero [24] destptr (MOVDstorezero [16] destptr (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem))))))) + // match: (Zero [32] {t} destptr mem) + // cond: t.(*types.Type).Alignment()%4 == 0 + // result: (MOVDstorezero [24] destptr (MOVDstorezero [16] destptr (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem)))) for { - if v.AuxInt != 56 { + if v.AuxInt != 32 { break } + t := v.Aux _ = v.Args[1] destptr := v.Args[0] 
mem := v.Args[1] + if !(t.(*types.Type).Alignment()%4 == 0) { + break + } v.reset(OpPPC64MOVDstorezero) - v.AuxInt = 48 + v.AuxInt = 24 v.AddArg(destptr) v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v0.AuxInt = 40 + v0.AuxInt = 16 v0.AddArg(destptr) v1 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v1.AuxInt = 32 + v1.AuxInt = 8 v1.AddArg(destptr) v2 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v2.AuxInt = 24 + v2.AuxInt = 0 v2.AddArg(destptr) - v3 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v3.AuxInt = 16 - v3.AddArg(destptr) - v4 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v4.AuxInt = 8 - v4.AddArg(destptr) - v5 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v5.AuxInt = 0 - v5.AddArg(destptr) - v5.AddArg(mem) - v4.AddArg(v5) - v3.AddArg(v4) - v2.AddArg(v3) + v2.AddArg(mem) v1.AddArg(v2) v0.AddArg(v1) v.AddArg(v0) diff --git a/src/cmd/compile/internal/syntax/nodes_test.go b/src/cmd/compile/internal/syntax/nodes_test.go index be9d5d897c..1bba9eeacf 100644 --- a/src/cmd/compile/internal/syntax/nodes_test.go +++ b/src/cmd/compile/internal/syntax/nodes_test.go @@ -291,7 +291,7 @@ func testPos(t *testing.T, list []test, prefix, suffix string, extract func(*Fil } // build syntaxt tree - file, err := ParseBytes(nil, []byte(src), nil, nil, 0) + file, err := ParseBytes(nil, []byte(src), nil, nil, nil, 0) if err != nil { t.Errorf("parse error: %s: %v (%s)", src, err, test.nodetyp) continue diff --git a/src/cmd/compile/internal/syntax/parser.go b/src/cmd/compile/internal/syntax/parser.go index bcf56d5faa..b9129b0d9c 100644 --- a/src/cmd/compile/internal/syntax/parser.go +++ b/src/cmd/compile/internal/syntax/parser.go @@ -16,9 +16,10 @@ const debug = false const trace = false type parser struct { - base *src.PosBase - errh ErrorHandler - mode Mode + base *src.PosBase + errh ErrorHandler + fileh FilenameHandler + mode Mode scanner first error // first error encountered @@ -29,9 
+30,10 @@ type parser struct { indent []byte // tracing support } -func (p *parser) init(base *src.PosBase, r io.Reader, errh ErrorHandler, pragh PragmaHandler, mode Mode) { +func (p *parser) init(base *src.PosBase, r io.Reader, errh ErrorHandler, pragh PragmaHandler, fileh FilenameHandler, mode Mode) { p.base = base p.errh = errh + p.fileh = fileh p.mode = mode p.scanner.init( r, @@ -76,7 +78,11 @@ func (p *parser) updateBase(line, col uint, text string) { p.error_at(p.pos_at(line, col+uint(i+1)), "invalid line number: "+nstr) return } - p.base = src.NewLinePragmaBase(src.MakePos(p.base.Pos().Base(), line, col), text[:i], uint(n)) + absFile := text[:i] + if p.fileh != nil { + absFile = p.fileh(absFile) + } + p.base = src.NewLinePragmaBase(src.MakePos(p.base.Pos().Base(), line, col), absFile, uint(n)) } func (p *parser) got(tok token) bool { diff --git a/src/cmd/compile/internal/syntax/parser_test.go b/src/cmd/compile/internal/syntax/parser_test.go index 4c317dab60..0478088ec8 100644 --- a/src/cmd/compile/internal/syntax/parser_test.go +++ b/src/cmd/compile/internal/syntax/parser_test.go @@ -131,7 +131,7 @@ func verifyPrint(filename string, ast1 *File) { panic(err) } - ast2, err := ParseBytes(src.NewFileBase(filename, filename), buf1.Bytes(), nil, nil, 0) + ast2, err := ParseBytes(src.NewFileBase(filename, filename), buf1.Bytes(), nil, nil, nil, 0) if err != nil { panic(err) } @@ -155,7 +155,7 @@ func verifyPrint(filename string, ast1 *File) { } func TestIssue17697(t *testing.T) { - _, err := ParseBytes(nil, nil, nil, nil, 0) // return with parser error, don't panic + _, err := ParseBytes(nil, nil, nil, nil, nil, 0) // return with parser error, don't panic if err == nil { t.Errorf("no error reported") } @@ -199,8 +199,16 @@ func TestLineDirectives(t *testing.T) { // test effect of //line directive on (relative) position information {"//line foo:123\n foo", "syntax error: package statement must be first", "foo", 123 - linebase, 3}, {"//line foo:123\n//line 
bar:345\nfoo", "syntax error: package statement must be first", "bar", 345 - linebase, 0}, + + {"//line " + runtime.GOROOT() + "/src/a/a.go:123\n foo", "syntax error: package statement must be first", "$GOROOT/src/a/a.go", 123 - linebase, 3}, } { - _, err := ParseBytes(nil, []byte(test.src), nil, nil, 0) + fileh := func(name string) string { + if strings.HasPrefix(name, runtime.GOROOT()) { + return "$GOROOT" + name[len(runtime.GOROOT()):] + } + return name + } + _, err := ParseBytes(nil, []byte(test.src), nil, nil, fileh, 0) if err == nil { t.Errorf("%s: no error reported", test.src) continue diff --git a/src/cmd/compile/internal/syntax/printer_test.go b/src/cmd/compile/internal/syntax/printer_test.go index 14652f4ac6..bbf75a957d 100644 --- a/src/cmd/compile/internal/syntax/printer_test.go +++ b/src/cmd/compile/internal/syntax/printer_test.go @@ -29,7 +29,7 @@ func TestPrintString(t *testing.T) { "package p; type _ = int; type T1 = struct{}; type ( _ = *struct{}; T2 = float32 )", // TODO(gri) expand } { - ast, err := ParseBytes(nil, []byte(want), nil, nil, 0) + ast, err := ParseBytes(nil, []byte(want), nil, nil, nil, 0) if err != nil { t.Error(err) continue diff --git a/src/cmd/compile/internal/syntax/scanner_test.go b/src/cmd/compile/internal/syntax/scanner_test.go index e434db9a91..53995e0c79 100644 --- a/src/cmd/compile/internal/syntax/scanner_test.go +++ b/src/cmd/compile/internal/syntax/scanner_test.go @@ -7,6 +7,7 @@ package syntax import ( "fmt" "os" + "strings" "testing" ) @@ -367,3 +368,15 @@ func TestScanErrors(t *testing.T) { } } } + +func TestIssue21938(t *testing.T) { + s := "/*" + strings.Repeat(" ", 4089) + "*/ .5" + + var got scanner + got.init(strings.NewReader(s), nil, nil) + got.next() + + if got.tok != _Literal || got.lit != ".5" { + t.Errorf("got %s %q; want %s %q", got.tok, got.lit, _Literal, ".5") + } +} diff --git a/src/cmd/compile/internal/syntax/source.go b/src/cmd/compile/internal/syntax/source.go index 93547213c0..4e3551225a 100644 --- 
a/src/cmd/compile/internal/syntax/source.go +++ b/src/cmd/compile/internal/syntax/source.go @@ -164,11 +164,12 @@ func (s *source) fill() { s.lit = append(s.lit, s.buf[s.suf:s.r0]...) s.suf = 1 // == s.r0 after slide below } - s.offs += s.r0 - 1 - r := s.r - s.r0 + 1 // last read char plus one byte - s.w = r + copy(s.buf[r:], s.buf[s.r:s.w]) - s.r = r - s.r0 = 1 + n := s.r0 - 1 + copy(s.buf[:], s.buf[n:s.w]) + s.offs += n + s.r0 = 1 // eqv: s.r0 -= n + s.r -= n + s.w -= n } // read more data: try a limited number of times diff --git a/src/cmd/compile/internal/syntax/syntax.go b/src/cmd/compile/internal/syntax/syntax.go index ed5e254724..f58d5efd29 100644 --- a/src/cmd/compile/internal/syntax/syntax.go +++ b/src/cmd/compile/internal/syntax/syntax.go @@ -39,11 +39,15 @@ type ErrorHandler func(err error) // appropriate. type Pragma uint16 -// A PragmaHandler is used to process //line and //go: directives as +// A PragmaHandler is used to process //go: directives as // they're scanned. The returned Pragma value will be unioned into the // next FuncDecl node. type PragmaHandler func(pos src.Pos, text string) Pragma +// A FilenameHandler is used to process each filename encountered +// in //line directives. The returned value is used as the absolute filename. +type FilenameHandler func(name string) string + // Parse parses a single Go source file from src and returns the corresponding // syntax tree. If there are errors, Parse will return the first error found, // and a possibly partially constructed syntax tree, or nil if no correct package @@ -55,8 +59,11 @@ type PragmaHandler func(pos src.Pos, text string) Pragma // // If a PragmaHandler is provided, it is called with each pragma encountered. // +// If a FilenameHandler is provided, it is called to process each filename +// encountered in //line directives. +// // The Mode argument is currently ignored. 
-func Parse(base *src.PosBase, src io.Reader, errh ErrorHandler, pragh PragmaHandler, mode Mode) (_ *File, first error) { +func Parse(base *src.PosBase, src io.Reader, errh ErrorHandler, pragh PragmaHandler, fileh FilenameHandler, mode Mode) (_ *File, first error) { defer func() { if p := recover(); p != nil { if err, ok := p.(Error); ok { @@ -68,14 +75,14 @@ func Parse(base *src.PosBase, src io.Reader, errh ErrorHandler, pragh PragmaHand }() var p parser - p.init(base, src, errh, pragh, mode) + p.init(base, src, errh, pragh, fileh, mode) p.next() return p.fileOrNil(), p.first } // ParseBytes behaves like Parse but it reads the source from the []byte slice provided. -func ParseBytes(base *src.PosBase, src []byte, errh ErrorHandler, pragh PragmaHandler, mode Mode) (*File, error) { - return Parse(base, &bytesReader{src}, errh, pragh, mode) +func ParseBytes(base *src.PosBase, src []byte, errh ErrorHandler, pragh PragmaHandler, fileh FilenameHandler, mode Mode) (*File, error) { + return Parse(base, &bytesReader{src}, errh, pragh, fileh, mode) } type bytesReader struct { @@ -101,5 +108,5 @@ func ParseFile(filename string, errh ErrorHandler, pragh PragmaHandler, mode Mod return nil, err } defer f.Close() - return Parse(src.NewFileBase(filename, filename), f, errh, pragh, mode) + return Parse(src.NewFileBase(filename, filename), f, errh, pragh, nil, mode) } diff --git a/src/cmd/go/go_test.go b/src/cmd/go/go_test.go index 25c1846921..a12df2988c 100644 --- a/src/cmd/go/go_test.go +++ b/src/cmd/go/go_test.go @@ -1164,7 +1164,7 @@ func testMove(t *testing.T, vcs, url, base, config string) { tg.runFail("get", "-d", "-u", url) tg.grepStderr("is a custom import path for", "go get -d -u "+url+" failed for wrong reason") tg.runFail("get", "-d", "-f", "-u", url) - tg.grepStderr("validating server certificate|not found", "go get -d -f -u "+url+" failed for wrong reason") + tg.grepStderr("validating server certificate|[nN]ot [fF]ound", "go get -d -f -u "+url+" failed for wrong 
reason") } func TestInternalPackageErrorsAreHandled(t *testing.T) { @@ -1185,10 +1185,9 @@ func TestMoveGit(t *testing.T) { testMove(t, "git", "rsc.io/pdf", "pdf", "rsc.io/pdf/.git/config") } -// TODO(rsc): Set up a test case on bitbucket for hg. -// func TestMoveHG(t *testing.T) { -// testMove(t, "hg", "rsc.io/x86/x86asm", "x86", "rsc.io/x86/.hg/hgrc") -// } +func TestMoveHG(t *testing.T) { + testMove(t, "hg", "vcs-test.golang.org/go/custom-hg-hello", "custom-hg-hello", "vcs-test.golang.org/go/custom-hg-hello/.hg/hgrc") +} // TODO(rsc): Set up a test case on SourceForge (?) for svn. // func testMoveSVN(t *testing.T) { @@ -1317,6 +1316,25 @@ func TestGetGitDefaultBranch(t *testing.T) { tg.grepStdout(`\* another-branch`, "not on correct default branch") } +func TestAccidentalGitCheckout(t *testing.T) { + testenv.MustHaveExternalNetwork(t) + if _, err := exec.LookPath("git"); err != nil { + t.Skip("skipping because git binary not found") + } + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.tempDir("src") + tg.setenv("GOPATH", tg.path(".")) + + tg.runFail("get", "-u", "vcs-test.golang.org/go/test1-svn-git") + tg.grepStderr("src[\\\\/]vcs-test.* uses git, but parent .*src[\\\\/]vcs-test.* uses svn", "get did not fail for right reason") + + tg.runFail("get", "-u", "vcs-test.golang.org/go/test2-svn-git/test2main") + tg.grepStderr("src[\\\\/]vcs-test.* uses git, but parent .*src[\\\\/]vcs-test.* uses svn", "get did not fail for right reason") +} + func TestErrorMessageForSyntaxErrorInTestGoFileSaysFAIL(t *testing.T) { tg := testgo(t) defer tg.cleanup() @@ -2829,7 +2847,7 @@ func TestImportMain(t *testing.T) { func TestFoo(t *testing.T) {} `) tg.setenv("GOPATH", tg.path(".")) - tg.creatingTemp("x") + tg.creatingTemp("x" + exeSuffix) tg.run("build", "x") tg.run("test", "x") diff --git a/src/cmd/go/internal/get/get.go b/src/cmd/go/internal/get/get.go index 550321198d..e5dda643e4 100644 --- a/src/cmd/go/internal/get/get.go +++ b/src/cmd/go/internal/get/get.go @@ 
-439,6 +439,11 @@ func downloadPackage(p *load.Package) error { p.Internal.Build.PkgRoot = filepath.Join(list[0], "pkg") } root := filepath.Join(p.Internal.Build.SrcRoot, filepath.FromSlash(rootPath)) + + if err := checkNestedVCS(vcs, root, p.Internal.Build.SrcRoot); err != nil { + return err + } + // If we've considered this repository already, don't do it again. if downloadRootCache[root] { return nil diff --git a/src/cmd/go/internal/get/vcs.go b/src/cmd/go/internal/get/vcs.go index 71d0b51344..86d2e32efb 100644 --- a/src/cmd/go/internal/get/vcs.go +++ b/src/cmd/go/internal/get/vcs.go @@ -506,11 +506,28 @@ func vcsFromDir(dir, srcRoot string) (vcs *vcsCmd, root string, err error) { return nil, "", fmt.Errorf("directory %q is outside source root %q", dir, srcRoot) } + var vcsRet *vcsCmd + var rootRet string + origDir := dir for len(dir) > len(srcRoot) { for _, vcs := range vcsList { if _, err := os.Stat(filepath.Join(dir, "."+vcs.cmd)); err == nil { - return vcs, filepath.ToSlash(dir[len(srcRoot)+1:]), nil + root := filepath.ToSlash(dir[len(srcRoot)+1:]) + // Record first VCS we find, but keep looking, + // to detect mistakes like one kind of VCS inside another. + if vcsRet == nil { + vcsRet = vcs + rootRet = root + continue + } + // Allow .git inside .git, which can arise due to submodules. + if vcsRet == vcs && vcs.cmd == "git" { + continue + } + // Otherwise, we have one VCS inside a different VCS. + return nil, "", fmt.Errorf("directory %q uses %s, but parent %q uses %s", + filepath.Join(srcRoot, rootRet), vcsRet.cmd, filepath.Join(srcRoot, root), vcs.cmd) } } @@ -523,9 +540,48 @@ func vcsFromDir(dir, srcRoot string) (vcs *vcsCmd, root string, err error) { dir = ndir } + if vcsRet != nil { + return vcsRet, rootRet, nil + } + return nil, "", fmt.Errorf("directory %q is not using a known version control system", origDir) } +// checkNestedVCS checks for an incorrectly-nested VCS-inside-VCS +// situation for dir, checking parents up until srcRoot. 
+func checkNestedVCS(vcs *vcsCmd, dir, srcRoot string) error { + if len(dir) <= len(srcRoot) || dir[len(srcRoot)] != filepath.Separator { + return fmt.Errorf("directory %q is outside source root %q", dir, srcRoot) + } + + otherDir := dir + for len(otherDir) > len(srcRoot) { + for _, otherVCS := range vcsList { + if _, err := os.Stat(filepath.Join(otherDir, "."+otherVCS.cmd)); err == nil { + // Allow expected vcs in original dir. + if otherDir == dir && otherVCS == vcs { + continue + } + // Allow .git inside .git, which can arise due to submodules. + if otherVCS == vcs && vcs.cmd == "git" { + continue + } + // Otherwise, we have one VCS inside a different VCS. + return fmt.Errorf("directory %q uses %s, but parent %q uses %s", dir, vcs.cmd, otherDir, otherVCS.cmd) + } + } + // Move to parent. + newDir := filepath.Dir(otherDir) + if len(newDir) >= len(otherDir) { + // Shouldn't happen, but just in case, stop. + break + } + otherDir = newDir + } + + return nil +} + // repoRoot represents a version control system, a repo, and a root of // where to put it on disk. type repoRoot struct { diff --git a/src/cmd/internal/obj/x86/asm6.go b/src/cmd/internal/obj/x86/asm6.go index bcf9318e2e..5f3a8c45d5 100644 --- a/src/cmd/internal/obj/x86/asm6.go +++ b/src/cmd/internal/obj/x86/asm6.go @@ -2269,6 +2269,13 @@ func oclass(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) int { return Yxxx } if ctxt.Arch.Family == sys.AMD64 { + // Offset must fit in a 32-bit signed field (or fit in a 32-bit unsigned field + // where the sign extension doesn't matter). + // Note: The latter happens only in assembly, for example crypto/sha1/sha1block_amd64.s. 
+ if !(a.Offset == int64(int32(a.Offset)) || + a.Offset == int64(uint32(a.Offset)) && p.As == ALEAL) { + return Yxxx + } switch a.Name { case obj.NAME_EXTERN, obj.NAME_STATIC, obj.NAME_GOTREF: // Global variables can't use index registers and their diff --git a/src/cmd/internal/objabi/line.go b/src/cmd/internal/objabi/line.go index ed509b7001..1c671b211f 100644 --- a/src/cmd/internal/objabi/line.go +++ b/src/cmd/internal/objabi/line.go @@ -44,7 +44,7 @@ func AbsFile(dir, file, pathPrefix string) string { abs = "??" } - return filepath.Clean(abs) + return abs } // Does s have t as a path prefix? diff --git a/src/crypto/x509/root_darwin_test.go b/src/crypto/x509/root_darwin_test.go index 2784ce2f0f..d935cc4e9a 100644 --- a/src/crypto/x509/root_darwin_test.go +++ b/src/crypto/x509/root_darwin_test.go @@ -16,6 +16,11 @@ func TestSystemRoots(t *testing.T) { t.Skipf("skipping on %s/%s, no system root", runtime.GOOS, runtime.GOARCH) } + switch runtime.GOOS { + case "darwin": + t.Skipf("skipping on %s/%s until cgo part of golang.org/issue/16532 has been implemented.", runtime.GOOS, runtime.GOARCH) + } + t0 := time.Now() sysRoots := systemRootsPool() // actual system roots sysRootsDuration := time.Since(t0) diff --git a/src/crypto/x509/verify.go b/src/crypto/x509/verify.go index 14183f8ff5..999cb08cf3 100644 --- a/src/crypto/x509/verify.go +++ b/src/crypto/x509/verify.go @@ -196,6 +196,10 @@ func matchNameConstraint(domain, constraint string) bool { // isValid performs validity checks on the c. 
func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *VerifyOptions) error { + if len(c.UnhandledCriticalExtensions) > 0 { + return UnhandledCriticalExtension{} + } + if len(currentChain) > 0 { child := currentChain[len(currentChain)-1] if !bytes.Equal(child.RawIssuer, c.RawSubject) { @@ -297,10 +301,6 @@ func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err e return c.systemVerify(&opts) } - if len(c.UnhandledCriticalExtensions) > 0 { - return nil, UnhandledCriticalExtension{} - } - if opts.Roots == nil { opts.Roots = systemRootsPool() if opts.Roots == nil { diff --git a/src/crypto/x509/verify_test.go b/src/crypto/x509/verify_test.go index 335c477d0d..41e295d3e5 100644 --- a/src/crypto/x509/verify_test.go +++ b/src/crypto/x509/verify_test.go @@ -296,6 +296,30 @@ var verifyTests = []verifyTest{ errorCallback: expectNameConstraintsError, }, + { + // Test that unknown critical extensions in a leaf cause a + // verify error. + leaf: criticalExtLeafWithExt, + dnsName: "example.com", + intermediates: []string{criticalExtIntermediate}, + roots: []string{criticalExtRoot}, + currentTime: 1486684488, + systemSkip: true, + + errorCallback: expectUnhandledCriticalExtension, + }, + { + // Test that unknown critical extensions in an intermediate + // cause a verify error. 
+ leaf: criticalExtLeaf, + dnsName: "example.com", + intermediates: []string{criticalExtIntermediateWithExt}, + roots: []string{criticalExtRoot}, + currentTime: 1486684488, + systemSkip: true, + + errorCallback: expectUnhandledCriticalExtension, + }, } func expectHostnameError(t *testing.T, i int, err error) (ok bool) { @@ -379,6 +403,14 @@ func expectNotAuthorizedError(t *testing.T, i int, err error) (ok bool) { return true } +func expectUnhandledCriticalExtension(t *testing.T, i int, err error) (ok bool) { + if _, ok := err.(UnhandledCriticalExtension); !ok { + t.Errorf("#%d: error was not an UnhandledCriticalExtension: %s", i, err) + return false + } + return true +} + func certificateFromPEM(pemBytes string) (*Certificate, error) { block, _ := pem.Decode([]byte(pemBytes)) if block == nil { @@ -1596,3 +1628,67 @@ w67CoNRb81dy+4Q1lGpA8ORoLWh5fIq2t2eNGc4qB8vlTIKiESzAwu7u3sRfuWQi 4R+gnfLd37FWflMHwztFbVTuNtPOljCX0LN7KcuoXYlr05RhQrmoN7fQHsrZMNLs 8FVjHdKKu+uPstwd04Uy4BR/H2y1yerN9j/L6ZkMl98iiA== -----END CERTIFICATE-----` + +const criticalExtRoot = `-----BEGIN CERTIFICATE----- +MIIBqzCCAVGgAwIBAgIJAJ+mI/85cXApMAoGCCqGSM49BAMCMB0xDDAKBgNVBAoT +A09yZzENMAsGA1UEAxMEUm9vdDAeFw0xNTAxMDEwMDAwMDBaFw0yNTAxMDEwMDAw +MDBaMB0xDDAKBgNVBAoTA09yZzENMAsGA1UEAxMEUm9vdDBZMBMGByqGSM49AgEG +CCqGSM49AwEHA0IABJGp9joiG2QSQA+1FczEDAsWo84rFiP3GTL+n+ugcS6TyNib +gzMsdbJgVi+a33y0SzLZxB+YvU3/4KTk8yKLC+2jejB4MA4GA1UdDwEB/wQEAwIC +BDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB +/zAZBgNVHQ4EEgQQQDfXAftAL7gcflQEJ4xZATAbBgNVHSMEFDASgBBAN9cB+0Av +uBx+VAQnjFkBMAoGCCqGSM49BAMCA0gAMEUCIFeSV00fABFceWR52K+CfIgOHotY +FizzGiLB47hGwjMuAiEA8e0um2Kr8FPQ4wmFKaTRKHMaZizCGl3m+RG5QsE1KWo= +-----END CERTIFICATE-----` + +const criticalExtIntermediate = `-----BEGIN CERTIFICATE----- +MIIBszCCAVmgAwIBAgIJAL2kcGZKpzVqMAoGCCqGSM49BAMCMB0xDDAKBgNVBAoT +A09yZzENMAsGA1UEAxMEUm9vdDAeFw0xNTAxMDEwMDAwMDBaFw0yNTAxMDEwMDAw +MDBaMCUxDDAKBgNVBAoTA09yZzEVMBMGA1UEAxMMSW50ZXJtZWRpYXRlMFkwEwYH 
+KoZIzj0CAQYIKoZIzj0DAQcDQgAESqVq92iPEq01cL4o99WiXDc5GZjpjNlzMS1n +rk8oHcVDp4tQRRQG3F4A6dF1rn/L923ha3b0fhDLlAvXZB+7EKN6MHgwDgYDVR0P +AQH/BAQDAgIEMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAPBgNVHRMB +Af8EBTADAQH/MBkGA1UdDgQSBBCMGmiotXbbXVd7H40UsgajMBsGA1UdIwQUMBKA +EEA31wH7QC+4HH5UBCeMWQEwCgYIKoZIzj0EAwIDSAAwRQIhAOhhNRb6KV7h3wbE +cdap8bojzvUcPD78fbsQPCNw1jPxAiBOeAJhlTwpKn9KHpeJphYSzydj9NqcS26Y +xXbdbm27KQ== +-----END CERTIFICATE-----` + +const criticalExtLeafWithExt = `-----BEGIN CERTIFICATE----- +MIIBxTCCAWugAwIBAgIJAJZAUtw5ccb1MAoGCCqGSM49BAMCMCUxDDAKBgNVBAoT +A09yZzEVMBMGA1UEAxMMSW50ZXJtZWRpYXRlMB4XDTE1MDEwMTAwMDAwMFoXDTI1 +MDEwMTAwMDAwMFowJDEMMAoGA1UEChMDT3JnMRQwEgYDVQQDEwtleGFtcGxlLmNv +bTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABF3ABa2+B6gUyg6ayCaRQWYY/+No +6PceLqEavZNUeVNuz7bS74Toy8I7R3bGMkMgbKpLSPlPTroAATvebTXoBaijgYQw +gYEwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcD +AjAMBgNVHRMBAf8EAjAAMBkGA1UdDgQSBBBRNtBL2vq8nCV3qVp7ycxMMBsGA1Ud +IwQUMBKAEIwaaKi1dttdV3sfjRSyBqMwCgYDUQMEAQH/BAAwCgYIKoZIzj0EAwID +SAAwRQIgVjy8GBgZFiagexEuDLqtGjIRJQtBcf7lYgf6XFPH1h4CIQCT6nHhGo6E +I+crEm4P5q72AnA/Iy0m24l7OvLuXObAmg== +-----END CERTIFICATE-----` + +const criticalExtIntermediateWithExt = `-----BEGIN CERTIFICATE----- +MIIB2TCCAX6gAwIBAgIIQD3NrSZtcUUwCgYIKoZIzj0EAwIwHTEMMAoGA1UEChMD +T3JnMQ0wCwYDVQQDEwRSb290MB4XDTE1MDEwMTAwMDAwMFoXDTI1MDEwMTAwMDAw +MFowPTEMMAoGA1UEChMDT3JnMS0wKwYDVQQDEyRJbnRlcm1lZGlhdGUgd2l0aCBD +cml0aWNhbCBFeHRlbnNpb24wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQtnmzH +mcRm10bdDBnJE7xQEJ25cLCL5okuEphRR0Zneo6+nQZikoh+UBbtt5GV3Dms7LeP +oF5HOplYDCd8wi/wo4GHMIGEMA4GA1UdDwEB/wQEAwICBDAdBgNVHSUEFjAUBggr +BgEFBQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAZBgNVHQ4EEgQQKxdv +UuQZ6sO3XvBsxgNZ3zAbBgNVHSMEFDASgBBAN9cB+0AvuBx+VAQnjFkBMAoGA1ED +BAEB/wQAMAoGCCqGSM49BAMCA0kAMEYCIQCQzTPd6XKex+OAPsKT/1DsoMsg8vcG +c2qZ4Q0apT/kvgIhAKu2TnNQMIUdcO0BYQIl+Uhxc78dc9h4lO+YJB47pHGx +-----END CERTIFICATE-----` + +const criticalExtLeaf = `-----BEGIN CERTIFICATE----- 
+MIIBzzCCAXWgAwIBAgIJANoWFIlhCI9MMAoGCCqGSM49BAMCMD0xDDAKBgNVBAoT +A09yZzEtMCsGA1UEAxMkSW50ZXJtZWRpYXRlIHdpdGggQ3JpdGljYWwgRXh0ZW5z +aW9uMB4XDTE1MDEwMTAwMDAwMFoXDTI1MDEwMTAwMDAwMFowJDEMMAoGA1UEChMD +T3JnMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEGCCqGSM49AwEH +A0IABG1Lfh8A0Ho2UvZN5H0+ONil9c8jwtC0y0xIZftyQE+Fwr9XwqG3rV2g4M1h +GnJa9lV9MPHg8+b85Hixm0ZSw7SjdzB1MA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUE +FjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAZBgNVHQ4EEgQQ +UNhY4JhezH9gQYqvDMWrWDAbBgNVHSMEFDASgBArF29S5Bnqw7de8GzGA1nfMAoG +CCqGSM49BAMCA0gAMEUCIQClA3d4tdrDu9Eb5ZBpgyC+fU1xTZB0dKQHz6M5fPZA +2AIgN96lM+CPGicwhN24uQI6flOsO3H0TJ5lNzBYLtnQtlc= +-----END CERTIFICATE-----` diff --git a/src/crypto/x509/x509_test.go b/src/crypto/x509/x509_test.go index 2d1acf93bf..b800191db3 100644 --- a/src/crypto/x509/x509_test.go +++ b/src/crypto/x509/x509_test.go @@ -523,74 +523,6 @@ func TestCreateSelfSignedCertificate(t *testing.T) { } } -func TestUnknownCriticalExtension(t *testing.T) { - priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - if err != nil { - t.Fatalf("Failed to generate ECDSA key: %s", err) - } - - oids := []asn1.ObjectIdentifier{ - // This OID is in the PKIX arc, but unknown. - {2, 5, 29, 999999}, - // This is a nonsense, unassigned OID. 
- {1, 2, 3, 4}, - } - - for _, oid := range oids { - template := Certificate{ - SerialNumber: big.NewInt(1), - Subject: pkix.Name{ - CommonName: "foo", - }, - NotBefore: time.Unix(1000, 0), - NotAfter: time.Now().AddDate(1, 0, 0), - - BasicConstraintsValid: true, - IsCA: true, - - KeyUsage: KeyUsageCertSign, - ExtKeyUsage: []ExtKeyUsage{ExtKeyUsageServerAuth}, - - ExtraExtensions: []pkix.Extension{ - { - Id: oid, - Critical: true, - Value: nil, - }, - }, - } - - derBytes, err := CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv) - if err != nil { - t.Fatalf("failed to create certificate: %s", err) - } - - cert, err := ParseCertificate(derBytes) - if err != nil { - t.Fatalf("Certificate with unknown critical extension was not parsed: %s", err) - } - - roots := NewCertPool() - roots.AddCert(cert) - - // Setting Roots ensures that Verify won't delegate to the OS - // library and thus the correct error should always be - // returned. - _, err = cert.Verify(VerifyOptions{Roots: roots}) - if err == nil { - t.Fatal("Certificate with unknown critical extension was verified without error") - } - if _, ok := err.(UnhandledCriticalExtension); !ok { - t.Fatalf("Error was %#v, but wanted one of type UnhandledCriticalExtension", err) - } - - cert.UnhandledCriticalExtensions = nil - if _, err = cert.Verify(VerifyOptions{Roots: roots}); err != nil { - t.Errorf("Certificate failed to verify after unhandled critical extensions were cleared: %s", err) - } - } -} - // Self-signed certificate using ECDSA with SHA1 & secp256r1 var ecdsaSHA1CertPem = ` -----BEGIN CERTIFICATE----- diff --git a/src/database/sql/fakedb_test.go b/src/database/sql/fakedb_test.go index 4dcd096ca4..8e77df4ace 100644 --- a/src/database/sql/fakedb_test.go +++ b/src/database/sql/fakedb_test.go @@ -943,6 +943,7 @@ type rowsCursor struct { } func (rc *rowsCursor) touchMem() { + rc.parentMem.touchMem() rc.line++ } diff --git a/src/database/sql/sql.go b/src/database/sql/sql.go index 
c609fe4cc4..89976c7fd0 100644 --- a/src/database/sql/sql.go +++ b/src/database/sql/sql.go @@ -2454,6 +2454,12 @@ func (rs *Rows) nextLocked() (doClose, ok bool) { if rs.lastcols == nil { rs.lastcols = make([]driver.Value, len(rs.rowsi.Columns())) } + + // Lock the driver connection before calling the driver interface + // rowsi to prevent a Tx from rolling back the connection at the same time. + rs.dc.Lock() + defer rs.dc.Unlock() + rs.lasterr = rs.rowsi.Next(rs.lastcols) if rs.lasterr != nil { // Close the connection if there is a driver error. @@ -2503,6 +2509,12 @@ func (rs *Rows) NextResultSet() bool { doClose = true return false } + + // Lock the driver connection before calling the driver interface + // rowsi to prevent a Tx from rolling back the connection at the same time. + rs.dc.Lock() + defer rs.dc.Unlock() + rs.lasterr = nextResultSet.NextResultSet() if rs.lasterr != nil { doClose = true diff --git a/src/database/sql/sql_test.go b/src/database/sql/sql_test.go index c935eb4348..dd59ab9853 100644 --- a/src/database/sql/sql_test.go +++ b/src/database/sql/sql_test.go @@ -3106,6 +3106,9 @@ func TestIssue6081(t *testing.T) { // In the test, a context is canceled while the query is in process so // the internal rollback will run concurrently with the explicitly called // Tx.Rollback. +// +// The addition of calling rows.Next also tests +// Issue 21117. func TestIssue18429(t *testing.T) { db := newTestDB(t, "people") defer closeDB(t, db) @@ -3116,7 +3119,7 @@ func TestIssue18429(t *testing.T) { const milliWait = 30 - for i := 0; i < 100; i++ { + for i := 0; i < 1000; i++ { sem <- true wg.Add(1) go func() { @@ -3138,6 +3141,9 @@ func TestIssue18429(t *testing.T) { // reported. rows, _ := tx.QueryContext(ctx, "WAIT|"+qwait+"|SELECT|people|name|") if rows != nil { + // Call Next to test Issue 21117 and check for races. 
+ for rows.Next() { + } rows.Close() } // This call will race with the context cancel rollback to complete diff --git a/src/expvar/expvar.go b/src/expvar/expvar.go index 64dae70c62..8290e0bd72 100644 --- a/src/expvar/expvar.go +++ b/src/expvar/expvar.go @@ -125,7 +125,17 @@ func (v *Map) String() string { return b.String() } -func (v *Map) Init() *Map { return v } +// Init removes all keys from the map. +func (v *Map) Init() *Map { + v.keysMu.Lock() + defer v.keysMu.Unlock() + v.keys = v.keys[:0] + v.m.Range(func(k, _ interface{}) bool { + v.m.Delete(k) + return true + }) + return v +} // updateKeys updates the sorted list of keys in v.keys. func (v *Map) addKey(key string) { diff --git a/src/expvar/expvar_test.go b/src/expvar/expvar_test.go index 7014063d4f..728e763896 100644 --- a/src/expvar/expvar_test.go +++ b/src/expvar/expvar_test.go @@ -161,6 +161,28 @@ func BenchmarkStringSet(b *testing.B) { }) } +func TestMapInit(t *testing.T) { + RemoveAll() + colors := NewMap("bike-shed-colors") + colors.Add("red", 1) + colors.Add("blue", 1) + colors.Add("chartreuse", 1) + + n := 0 + colors.Do(func(KeyValue) { n++ }) + if n != 3 { + t.Errorf("after three Add calls with distinct keys, Do should invoke f 3 times; got %v", n) + } + + colors.Init() + + n = 0 + colors.Do(func(KeyValue) { n++ }) + if n != 0 { + t.Errorf("after Init, Do should invoke f 0 times; got %v", n) + } +} + func TestMapCounter(t *testing.T) { RemoveAll() colors := NewMap("bike-shed-colors") diff --git a/src/internal/poll/export_windows_test.go b/src/internal/poll/export_windows_test.go new file mode 100644 index 0000000000..88ed71ad84 --- /dev/null +++ b/src/internal/poll/export_windows_test.go @@ -0,0 +1,17 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Export guts for testing on windows. 
+// Since testing imports os and os imports internal/poll, +// the internal/poll tests can not be in package poll. + +package poll + +var ( + LogInitFD = &logInitFD +) + +func (fd *FD) IsPartOfNetpoll() bool { + return fd.pd.runtimeCtx != 0 +} diff --git a/src/internal/poll/fd_unix.go b/src/internal/poll/fd_unix.go index c40c701f59..d9538e364b 100644 --- a/src/internal/poll/fd_unix.go +++ b/src/internal/poll/fd_unix.go @@ -42,6 +42,7 @@ type FD struct { // This can be called multiple times on a single FD. // The net argument is a network name from the net package (e.g., "tcp"), // or "file". +// Set pollable to true if fd should be managed by runtime netpoll. func (fd *FD) Init(net string, pollable bool) error { // We don't actually care about the various network types. if net == "file" { diff --git a/src/internal/poll/fd_windows.go b/src/internal/poll/fd_windows.go index 655f9348c6..27fef04be0 100644 --- a/src/internal/poll/fd_windows.go +++ b/src/internal/poll/fd_windows.go @@ -31,11 +31,40 @@ var ( // package uses CancelIoEx API, if present, otherwise it fallback // to CancelIo. -var ( - canCancelIO bool // determines if CancelIoEx API is present - skipSyncNotif bool - hasLoadSetFileCompletionNotificationModes bool -) +var canCancelIO bool // determines if CancelIoEx API is present + +// This package uses SetFileCompletionNotificationModes Windows API +// to skip calling GetQueuedCompletionStatus if an IO operation completes +// synchronously. Unfortuently SetFileCompletionNotificationModes is not +// available on Windows XP. Also there is a known bug where +// SetFileCompletionNotificationModes crashes on some systems +// (see http://support.microsoft.com/kb/2568167 for details). 
+ +var useSetFileCompletionNotificationModes bool // determines is SetFileCompletionNotificationModes is present and safe to use + +// checkSetFileCompletionNotificationModes verifies that +// SetFileCompletionNotificationModes Windows API is present +// on the system and is safe to use. +// See http://support.microsoft.com/kb/2568167 for details. +func checkSetFileCompletionNotificationModes() { + err := syscall.LoadSetFileCompletionNotificationModes() + if err != nil { + return + } + protos := [2]int32{syscall.IPPROTO_TCP, 0} + var buf [32]syscall.WSAProtocolInfo + len := uint32(unsafe.Sizeof(buf)) + n, err := syscall.WSAEnumProtocols(&protos[0], &buf[0], &len) + if err != nil { + return + } + for i := int32(0); i < n; i++ { + if buf[i].ServiceFlags1&syscall.XP1_IFS_HANDLES == 0 { + return + } + } + useSetFileCompletionNotificationModes = true +} func init() { var d syscall.WSAData @@ -44,26 +73,7 @@ func init() { initErr = e } canCancelIO = syscall.LoadCancelIoEx() == nil - hasLoadSetFileCompletionNotificationModes = syscall.LoadSetFileCompletionNotificationModes() == nil - if hasLoadSetFileCompletionNotificationModes { - // It's not safe to use FILE_SKIP_COMPLETION_PORT_ON_SUCCESS if non IFS providers are installed: - // http://support.microsoft.com/kb/2568167 - skipSyncNotif = true - protos := [2]int32{syscall.IPPROTO_TCP, 0} - var buf [32]syscall.WSAProtocolInfo - len := uint32(unsafe.Sizeof(buf)) - n, err := syscall.WSAEnumProtocols(&protos[0], &buf[0], &len) - if err != nil { - skipSyncNotif = false - } else { - for i := int32(0); i < n; i++ { - if buf[i].ServiceFlags1&syscall.XP1_IFS_HANDLES == 0 { - skipSyncNotif = false - break - } - } - } - } + checkSetFileCompletionNotificationModes() } // operation contains superset of data necessary to perform all async IO. @@ -295,11 +305,15 @@ type FD struct { isDir bool } +// logInitFD is set by tests to enable file descriptor initialization logging. 
+var logInitFD func(net string, fd *FD, err error) + // Init initializes the FD. The Sysfd field should already be set. // This can be called multiple times on a single FD. // The net argument is a network name from the net package (e.g., "tcp"), // or "file" or "console" or "dir". -func (fd *FD) Init(net string) (string, error) { +// Set pollable to true if fd should be managed by runtime netpoll. +func (fd *FD) Init(net string, pollable bool) (string, error) { if initErr != nil { return "", initErr } @@ -319,7 +333,8 @@ func (fd *FD) Init(net string) (string, error) { return "", errors.New("internal error: unknown network type " + net) } - if !fd.isFile && !fd.isConsole && !fd.isDir { + var err error + if pollable { // Only call init for a network socket. // This means that we don't add files to the runtime poller. // Adding files to the runtime poller can confuse matters @@ -331,16 +346,20 @@ func (fd *FD) Init(net string) (string, error) { // somehow call ExecIO, then ExecIO, and therefore the // calling method, will return an error, because // fd.pd.runtimeCtx will be 0. - if err := fd.pd.init(fd); err != nil { - return "", err - } + err = fd.pd.init(fd) + } + if logInitFD != nil { + logInitFD(net, fd, err) + } + if err != nil { + return "", err } - if hasLoadSetFileCompletionNotificationModes { + if pollable && useSetFileCompletionNotificationModes { // We do not use events, so we can skip them always. 
flags := uint8(syscall.FILE_SKIP_SET_EVENT_ON_HANDLE) // It's not safe to skip completion notifications for UDP: // http://blogs.technet.com/b/winserverperformance/archive/2008/06/26/designing-applications-for-high-performance-part-iii.aspx - if skipSyncNotif && (net == "tcp" || net == "file") { + if net == "tcp" { flags |= syscall.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS } err := syscall.SetFileCompletionNotificationModes(fd.Sysfd, flags) diff --git a/src/internal/poll/fd_windows_test.go b/src/internal/poll/fd_windows_test.go new file mode 100644 index 0000000000..e3ca0e26ac --- /dev/null +++ b/src/internal/poll/fd_windows_test.go @@ -0,0 +1,111 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package poll_test + +import ( + "fmt" + "internal/poll" + "os" + "sync" + "syscall" + "testing" +) + +type loggedFD struct { + Net string + FD *poll.FD + Err error +} + +var ( + logMu sync.Mutex + loggedFDs map[syscall.Handle]*loggedFD +) + +func logFD(net string, fd *poll.FD, err error) { + logMu.Lock() + defer logMu.Unlock() + + loggedFDs[fd.Sysfd] = &loggedFD{ + Net: net, + FD: fd, + Err: err, + } +} + +func init() { + loggedFDs = make(map[syscall.Handle]*loggedFD) + *poll.LogInitFD = logFD +} + +func findLoggedFD(h syscall.Handle) (lfd *loggedFD, found bool) { + logMu.Lock() + defer logMu.Unlock() + + lfd, found = loggedFDs[h] + return lfd, found +} + +// checkFileIsNotPartOfNetpoll verifies that f is not managed by netpoll. +// It returns error, if check fails. 
+func checkFileIsNotPartOfNetpoll(f *os.File) error { + lfd, found := findLoggedFD(syscall.Handle(f.Fd())) + if !found { + return fmt.Errorf("%v fd=%v: is not found in the log", f.Name(), f.Fd()) + } + if lfd.FD.IsPartOfNetpoll() { + return fmt.Errorf("%v fd=%v: is part of netpoll, but should not be (logged: net=%v err=%v)", f.Name(), f.Fd(), lfd.Net, lfd.Err) + } + return nil +} + +func TestFileFdsAreInitialised(t *testing.T) { + exe, err := os.Executable() + if err != nil { + t.Fatal(err) + } + f, err := os.Open(exe) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + err = checkFileIsNotPartOfNetpoll(f) + if err != nil { + t.Fatal(err) + } +} + +func TestSerialFdsAreInitialised(t *testing.T) { + for _, name := range []string{"COM1", "COM2", "COM3", "COM4"} { + t.Run(name, func(t *testing.T) { + h, err := syscall.CreateFile(syscall.StringToUTF16Ptr(name), + syscall.GENERIC_READ|syscall.GENERIC_WRITE, + 0, + nil, + syscall.OPEN_EXISTING, + syscall.FILE_ATTRIBUTE_NORMAL|syscall.FILE_FLAG_OVERLAPPED, + 0) + if err != nil { + if errno, ok := err.(syscall.Errno); ok { + switch errno { + case syscall.ERROR_FILE_NOT_FOUND, + syscall.ERROR_ACCESS_DENIED: + t.Log("Skipping: ", err) + return + } + } + t.Fatal(err) + } + f := os.NewFile(uintptr(h), name) + defer f.Close() + + err = checkFileIsNotPartOfNetpoll(f) + if err != nil { + t.Fatal(err) + } + }) + } +} diff --git a/src/log/log.go b/src/log/log.go index 587904b11c..e8e0c96636 100644 --- a/src/log/log.go +++ b/src/log/log.go @@ -147,11 +147,7 @@ func (l *Logger) formatHeader(buf *[]byte, t time.Time, file string, line int) { // provided for generality, although at the moment on all pre-defined // paths it will be 2. func (l *Logger) Output(calldepth int, s string) error { - // Get time early if we need it. - var now time.Time - if l.flag&(Ldate|Ltime|Lmicroseconds) != 0 { - now = time.Now() - } + now := time.Now() // get this early. 
var file string var line int l.mu.Lock() diff --git a/src/log/log_test.go b/src/log/log_test.go index 966fdf306b..adc15e7e8e 100644 --- a/src/log/log_test.go +++ b/src/log/log_test.go @@ -88,6 +88,17 @@ func TestOutput(t *testing.T) { } } +func TestOutputRace(t *testing.T) { + var b bytes.Buffer + l := New(&b, "", 0) + for i := 0; i < 100; i++ { + go func() { + l.SetFlags(0) + }() + l.Output(0, "") + } +} + func TestFlagAndPrefixSetting(t *testing.T) { var b bytes.Buffer l := New(&b, "Test:", LstdFlags) diff --git a/src/net/dial_test.go b/src/net/dial_test.go index a892bf1e14..13fa9faacb 100644 --- a/src/net/dial_test.go +++ b/src/net/dial_test.go @@ -161,6 +161,8 @@ func dialClosedPort() (actual, expected time.Duration) { // but other platforms should be instantaneous. if runtime.GOOS == "windows" { expected = 1500 * time.Millisecond + } else if runtime.GOOS == "darwin" { + expected = 150 * time.Millisecond } else { expected = 95 * time.Millisecond } diff --git a/src/net/fd_windows.go b/src/net/fd_windows.go index c2156b255e..563558dc52 100644 --- a/src/net/fd_windows.go +++ b/src/net/fd_windows.go @@ -52,7 +52,7 @@ func newFD(sysfd syscall.Handle, family, sotype int, net string) (*netFD, error) } func (fd *netFD) init() error { - errcall, err := fd.pfd.Init(fd.net) + errcall, err := fd.pfd.Init(fd.net, true) if errcall != "" { err = wrapSyscallError(errcall, err) } diff --git a/src/net/smtp/auth.go b/src/net/smtp/auth.go index 3f1339ebc5..fd1a472f93 100644 --- a/src/net/smtp/auth.go +++ b/src/net/smtp/auth.go @@ -44,26 +44,29 @@ type plainAuth struct { } // PlainAuth returns an Auth that implements the PLAIN authentication -// mechanism as defined in RFC 4616. -// The returned Auth uses the given username and password to authenticate -// on TLS connections to host and act as identity. Usually identity will be -// left blank to act as username. +// mechanism as defined in RFC 4616. 
The returned Auth uses the given +// username and password to authenticate to host and act as identity. +// Usually identity should be the empty string, to act as username. +// +// PlainAuth will only send the credentials if the connection is using TLS +// or is connected to localhost. Otherwise authentication will fail with an +// error, without sending the credentials. func PlainAuth(identity, username, password, host string) Auth { return &plainAuth{identity, username, password, host} } +func isLocalhost(name string) bool { + return name == "localhost" || name == "127.0.0.1" || name == "::1" +} + func (a *plainAuth) Start(server *ServerInfo) (string, []byte, error) { - if !server.TLS { - advertised := false - for _, mechanism := range server.Auth { - if mechanism == "PLAIN" { - advertised = true - break - } - } - if !advertised { - return "", nil, errors.New("unencrypted connection") - } + // Must have TLS, or else localhost server. + // Note: If TLS is not true, then we can't trust ANYTHING in ServerInfo. + // In particular, it doesn't matter if the server advertises PLAIN auth. + // That might just be the attacker saying + // "it's ok, you can trust me with your password." 
+ if !server.TLS && !isLocalhost(server.Name) { + return "", nil, errors.New("unencrypted connection") } if server.Name != a.host { return "", nil, errors.New("wrong host name") diff --git a/src/net/smtp/smtp.go b/src/net/smtp/smtp.go index 28472e447b..767b077fe0 100644 --- a/src/net/smtp/smtp.go +++ b/src/net/smtp/smtp.go @@ -67,6 +67,7 @@ func NewClient(conn net.Conn, host string) (*Client, error) { return nil, err } c := &Client{Text: text, conn: conn, serverName: host, localName: "localhost"} + _, c.tls = conn.(*tls.Conn) return c, nil } diff --git a/src/net/smtp/smtp_test.go b/src/net/smtp/smtp_test.go index 9dbe3eb9ec..606a715ce4 100644 --- a/src/net/smtp/smtp_test.go +++ b/src/net/smtp/smtp_test.go @@ -62,29 +62,41 @@ testLoop: } func TestAuthPlain(t *testing.T) { - auth := PlainAuth("foo", "bar", "baz", "servername") tests := []struct { - server *ServerInfo - err string + authName string + server *ServerInfo + err string }{ { - server: &ServerInfo{Name: "servername", TLS: true}, + authName: "servername", + server: &ServerInfo{Name: "servername", TLS: true}, }, { - // Okay; explicitly advertised by server. - server: &ServerInfo{Name: "servername", Auth: []string{"PLAIN"}}, + // OK to use PlainAuth on localhost without TLS + authName: "localhost", + server: &ServerInfo{Name: "localhost", TLS: false}, }, { - server: &ServerInfo{Name: "servername", Auth: []string{"CRAM-MD5"}}, - err: "unencrypted connection", + // NOT OK on non-localhost, even if server says PLAIN is OK. + // (We don't know that the server is the real server.) 
+ authName: "servername", + server: &ServerInfo{Name: "servername", Auth: []string{"PLAIN"}}, + err: "unencrypted connection", }, { - server: &ServerInfo{Name: "attacker", TLS: true}, - err: "wrong host name", + authName: "servername", + server: &ServerInfo{Name: "servername", Auth: []string{"CRAM-MD5"}}, + err: "unencrypted connection", + }, + { + authName: "servername", + server: &ServerInfo{Name: "attacker", TLS: true}, + err: "wrong host name", }, } for i, tt := range tests { + auth := PlainAuth("foo", "bar", "baz", tt.authName) _, _, err := auth.Start(tt.server) got := "" if err != nil { @@ -352,6 +364,53 @@ HELO localhost QUIT ` +func TestNewClientWithTLS(t *testing.T) { + cert, err := tls.X509KeyPair(localhostCert, localhostKey) + if err != nil { + t.Fatalf("loadcert: %v", err) + } + + config := tls.Config{Certificates: []tls.Certificate{cert}} + + ln, err := tls.Listen("tcp", "127.0.0.1:0", &config) + if err != nil { + ln, err = tls.Listen("tcp", "[::1]:0", &config) + if err != nil { + t.Fatalf("server: listen: %s", err) + } + } + + go func() { + conn, err := ln.Accept() + if err != nil { + t.Fatalf("server: accept: %s", err) + return + } + defer conn.Close() + + _, err = conn.Write([]byte("220 SIGNS\r\n")) + if err != nil { + t.Fatalf("server: write: %s", err) + return + } + }() + + config.InsecureSkipVerify = true + conn, err := tls.Dial("tcp", ln.Addr().String(), &config) + if err != nil { + t.Fatalf("client: dial: %s", err) + } + defer conn.Close() + + client, err := NewClient(conn, ln.Addr().String()) + if err != nil { + t.Fatalf("smtp: newclient: %s", err) + } + if !client.tls { + t.Errorf("client.tls Got: %t Expected: %t", client.tls, true) + } +} + func TestHello(t *testing.T) { if len(helloServer) != len(helloClient) { diff --git a/src/os/file_windows.go b/src/os/file_windows.go index 93b6c135c7..e2be192bcb 100644 --- a/src/os/file_windows.go +++ b/src/os/file_windows.go @@ -54,7 +54,7 @@ func newFile(h syscall.Handle, name string, kind string) 
*File { // Ignore initialization errors. // Assume any problems will show up in later I/O. - f.pfd.Init(kind) + f.pfd.Init(kind, false) return f } diff --git a/src/os/os_test.go b/src/os/os_test.go index dbe4ff8830..d04ba917b3 100644 --- a/src/os/os_test.go +++ b/src/os/os_test.go @@ -2176,6 +2176,8 @@ func TestPipeThreads(t *testing.T) { t.Skip("skipping on Plan 9; does not support runtime poller") } + testenv.SkipFlaky(t, 21559) + threads := 100 // OpenBSD has a low default for max number of files. diff --git a/src/reflect/all_test.go b/src/reflect/all_test.go index 5a5c91b751..33694fd10e 100644 --- a/src/reflect/all_test.go +++ b/src/reflect/all_test.go @@ -19,6 +19,7 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "testing" "time" "unicode" @@ -1546,6 +1547,30 @@ func TestCallWithStruct(t *testing.T) { } } +func TestCallReturnsEmpty(t *testing.T) { + // Issue 21717: past-the-end pointer write in Call with + // nonzero-sized frame and zero-sized return value. + runtime.GC() + var finalized uint32 + f := func() (emptyStruct, *int) { + i := new(int) + runtime.SetFinalizer(i, func(*int) { atomic.StoreUint32(&finalized, 1) }) + return emptyStruct{}, i + } + v := ValueOf(f).Call(nil)[0] // out[0] should not alias out[1]'s memory, so the finalizer should run. 
+ timeout := time.After(5 * time.Second) + for atomic.LoadUint32(&finalized) == 0 { + select { + case <-timeout: + t.Fatal("finalizer did not run") + default: + } + runtime.Gosched() + runtime.GC() + } + runtime.KeepAlive(v) +} + func BenchmarkCall(b *testing.B) { fv := ValueOf(func(a, b string) {}) b.ReportAllocs() diff --git a/src/reflect/value.go b/src/reflect/value.go index 8488e8dec1..2b0ca05c70 100644 --- a/src/reflect/value.go +++ b/src/reflect/value.go @@ -456,8 +456,14 @@ func (v Value) call(op string, in []Value) []Value { tv := t.Out(i) a := uintptr(tv.Align()) off = (off + a - 1) &^ (a - 1) - fl := flagIndir | flag(tv.Kind()) - ret[i] = Value{tv.common(), unsafe.Pointer(uintptr(args) + off), fl} + if tv.Size() != 0 { + fl := flagIndir | flag(tv.Kind()) + ret[i] = Value{tv.common(), unsafe.Pointer(uintptr(args) + off), fl} + } else { + // For zero-sized return value, args+off may point to the next object. + // In this case, return the zero value instead. + ret[i] = Zero(tv) + } off += tv.Size() } } diff --git a/src/runtime/chan_test.go b/src/runtime/chan_test.go index a75fa1b992..0c94cf1a63 100644 --- a/src/runtime/chan_test.go +++ b/src/runtime/chan_test.go @@ -5,6 +5,7 @@ package runtime_test import ( + "math" "runtime" "sync" "sync/atomic" @@ -430,6 +431,62 @@ func TestSelectStress(t *testing.T) { wg.Wait() } +func TestSelectFairness(t *testing.T) { + const trials = 10000 + c1 := make(chan byte, trials+1) + c2 := make(chan byte, trials+1) + for i := 0; i < trials+1; i++ { + c1 <- 1 + c2 <- 2 + } + c3 := make(chan byte) + c4 := make(chan byte) + out := make(chan byte) + done := make(chan byte) + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + for { + var b byte + select { + case b = <-c3: + case b = <-c4: + case b = <-c1: + case b = <-c2: + } + select { + case out <- b: + case <-done: + return + } + } + }() + cnt1, cnt2 := 0, 0 + for i := 0; i < trials; i++ { + switch b := <-out; b { + case 1: + cnt1++ + case 2: + cnt2++ + default: 
+ t.Fatalf("unexpected value %d on channel", b) + } + } + // If the select in the goroutine is fair, + // cnt1 and cnt2 should be about the same value. + // With 10,000 trials, the expected margin of error at + // a confidence level of five nines is 4.4172 / (2 * Sqrt(10000)). + r := float64(cnt1) / trials + e := math.Abs(r - 0.5) + t.Log(cnt1, cnt2, r, e) + if e > 4.4172/(2*math.Sqrt(trials)) { + t.Errorf("unfair select: in %d trials, results were %d, %d", trials, cnt1, cnt2) + } + close(done) + wg.Wait() +} + func TestChanSendInterface(t *testing.T) { type mt struct{} m := &mt{} diff --git a/src/runtime/cpuprof.go b/src/runtime/cpuprof.go index fb841a9f3d..e00dcb1bbd 100644 --- a/src/runtime/cpuprof.go +++ b/src/runtime/cpuprof.go @@ -160,6 +160,7 @@ func (p *cpuProfile) addExtra() { funcPC(_ExternalCode) + sys.PCQuantum, } cpuprof.log.write(nil, 0, hdr[:], lostStk[:]) + p.lostExtra = 0 } } diff --git a/src/runtime/gc_test.go b/src/runtime/gc_test.go index 03acc8aaa6..0620f2d61e 100644 --- a/src/runtime/gc_test.go +++ b/src/runtime/gc_test.go @@ -170,7 +170,7 @@ func TestPeriodicGC(t *testing.T) { // slack if things are slow. var numGCs uint32 const want = 2 - for i := 0; i < 20 && numGCs < want; i++ { + for i := 0; i < 200 && numGCs < want; i++ { time.Sleep(5 * time.Millisecond) // Test that periodic GC actually happened. @@ -499,3 +499,19 @@ func BenchmarkReadMemStats(b *testing.B) { hugeSink = nil } + +func TestUserForcedGC(t *testing.T) { + // Test that runtime.GC() triggers a GC even if GOGC=off. 
+ defer debug.SetGCPercent(debug.SetGCPercent(-1)) + + var ms1, ms2 runtime.MemStats + runtime.ReadMemStats(&ms1) + runtime.GC() + runtime.ReadMemStats(&ms2) + if ms1.NumGC == ms2.NumGC { + t.Fatalf("runtime.GC() did not trigger GC") + } + if ms1.NumForcedGC == ms2.NumForcedGC { + t.Fatalf("runtime.GC() was not accounted in NumForcedGC") + } +} diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go index 111fa781e1..b708720322 100644 --- a/src/runtime/mgc.go +++ b/src/runtime/mgc.go @@ -1158,7 +1158,7 @@ func (t gcTrigger) test() bool { if t.kind == gcTriggerAlways { return true } - if gcphase != _GCoff || gcpercent < 0 { + if gcphase != _GCoff { return false } switch t.kind { @@ -1169,6 +1169,9 @@ func (t gcTrigger) test() bool { // own write. return memstats.heap_live >= memstats.gc_trigger case gcTriggerTime: + if gcpercent < 0 { + return false + } lastgc := int64(atomic.Load64(&memstats.last_gc_nanotime)) return lastgc != 0 && t.now-lastgc > forcegcperiod case gcTriggerCycle: diff --git a/src/runtime/proc.go b/src/runtime/proc.go index ed333bb92e..5787991f07 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -96,6 +96,9 @@ var main_init_done chan bool //go:linkname main_main main.main func main_main() +// mainStarted indicates that the main M has started. +var mainStarted bool + // runtimeInitTime is the nanotime() at which the runtime started. var runtimeInitTime int64 @@ -119,8 +122,8 @@ func main() { maxstacksize = 250000000 } - // Record when the world started. - runtimeInitTime = nanotime() + // Allow newproc to start new Ms. + mainStarted = true systemstack(func() { newm(sysmon, nil) @@ -139,6 +142,9 @@ func main() { } runtime_init() // must be before defer + if nanotime() == 0 { + throw("nanotime returning zero") + } // Defer unlock so that runtime.Goexit during init does the unlock too. needUnlock := true @@ -148,6 +154,10 @@ func main() { } }() + // Record when the world started. 
Must be after runtime_init + // because nanotime on some platforms depends on startNano. + runtimeInitTime = nanotime() + gcenable() main_init_done = make(chan bool) @@ -3024,7 +3034,7 @@ func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr } runqput(_p_, newg, true) - if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && runtimeInitTime != 0 { + if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && mainStarted { wakep() } _g_.m.locks-- diff --git a/src/runtime/stubs.go b/src/runtime/stubs.go index c4f32a8482..72d21187ec 100644 --- a/src/runtime/stubs.go +++ b/src/runtime/stubs.go @@ -105,9 +105,11 @@ func fastrand() uint32 { //go:nosplit func fastrandn(n uint32) uint32 { - // This is similar to fastrand() % n, but faster. - // See http://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/ - return uint32(uint64(fastrand()) * uint64(n) >> 32) + // Don't be clever. + // fastrand is not good enough for cleverness. + // Just use mod. + // See golang.org/issue/21806. + return fastrand() % n } //go:linkname sync_fastrand sync.fastrand diff --git a/src/runtime/time.go b/src/runtime/time.go index abf200d7d3..23f61d62d0 100644 --- a/src/runtime/time.go +++ b/src/runtime/time.go @@ -309,4 +309,10 @@ func time_runtimeNano() int64 { return nanotime() } -var startNano int64 = nanotime() +// Monotonic times are reported as offsets from startNano. +// We initialize startNano to nanotime() - 1 so that on systems where +// monotonic time resolution is fairly low (e.g. Windows 2008 +// which appears to have a default resolution of 15ms), +// we avoid ever reporting a nanotime of 0. +// (Callers may want to use 0 as "time not set".) 
+var startNano int64 = nanotime() - 1 diff --git a/src/time/time.go b/src/time/time.go index 8a29eef263..0f29b0ff93 100644 --- a/src/time/time.go +++ b/src/time/time.go @@ -1383,7 +1383,7 @@ func Date(year int, month Month, day, hour, min, sec, nsec int, loc *Location) T } // Truncate returns the result of rounding t down to a multiple of d (since the zero time). -// If d <= 0, Truncate returns t unchanged. +// If d <= 0, Truncate returns t stripped of any monotonic clock reading but otherwise unchanged. // // Truncate operates on the time as an absolute duration since the // zero time; it does not operate on the presentation form of the @@ -1400,7 +1400,7 @@ func (t Time) Truncate(d Duration) Time { // Round returns the result of rounding t to the nearest multiple of d (since the zero time). // The rounding behavior for halfway values is to round up. -// If d <= 0, Round returns t unchanged. +// If d <= 0, Round returns t stripped of any monotonic clock reading but otherwise unchanged. // // Round operates on the time as an absolute duration since the // zero time; it does not operate on the presentation form of the diff --git a/test/fixedbugs/issue21655.go b/test/fixedbugs/issue21655.go new file mode 100644 index 0000000000..4060c8ddbb --- /dev/null +++ b/test/fixedbugs/issue21655.go @@ -0,0 +1,40 @@ +// compile + +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Make sure assembly offsets don't get too large. + +// To trigger issue21655, the index offset needs to be small +// enough to fit into an int32 (to get rewritten to an ADDQconst) +// but large enough to overflow an int32 after multiplying by the stride. 
+ +package main + +func f1(a []int64, i int64) int64 { + return a[i+1<<30] +} +func f2(a []int32, i int64) int32 { + return a[i+1<<30] +} +func f3(a []int16, i int64) int16 { + return a[i+1<<30] +} +func f4(a []int8, i int64) int8 { + return a[i+1<<31] +} +func f5(a []float64, i int64) float64 { + return a[i+1<<30] +} +func f6(a []float32, i int64) float32 { + return a[i+1<<30] +} + +// Note: Before the fix for issue 21655, f{1,2,5,6} made +// the compiler crash. f3 silently generated the wrong +// code, using an offset of -1<<31 instead of 1<<31. +// (This is due to the assembler accepting offsets +// like 0x80000000 and silently using them as +// signed 32 bit offsets.) +// f4 was ok, but testing it can't hurt. diff --git a/test/fixedbugs/issue21963.go b/test/fixedbugs/issue21963.go new file mode 100644 index 0000000000..996bd63d09 --- /dev/null +++ b/test/fixedbugs/issue21963.go @@ -0,0 +1,27 @@ +// run + +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "runtime" +) + +//go:noinline +func f(x []int32, y *int8) int32 { + c := int32(int16(*y)) + runtime.GC() + return x[0] * c +} + +func main() { + var x = [1]int32{5} + var y int8 = -1 + if got, want := f(x[:], &y), int32(-5); got != want { + panic(fmt.Sprintf("wanted %d, got %d", want, got)) + } +} diff --git a/test/fixedbugs/issue22083.go b/test/fixedbugs/issue22083.go new file mode 100644 index 0000000000..a385102d08 --- /dev/null +++ b/test/fixedbugs/issue22083.go @@ -0,0 +1,41 @@ +// run + +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The compiler was panicking on the wrong line number, where +// the panic was occurring in an inlined call. 
+ +package main + +import ( + "runtime/debug" + "strings" +) + +type Wrapper struct { + a []int +} + +func (w Wrapper) Get(i int) int { + return w.a[i] +} + +func main() { + defer func() { + e := recover() + if e == nil { + panic("bounds check didn't fail") + } + stk := string(debug.Stack()) + if !strings.Contains(stk, "issue22083.go:40") { + panic("wrong stack trace: " + stk) + } + }() + foo := Wrapper{a: []int{0, 1, 2}} + _ = foo.Get(0) + _ = foo.Get(1) + _ = foo.Get(2) + _ = foo.Get(3) // stack trace should mention this line +} |